/*
 * drivers/video/tegra/nvmap/nvmap_mm.c
 *
 * Some MM related functionality specific to nvmap.
 *
 * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <trace/events/nvmap.h>

#include "nvmap_priv.h"
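
/*
 * Flush the entire inner (CPU) data cache. On Denver CPUs a dedicated
 * system register write performs the full D-cache flush when the feature
 * is advertised in ID_AFR0_EL1; otherwise this falls back to
 * __flush_dcache_all().
 */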
inline static void nvmap_flush_dcache_all(void *dummy)
{
#if defined(CONFIG_DENVER_CPU)
	u64 id_afr0;
	asm volatile ("mrs %0, ID_AFR0_EL1" : "=r"(id_afr0));
	if (likely((id_afr0 & 0xf00) == 0x100)) {
		asm volatile ("msr s3_0_c15_c13_0, %0" : : "r" (0));
		asm volatile ("dsb sy");
	} else {
		__flush_dcache_all(NULL);
	}
#else
	__flush_dcache_all(NULL);
#endif
}
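
/*
 * Flush the inner cache on every CPU, or from a single CPU when
 * CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU says set/way maintenance
 * from one CPU is sufficient.
 */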
void inner_flush_cache_all(void)
{
#if defined(CONFIG_ARM64) && defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
	nvmap_flush_dcache_all(NULL);
#elif defined(CONFIG_ARM64)
	on_each_cpu(nvmap_flush_dcache_all, NULL, 1);
#elif defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
	v7_flush_kern_cache_all();
#else
	on_each_cpu(v7_flush_kern_cache_all, NULL, 1);
#endif
}

extern void __clean_dcache_louis(void *);
extern void v7_clean_kern_cache_louis(void *);
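
/*
 * Clean (write back without invalidating) the inner cache. With
 * CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU, every CPU cleans to the Level of
 * Unification (LOUIS) and only the calling CPU performs the full clean.
 */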
void inner_clean_cache_all(void)
{
#if defined(CONFIG_ARM64) && \
		defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
	on_each_cpu(__clean_dcache_louis, NULL, 1);
	__clean_dcache_all(NULL);
#elif defined(CONFIG_ARM64)
	on_each_cpu(__clean_dcache_all, NULL, 1);
#elif defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
	on_each_cpu(v7_clean_kern_cache_louis, NULL, 1);
	v7_clean_kern_cache_all(NULL);
#else
	on_each_cpu(v7_clean_kern_cache_all, NULL, 1);
#endif
}
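
/*
 * Clean the given array of pages from the CPU cache and account the
 * operation in the nvmap statistics.
 */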
/*
 * __clean_dcache_page() is only available on ARM64 (well, we haven't
 * implemented it on ARMv7).
 */
#if defined(CONFIG_ARM64)
void nvmap_clean_cache(struct page **pages, int numpages)
{
	int i;

	/* Not technically a flush but that's what nvmap knows about. */
	nvmap_stats_inc(NS_CFLUSH_DONE, numpages << PAGE_SHIFT);
	trace_nvmap_cache_flush(numpages << PAGE_SHIFT,
				nvmap_stats_read(NS_ALLOC),
				nvmap_stats_read(NS_CFLUSH_RQ),
				nvmap_stats_read(NS_CFLUSH_DONE));

	for (i = 0; i < numpages; i++)
		__clean_dcache_page(pages[i]);
}
#endif
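
/* Clean a single page: ARM64 can clean in place, ARMv7 falls back to a flush. */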
void nvmap_clean_cache_page(struct page *page)
{
#if defined(CONFIG_ARM64)
	__clean_dcache_page(page);
#else
	__flush_dcache_page(page_mapping(page), page);
#endif
}
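
/*
 * Flush the given array of pages from the CPU cache (and from the outer
 * cache on ARMv7). When the range exceeds cache_maint_inner_threshold a
 * full inner-cache flush is cheaper than flushing page by page.
 */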
void nvmap_flush_cache(struct page **pages, int numpages)
{
	int i;
	bool flush_inner = true;
	__attribute__((unused)) unsigned long base;

	nvmap_stats_inc(NS_CFLUSH_RQ, numpages << PAGE_SHIFT);
#if defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS)
	if (numpages >= (cache_maint_inner_threshold >> PAGE_SHIFT)) {
		nvmap_stats_inc(NS_CFLUSH_DONE, cache_maint_inner_threshold);
		inner_flush_cache_all();
		flush_inner = false;
	}
#endif
	if (flush_inner)
		nvmap_stats_inc(NS_CFLUSH_DONE, numpages << PAGE_SHIFT);
	trace_nvmap_cache_flush(numpages << PAGE_SHIFT,
				nvmap_stats_read(NS_ALLOC),
				nvmap_stats_read(NS_CFLUSH_RQ),
				nvmap_stats_read(NS_CFLUSH_DONE));

	for (i = 0; i < numpages; i++) {
		struct page *page = nvmap_to_page(pages[i]);
#ifdef CONFIG_ARM64 /* __flush_dcache_page flushes inner and outer on ARM64 */
		if (flush_inner)
			__flush_dcache_page(page);
#else
		if (flush_inner)
			__flush_dcache_page(page_mapping(page), page);

		base = page_to_phys(page);
		outer_flush_range(base, base + PAGE_SIZE);
#endif
	}
}

/*
 * Perform cache op on the list of memory regions within passed handles.
 * A memory region within handle[i] is identified by offsets[i], sizes[i].
 *
 * sizes[i] == 0 is a special case which causes a handle-wide operation;
 * this is done by replacing offsets[i] = 0, sizes[i] = handles[i]->size.
 * So, the input arrays sizes, offsets are not guaranteed to be read-only.
 *
 * This will optimize the op if it can.
 * In the case that all the handles together are larger than the inner cache
 * maint threshold it is possible to just do an entire inner cache flush.
 */
int nvmap_do_cache_maint_list(struct nvmap_handle **handles, u32 *offsets,
			      u32 *sizes, int op, int nr)
{
	int i;
	u64 total = 0;
	u64 thresh = ~0;

#if defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS)
	thresh = cache_maint_inner_threshold;
#endif

	for (i = 0; i < nr; i++) {
		if ((op == NVMAP_CACHE_OP_WB) &&
		    nvmap_handle_track_dirty(handles[i]))
			total += atomic_read(&handles[i]->pgalloc.ndirty);
		else
			total += sizes[i] ? sizes[i] : handles[i]->size;
	}

	if (!total)
		return 0;

	/* Full flush in the case the passed list is bigger than our
	 * threshold. */
	if (total >= thresh) {
		for (i = 0; i < nr; i++) {
			if (handles[i]->userflags &
			    NVMAP_HANDLE_CACHE_SYNC) {
				nvmap_handle_mkclean(handles[i], 0,
						     handles[i]->size);
				nvmap_zap_handle(handles[i], 0,
						 handles[i]->size);
			}
		}

		if (op == NVMAP_CACHE_OP_WB) {
			inner_clean_cache_all();
			outer_clean_all();
		} else {
			inner_flush_cache_all();
			outer_flush_all();
		}
		nvmap_stats_inc(NS_CFLUSH_RQ, total);
		nvmap_stats_inc(NS_CFLUSH_DONE, thresh);
		trace_nvmap_cache_flush(total,
					nvmap_stats_read(NS_ALLOC),
					nvmap_stats_read(NS_CFLUSH_RQ),
					nvmap_stats_read(NS_CFLUSH_DONE));
	} else {
		for (i = 0; i < nr; i++) {
			u32 size = sizes[i] ? sizes[i] : handles[i]->size;
			u32 offset = sizes[i] ? offsets[i] : 0;
			int err = __nvmap_do_cache_maint(handles[i]->owner,
							 handles[i], offset,
							 offset + size, op,
							 false);
			if (err)
				return err;
		}
	}

	return 0;
}
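
/*
 * Zap (unmap) the given range of a handle from every user-space VMA that
 * maps it, so the pages fault in again on the next access. size == 0 means
 * the whole handle.
 */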
void nvmap_zap_handle(struct nvmap_handle *handle, u32 offset, u32 size)
{
	struct list_head *vmas;
	struct nvmap_vma_list *vma_list;
	struct vm_area_struct *vma;

	if (!handle->heap_pgalloc)
		return;

	/* if no dirty page is present, no need to zap */
	if (nvmap_handle_track_dirty(handle) && !atomic_read(&handle->pgalloc.ndirty))
		return;

	if (!size) {
		offset = 0;
		size = handle->size;
	}

	size = PAGE_ALIGN((offset & ~PAGE_MASK) + size);

	mutex_lock(&handle->lock);
	vmas = &handle->vmas;
	list_for_each_entry(vma_list, vmas, list) {
		struct nvmap_vma_priv *priv;
		u32 vm_size = size;

		vma = vma_list->vma;
		priv = vma->vm_private_data;
		if ((offset + size) > (vma->vm_end - vma->vm_start))
			vm_size = vma->vm_end - vma->vm_start - offset;
		if (priv->offs || vma->vm_pgoff)
			/* vma mapping starts in the middle of handle memory.
			 * zapping needs special care. zap entire range for now.
			 * FIXME: optimize zapping.
			 */
			zap_page_range(vma, vma->vm_start,
				vma->vm_end - vma->vm_start, NULL);
		else
			zap_page_range(vma, vma->vm_start + offset,
				vm_size, NULL);
	}
	mutex_unlock(&handle->lock);
}
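
/* Zap the given range from each handle in the list. */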
void nvmap_zap_handles(struct nvmap_handle **handles, u32 *offsets,
		       u32 *sizes, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		nvmap_zap_handle(handles[i], offsets[i], sizes[i]);
}
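
/*
 * Map the handle's pages back into every user-space VMA that covers the
 * given range. An extra reference is taken on each page so that a later
 * zap does not treat the PTE as a bad page.
 */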
void nvmap_vm_insert_handle(struct nvmap_handle *handle, u32 offset, u32 size)
{
	struct list_head *vmas;
	struct nvmap_vma_list *vma_list;
	struct vm_area_struct *vma;

	if (!handle->heap_pgalloc)
		return;

	if (!size) {
		offset = 0;
		size = handle->size;
	}

	mutex_lock(&handle->lock);
	vmas = &handle->vmas;
	list_for_each_entry(vma_list, vmas, list) {
		struct nvmap_vma_priv *priv;
		u32 vm_size = size;
		u32 end;
		int i;

		vma = vma_list->vma;
		priv = vma->vm_private_data;
		if ((offset + size) > (vma->vm_end - vma->vm_start))
			vm_size = vma->vm_end - vma->vm_start - offset;

		end = PAGE_ALIGN(offset + vm_size) >> PAGE_SHIFT;
		offset >>= PAGE_SHIFT;
		for (i = offset; i < end; i++) {
			struct page *page = nvmap_to_page(handle->pgalloc.pages[i]);
			pte_t *pte;
			spinlock_t *ptl;

			down_write(&vma->vm_mm->mmap_sem);
			pte = get_locked_pte(vma->vm_mm, vma->vm_start + (i << PAGE_SHIFT), &ptl);
			if (!pte) {
				pr_err("nvmap: %s get_locked_pte failed\n", __func__);
				up_write(&vma->vm_mm->mmap_sem);
				mutex_unlock(&handle->lock);
				return;
			}
			/*
			 * page->_map_count gets incremented while mapping here. If _count is not
			 * incremented, zap code will see that page as a bad page and throws lot
			 * of errors.
			 */
			atomic_inc(&page->_count);
			do_set_pte(vma, vma->vm_start + (i << PAGE_SHIFT), page, pte, true, false);
			pte_unmap_unlock(pte, ptl);
			up_write(&vma->vm_mm->mmap_sem);
		}
	}
	mutex_unlock(&handle->lock);
}
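
/* Insert pages for each handle in the list and mark the ranges dirty. */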
void nvmap_vm_insert_handles(struct nvmap_handle **handles, u32 *offsets,
			     u32 *sizes, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		nvmap_vm_insert_handle(handles[i], offsets[i], sizes[i]);
		nvmap_handle_mkdirty(handles[i], offsets[i],
				     sizes[i] ? sizes[i] : handles[i]->size);
	}
}
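
/*
 * Mark ranges of the passed handles as reserved or unreserved. Reserving
 * zaps the user mappings and, for CACHE_SYNC_AT_RESERVE handles, writes
 * back the CPU cache; unreserving can re-insert the pages and flush them.
 */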
int nvmap_reserve_pages(struct nvmap_handle **handles, u32 *offsets, u32 *sizes,
			int nr, int op)
{
	int i;

	for (i = 0; i < nr; i++) {
		u32 size = sizes[i] ? sizes[i] : handles[i]->size;
		u32 offset = sizes[i] ? offsets[i] : 0;

		if (op == NVMAP_PAGES_RESERVE)
			nvmap_handle_mkreserved(handles[i], offset, size);
		else
			nvmap_handle_mkunreserved(handles[i], offset, size);
	}

	if (op == NVMAP_PAGES_RESERVE)
		nvmap_zap_handles(handles, offsets, sizes, nr);
	else if (op == NVMAP_INSERT_PAGES_ON_UNRESERVE)
		nvmap_vm_insert_handles(handles, offsets, sizes, nr);

	if (!(handles[0]->userflags & NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE))
		return 0;

	if (op == NVMAP_PAGES_RESERVE) {
		nvmap_do_cache_maint_list(handles, offsets, sizes,
					  NVMAP_CACHE_OP_WB, nr);
		for (i = 0; i < nr; i++)
			nvmap_handle_mkclean(handles[i], offsets[i],
					     sizes[i] ? sizes[i] : handles[i]->size);
	} else if ((op == NVMAP_PAGES_UNRESERVE) && handles[0]->heap_pgalloc) {
		nvmap_do_cache_maint_list(handles, offsets, sizes,
					  NVMAP_CACHE_OP_WB_INV, nr);