/*
 * drivers/video/tegra/nvmap/nvmap_mm.c
 *
 * Some MM related functionality specific to nvmap.
 *
 * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
23 #include <trace/events/nvmap.h>
25 #include "nvmap_priv.h"
27 void inner_flush_cache_all(void)
29 #if defined(CONFIG_ARM64) && defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
30 __flush_dcache_all(NULL);
31 #elif defined(CONFIG_ARM64)
32 on_each_cpu(__flush_dcache_all, NULL, 1);
33 #elif defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
34 v7_flush_kern_cache_all();
36 on_each_cpu(v7_flush_kern_cache_all, NULL, 1);
40 void inner_clean_cache_all(void)
42 #if defined(CONFIG_ARM64) && \
43 defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
44 __clean_dcache_all(NULL);
45 #elif defined(CONFIG_ARM64)
46 on_each_cpu(__clean_dcache_all, NULL, 1);
47 #elif defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
48 v7_clean_kern_cache_all(NULL);
50 on_each_cpu(v7_clean_kern_cache_all, NULL, 1);
57 * __clean_dcache_page() is only available on ARM64 (well, we haven't
58 * implemented it on ARMv7).
61 void nvmap_clean_cache(struct page **pages, int numpages)
65 /* Not technically a flush but that's what nvmap knows about. */
66 nvmap_stats_inc(NS_CFLUSH_DONE, numpages << PAGE_SHIFT);
67 trace_nvmap_cache_flush(numpages << PAGE_SHIFT,
68 nvmap_stats_read(NS_ALLOC),
69 nvmap_stats_read(NS_CFLUSH_RQ),
70 nvmap_stats_read(NS_CFLUSH_DONE));
72 for (i = 0; i < numpages; i++)
73 __clean_dcache_page(pages[i]);
77 void nvmap_flush_cache(struct page **pages, int numpages)
80 bool flush_inner = true;
81 __attribute__((unused)) unsigned long base;
83 nvmap_stats_inc(NS_CFLUSH_RQ, numpages << PAGE_SHIFT);
84 #if defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS)
85 if (numpages >= (cache_maint_inner_threshold >> PAGE_SHIFT)) {
86 nvmap_stats_inc(NS_CFLUSH_DONE, cache_maint_inner_threshold);
87 inner_flush_cache_all();
92 nvmap_stats_inc(NS_CFLUSH_DONE, numpages << PAGE_SHIFT);
93 trace_nvmap_cache_flush(numpages << PAGE_SHIFT,
94 nvmap_stats_read(NS_ALLOC),
95 nvmap_stats_read(NS_CFLUSH_RQ),
96 nvmap_stats_read(NS_CFLUSH_DONE));
98 for (i = 0; i < numpages; i++) {
99 struct page *page = nvmap_to_page(pages[i]);
100 #ifdef CONFIG_ARM64 //__flush_dcache_page flushes inner and outer on ARM64
102 __flush_dcache_page(page);
105 __flush_dcache_page(page_mapping(page), page);
107 base = page_to_phys(page);
108 outer_flush_range(base, base + PAGE_SIZE);
114 * Perform cache op on the list of memory regions within passed handles.
115 * A memory region within handle[i] is identified by offsets[i], sizes[i]
117 * sizes[i] == 0 is a special case which causes handle wide operation,
118 * this is done by replacing offsets[i] = 0, sizes[i] = handles[i]->size.
119 * So, the input arrays sizes, offsets are not guaranteed to be read-only
121 * This will optimze the op if it can.
122 * In the case that all the handles together are larger than the inner cache
123 * maint threshold it is possible to just do an entire inner cache flush.
125 int nvmap_do_cache_maint_list(struct nvmap_handle **handles, u32 *offsets,
126 u32 *sizes, int op, int nr)
131 for (i = 0; i < nr; i++)
132 total += sizes[i] ? sizes[i] : handles[i]->size;
134 /* Full flush in the case the passed list is bigger than our
136 if (total >= cache_maint_inner_threshold) {
137 for (i = 0; i < nr; i++) {
138 if (handles[i]->userflags &
139 NVMAP_HANDLE_CACHE_SYNC) {
140 nvmap_handle_mkclean(handles[i], 0,
142 nvmap_zap_handle(handles[i], 0,
147 if (op == NVMAP_CACHE_OP_WB) {
148 inner_clean_cache_all();
151 inner_flush_cache_all();
154 nvmap_stats_inc(NS_CFLUSH_RQ, total);
155 nvmap_stats_inc(NS_CFLUSH_DONE, cache_maint_inner_threshold);
156 trace_nvmap_cache_flush(total,
157 nvmap_stats_read(NS_ALLOC),
158 nvmap_stats_read(NS_CFLUSH_RQ),
159 nvmap_stats_read(NS_CFLUSH_DONE));
161 for (i = 0; i < nr; i++) {
162 u32 size = sizes[i] ? sizes[i] : handles[i]->size;
163 u32 offset = sizes[i] ? offsets[i] : 0;
164 int err = __nvmap_do_cache_maint(handles[i]->owner,
176 void nvmap_zap_handle(struct nvmap_handle *handle, u32 offset, u32 size)
178 struct list_head *vmas;
179 struct nvmap_vma_list *vma_list;
180 struct vm_area_struct *vma;
182 if (!handle->heap_pgalloc)
190 size = PAGE_ALIGN((offset & ~PAGE_MASK) + size);
192 mutex_lock(&handle->lock);
193 vmas = &handle->vmas;
194 list_for_each_entry(vma_list, vmas, list) {
195 struct nvmap_vma_priv *priv;
199 priv = vma->vm_private_data;
200 if ((offset + size) > (vma->vm_end - vma->vm_start))
201 vm_size = vma->vm_end - vma->vm_start - offset;
202 if (priv->offs || vma->vm_pgoff)
203 /* vma mapping starts in the middle of handle memory.
204 * zapping needs special care. zap entire range for now.
205 * FIXME: optimze zapping.
207 zap_page_range(vma, vma->vm_start,
208 vma->vm_end - vma->vm_start, NULL);
210 zap_page_range(vma, vma->vm_start + offset,
213 mutex_unlock(&handle->lock);
216 void nvmap_zap_handles(struct nvmap_handle **handles, u32 *offsets,
221 for (i = 0; i < nr; i++)
222 nvmap_zap_handle(handles[i], offsets[i], sizes[i]);
225 int nvmap_reserve_pages(struct nvmap_handle **handles, u32 *offsets, u32 *sizes,
230 for (i = 0; i < nr; i++) {
231 u32 size = sizes[i] ? sizes[i] : handles[i]->size;
232 u32 offset = sizes[i] ? offsets[i] : 0;
234 if (op == NVMAP_PAGES_RESERVE)
235 nvmap_handle_mkreserved(handles[i], offset, size);
237 nvmap_handle_mkunreserved(handles[i],offset, size);
240 if (op == NVMAP_PAGES_RESERVE)
241 nvmap_zap_handles(handles, offsets, sizes, nr);