/*
 * drivers/video/tegra/nvmap/nvmap_mm.c
 *
 * Some MM related functionality specific to nvmap.
 *
 * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <trace/events/nvmap.h>

#include "nvmap_priv.h"

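/*
 * Flush the entire inner D-cache from the calling CPU.
 *
 * On Denver CPUs, bits [11:8] of the implementation-defined ID_AFR0_EL1
 * register are checked first; when they read 0x1, the write to the
 * s3_0_c15_c13_0 system register presumably triggers a hardware-assisted
 * full D-cache flush, completed by the following dsb. Anything else falls
 * back to the generic set/way walk in __flush_dcache_all().
 */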
static inline void nvmap_flush_dcache_all(void *dummy)
{
#if defined(CONFIG_DENVER_CPU)
        u64 id_afr0;
        asm volatile ("mrs %0, ID_AFR0_EL1" : "=r"(id_afr0));
        if (likely((id_afr0 & 0xf00) == 0x100)) {
                asm volatile ("msr s3_0_c15_c13_0, %0" : : "r" (0));
                asm volatile ("dsb sy");
        } else {
                __flush_dcache_all(NULL);
        }
#else
        __flush_dcache_all(NULL);
#endif
}

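/*
 * Flush (clean + invalidate) the entire inner cache hierarchy.
 *
 * With CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU the flush is issued
 * from the calling CPU only; otherwise it is broadcast to every online CPU
 * with on_each_cpu(). ARMv7 builds use the v7 kernel cache maintenance
 * helpers instead of the ARM64 ones.
 */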
void inner_flush_cache_all(void)
{
#if defined(CONFIG_ARM64) && defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
        nvmap_flush_dcache_all(NULL);
#elif defined(CONFIG_ARM64)
        on_each_cpu(nvmap_flush_dcache_all, NULL, 1);
#elif defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
        v7_flush_kern_cache_all();
#else
        on_each_cpu(v7_flush_kern_cache_all, NULL, 1);
#endif
}

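/*
 * Clean (write back without invalidating) the entire inner cache.
 *
 * In the SET_WAYS_ON_ONE_CPU configuration each CPU cleans its own caches
 * only to the Level of Unification Inner Shareable (the *_louis helpers),
 * presumably because set/way operations are not broadcast in hardware; the
 * calling CPU then performs one full clean for the shared levels. Otherwise
 * the full clean is simply run on every CPU.
 */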
extern void __clean_dcache_louis(void *);
extern void v7_clean_kern_cache_louis(void *);
void inner_clean_cache_all(void)
{
#if defined(CONFIG_ARM64) && \
        defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
        on_each_cpu(__clean_dcache_louis, NULL, 1);
        __clean_dcache_all(NULL);
#elif defined(CONFIG_ARM64)
        on_each_cpu(__clean_dcache_all, NULL, 1);
#elif defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
        on_each_cpu(v7_clean_kern_cache_louis, NULL, 1);
        v7_clean_kern_cache_all(NULL);
#else
        on_each_cpu(v7_clean_kern_cache_all, NULL, 1);
#endif
}

/*
 * FIXME:
 *
 *   __clean_dcache_page() is only available on ARM64 (well, we haven't
 *   implemented it on ARMv7).
 */
#if defined(CONFIG_ARM64)
void nvmap_clean_cache(struct page **pages, int numpages)
{
        int i;

        /* Not technically a flush but that's what nvmap knows about. */
        nvmap_stats_inc(NS_CFLUSH_DONE, numpages << PAGE_SHIFT);
        trace_nvmap_cache_flush(numpages << PAGE_SHIFT,
                nvmap_stats_read(NS_ALLOC),
                nvmap_stats_read(NS_CFLUSH_RQ),
                nvmap_stats_read(NS_CFLUSH_DONE));

        for (i = 0; i < numpages; i++)
                __clean_dcache_page(pages[i]);
}
#endif

void nvmap_clean_cache_page(struct page *page)
{
#if defined(CONFIG_ARM64)
        __clean_dcache_page(page);
#else
        __flush_dcache_page(page_mapping(page), page);
#endif
}

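/*
 * Flush an array of pages from the CPU caches.
 *
 * When set/way maintenance is enabled and the request covers at least
 * cache_maint_inner_threshold bytes, one full inner-cache flush is assumed
 * to be cheaper than flushing page by page, so the per-page inner flush is
 * skipped. On ARMv7 the outer cache is still flushed per page by physical
 * address range; on ARM64 __flush_dcache_page() already covers the whole
 * hierarchy.
 */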
void nvmap_flush_cache(struct page **pages, int numpages)
{
        unsigned int i;
        bool flush_inner = true;
        __attribute__((unused)) unsigned long base;

        nvmap_stats_inc(NS_CFLUSH_RQ, numpages << PAGE_SHIFT);
#if defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS)
        if (numpages >= (cache_maint_inner_threshold >> PAGE_SHIFT)) {
                nvmap_stats_inc(NS_CFLUSH_DONE, cache_maint_inner_threshold);
                inner_flush_cache_all();
                flush_inner = false;
        }
#endif
        if (flush_inner)
                nvmap_stats_inc(NS_CFLUSH_DONE, numpages << PAGE_SHIFT);
        trace_nvmap_cache_flush(numpages << PAGE_SHIFT,
                nvmap_stats_read(NS_ALLOC),
                nvmap_stats_read(NS_CFLUSH_RQ),
                nvmap_stats_read(NS_CFLUSH_DONE));

        for (i = 0; i < numpages; i++) {
                struct page *page = nvmap_to_page(pages[i]);
#ifdef CONFIG_ARM64 /* __flush_dcache_page() flushes inner and outer on ARM64 */
                if (flush_inner)
                        __flush_dcache_page(page);
#else
                if (flush_inner)
                        __flush_dcache_page(page_mapping(page), page);

                base = page_to_phys(page);
                outer_flush_range(base, base + PAGE_SIZE);
#endif
        }
}

/*
 * Perform a cache operation on the list of memory regions within the
 * passed handles. The region within handles[i] is identified by
 * offsets[i] and sizes[i].
 *
 * sizes[i] == 0 is a special case requesting a handle-wide operation,
 * implemented by substituting offsets[i] = 0, sizes[i] = handles[i]->size,
 * so the input sizes and offsets arrays are not guaranteed to be treated
 * as read-only.
 *
 * The operation is optimized where possible: when all the regions together
 * are at least as large as the inner cache maintenance threshold, a single
 * full inner cache clean or flush is done instead of per-region maintenance.
 */
int nvmap_do_cache_maint_list(struct nvmap_handle **handles, u32 *offsets,
                              u32 *sizes, int op, int nr)
{
        int i;
        u64 total = 0;
        u64 thresh = ~0;

#if defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS)
        thresh = cache_maint_inner_threshold;
#endif

        for (i = 0; i < nr; i++) {
                if ((op == NVMAP_CACHE_OP_WB) && nvmap_handle_track_dirty(handles[i]))
                        total += atomic_read(&handles[i]->pgalloc.ndirty);
                else
                        total += sizes[i] ? sizes[i] : handles[i]->size;
        }

        if (!total)
                return 0;

        /* Full flush in the case the passed list is bigger than our
         * threshold. */
        if (total >= thresh) {
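                /*
                 * The whole cache is about to be cleaned or flushed, so
                 * handles that requested cache sync tracking can be marked
                 * clean over their full size. Their user-space mappings are
                 * zapped as well, presumably so that later writes fault back
                 * in and are picked up by the dirty tracking again.
                 */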
                for (i = 0; i < nr; i++) {
                        if (handles[i]->userflags &
                            NVMAP_HANDLE_CACHE_SYNC) {
                                nvmap_handle_mkclean(handles[i], 0,
                                                     handles[i]->size);
                                nvmap_zap_handle(handles[i], 0,
                                                 handles[i]->size);
                        }
                }

                if (op == NVMAP_CACHE_OP_WB) {
                        inner_clean_cache_all();
                        outer_clean_all();
                } else {
                        inner_flush_cache_all();
                        outer_flush_all();
                }
                nvmap_stats_inc(NS_CFLUSH_RQ, total);
                nvmap_stats_inc(NS_CFLUSH_DONE, thresh);
                trace_nvmap_cache_flush(total,
                                        nvmap_stats_read(NS_ALLOC),
                                        nvmap_stats_read(NS_CFLUSH_RQ),
                                        nvmap_stats_read(NS_CFLUSH_DONE));
        } else {
                for (i = 0; i < nr; i++) {
                        u32 size = sizes[i] ? sizes[i] : handles[i]->size;
                        u32 offset = sizes[i] ? offsets[i] : 0;
                        int err = __nvmap_do_cache_maint(handles[i]->owner,
                                                         handles[i], offset,
                                                         offset + size,
                                                         op, false);
                        if (err)
                                return err;
                }
        }

        return 0;
}

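/*
 * Unmap (zap) the user-space PTEs that cover [offset, offset + size) of the
 * handle in every VMA currently mapping it, so the next access faults the
 * pages back in. size == 0 means the whole handle. If a VMA maps the handle
 * at a non-zero offset, the exact sub-range is not computed yet (see the
 * FIXME below) and the entire VMA is zapped instead.
 */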
void nvmap_zap_handle(struct nvmap_handle *handle, u32 offset, u32 size)
{
        struct list_head *vmas;
        struct nvmap_vma_list *vma_list;
        struct vm_area_struct *vma;

        if (!handle->heap_pgalloc)
                return;

        /* if no dirty page is present, no need to zap */
        if (nvmap_handle_track_dirty(handle) && !atomic_read(&handle->pgalloc.ndirty))
                return;

        if (!size) {
                offset = 0;
                size = handle->size;
        }

        size = PAGE_ALIGN((offset & ~PAGE_MASK) + size);

        mutex_lock(&handle->lock);
        vmas = &handle->vmas;
        list_for_each_entry(vma_list, vmas, list) {
                struct nvmap_vma_priv *priv;
                u32 vm_size = size;

                vma = vma_list->vma;
                priv = vma->vm_private_data;
                if ((offset + size) > (vma->vm_end - vma->vm_start))
                        vm_size = vma->vm_end - vma->vm_start - offset;
                if (priv->offs || vma->vm_pgoff)
                        /* vma mapping starts in the middle of handle memory.
                         * zapping needs special care. zap entire range for now.
                         * FIXME: optimize zapping.
                         */
                        zap_page_range(vma, vma->vm_start,
                                vma->vm_end - vma->vm_start, NULL);
                else
                        zap_page_range(vma, vma->vm_start + offset,
                                vm_size, NULL);
        }
        mutex_unlock(&handle->lock);
}

void nvmap_zap_handles(struct nvmap_handle **handles, u32 *offsets,
                       u32 *sizes, u32 nr)
{
        int i;

        for (i = 0; i < nr; i++)
                nvmap_zap_handle(handles[i], offsets[i], sizes[i]);
}

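/*
 * Mark the given regions as reserved or unreserved for their handles.
 *
 * On NVMAP_PAGES_RESERVE the regions are marked reserved and the user
 * mappings are zapped; if cache sync at reserve was requested, the regions
 * are additionally written back to memory and marked clean. On unreserve,
 * handles not backed by pgalloc pages (e.g. carveout memory) get a
 * writeback + invalidate instead. Note that NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE
 * and heap_pgalloc are only checked on handles[0] and assumed to apply to
 * the whole list.
 */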
int nvmap_reserve_pages(struct nvmap_handle **handles, u32 *offsets, u32 *sizes,
                        u32 nr, u32 op)
{
        int i;

        for (i = 0; i < nr; i++) {
                u32 size = sizes[i] ? sizes[i] : handles[i]->size;
                u32 offset = sizes[i] ? offsets[i] : 0;

                if (op == NVMAP_PAGES_RESERVE)
                        nvmap_handle_mkreserved(handles[i], offset, size);
                else
                        nvmap_handle_mkunreserved(handles[i], offset, size);
        }

        if (op == NVMAP_PAGES_RESERVE)
                nvmap_zap_handles(handles, offsets, sizes, nr);

        if (!(handles[0]->userflags & NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE))
                return 0;

        if (op == NVMAP_PAGES_RESERVE) {
                nvmap_do_cache_maint_list(handles, offsets, sizes,
                                          NVMAP_CACHE_OP_WB, nr);
                for (i = 0; i < nr; i++)
                        nvmap_handle_mkclean(handles[i], offsets[i],
                                             sizes[i] ? sizes[i] : handles[i]->size);
        } else if (!handles[0]->heap_pgalloc) {
                nvmap_do_cache_maint_list(handles, offsets, sizes,
                                          NVMAP_CACHE_OP_WB_INV, nr);
        }
        return 0;
}