/*
 * drivers/video/tegra/nvmap/nvmap_mm.c
 *
 * Some MM related functionality specific to nvmap.
 *
 * Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <trace/events/nvmap.h>

#include "nvmap_priv.h"

void inner_flush_cache_all(void)
{
#if defined(CONFIG_ARM64) && \
        defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
        __flush_dcache_all(NULL);
#elif defined(CONFIG_ARM64)
        on_each_cpu(__flush_dcache_all, NULL, 1);
#elif defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
        v7_flush_kern_cache_all();
#else
        on_each_cpu(v7_flush_kern_cache_all, NULL, 1);
#endif
}

void inner_clean_cache_all(void)
{
#if defined(CONFIG_ARM64) && \
        defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
        __clean_dcache_all(NULL);
#elif defined(CONFIG_ARM64)
        on_each_cpu(__clean_dcache_all, NULL, 1);
#elif defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
        v7_clean_kern_cache_all(NULL);
#else
        on_each_cpu(v7_clean_kern_cache_all, NULL, 1);
#endif
}

/*
 * FIXME:
 *
 *   __clean_dcache_page() is only available on ARM64 (well, we haven't
 *   implemented it on ARMv7).
 */
#ifdef CONFIG_ARM64
void nvmap_clean_cache(struct page **pages, int numpages)
{
        int i;

        /* Not technically a flush but that's what nvmap knows about. */
        nvmap_stats_inc(NS_CFLUSH_DONE, numpages << PAGE_SHIFT);
        trace_nvmap_cache_flush(numpages << PAGE_SHIFT,
                nvmap_stats_read(NS_ALLOC),
                nvmap_stats_read(NS_CFLUSH_RQ),
                nvmap_stats_read(NS_CFLUSH_DONE));

        for (i = 0; i < numpages; i++)
                __clean_dcache_page(pages[i]);
}
#endif

void nvmap_flush_cache(struct page **pages, int numpages)
{
        unsigned int i;
        bool flush_inner = true;
        __attribute__((unused)) unsigned long base;

        nvmap_stats_inc(NS_CFLUSH_RQ, numpages << PAGE_SHIFT);
#if defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS)
        if (numpages >= (cache_maint_inner_threshold >> PAGE_SHIFT)) {
                nvmap_stats_inc(NS_CFLUSH_DONE, cache_maint_inner_threshold);
                inner_flush_cache_all();
                flush_inner = false;
        }
#endif
        if (flush_inner)
                nvmap_stats_inc(NS_CFLUSH_DONE, numpages << PAGE_SHIFT);
        trace_nvmap_cache_flush(numpages << PAGE_SHIFT,
                nvmap_stats_read(NS_ALLOC),
                nvmap_stats_read(NS_CFLUSH_RQ),
                nvmap_stats_read(NS_CFLUSH_DONE));

        for (i = 0; i < numpages; i++) {
                struct page *page = nvmap_to_page(pages[i]);
#ifdef CONFIG_ARM64 /* __flush_dcache_page flushes inner and outer on ARM64 */
                if (flush_inner)
                        __flush_dcache_page(page);
#else
                if (flush_inner)
                        __flush_dcache_page(page_mapping(page), page);

                base = page_to_phys(page);
                outer_flush_range(base, base + PAGE_SIZE);
#endif
        }
}
/*
 * Perform a cache op on the list of memory regions within the passed handles.
 * A memory region within handles[i] is identified by offsets[i] and sizes[i].
 *
 * sizes[i] == 0 is a special case which causes a handle-wide operation,
 * done by replacing offsets[i] = 0, sizes[i] = handles[i]->size.
 * So the input arrays sizes and offsets are not guaranteed to be read-only.
 *
 * This will optimize the op if it can.
 * In the case that all the handles together are larger than the inner cache
 * maint threshold it is possible to just do an entire inner cache flush.
 *
 * (A brief usage sketch follows the function definition below.)
 */
int nvmap_do_cache_maint_list(struct nvmap_handle **handles, u32 *offsets,
                              u32 *sizes, int op, int nr)
{
        int i;
        u64 total = 0;

        for (i = 0; i < nr; i++)
                total += sizes[i] ? sizes[i] : handles[i]->size;

        /* Do a full flush if the passed list is bigger than our
         * threshold. */
        if (total >= cache_maint_inner_threshold) {
                for (i = 0; i < nr; i++) {
                        if (handles[i]->userflags &
                            NVMAP_HANDLE_CACHE_SYNC) {
                                nvmap_handle_mkclean(handles[i], 0,
                                                     handles[i]->size);
                                nvmap_zap_handle(handles[i], 0,
                                                 handles[i]->size);
                        }
                }

                if (op == NVMAP_CACHE_OP_WB) {
                        inner_clean_cache_all();
                        outer_clean_all();
                } else {
                        inner_flush_cache_all();
                        outer_flush_all();
                }
                nvmap_stats_inc(NS_CFLUSH_RQ, total);
                nvmap_stats_inc(NS_CFLUSH_DONE, cache_maint_inner_threshold);
                trace_nvmap_cache_flush(total,
                                        nvmap_stats_read(NS_ALLOC),
                                        nvmap_stats_read(NS_CFLUSH_RQ),
                                        nvmap_stats_read(NS_CFLUSH_DONE));
        } else {
                for (i = 0; i < nr; i++) {
                        u32 size = sizes[i] ? sizes[i] : handles[i]->size;
                        u32 offset = sizes[i] ? offsets[i] : 0;
                        int err = __nvmap_do_cache_maint(handles[i]->owner,
                                                         handles[i], offset,
                                                         offset + size,
                                                         op, false);
                        if (err)
                                return err;
                }
        }

        return 0;
}
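
/*
 * Hedged usage sketch (not part of the original file): one way a caller
 * might write back two regions in a single call.  The helper name and the
 * handle pointers below are purely illustrative; a size of 0 requests a
 * handle-wide operation, as described above.
 */
#if 0
static int example_writeback(struct nvmap_handle *h0, struct nvmap_handle *h1)
{
        struct nvmap_handle *handles[2] = { h0, h1 };
        u32 offsets[2] = { 0, 0 };
        u32 sizes[2] = { 0, PAGE_SIZE };        /* 0 => whole handle */

        /* NVMAP_CACHE_OP_WB cleans (writes back) without invalidating. */
        return nvmap_do_cache_maint_list(handles, offsets, sizes,
                                         NVMAP_CACHE_OP_WB, 2);
}
#endif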

void nvmap_zap_handle(struct nvmap_handle *handle, u32 offset, u32 size)
{
        struct list_head *vmas;
        struct nvmap_vma_list *vma_list;
        struct vm_area_struct *vma;

        if (!handle->heap_pgalloc)
                return;

        if (!size) {
                offset = 0;
                size = handle->size;
        }

        size = PAGE_ALIGN((offset & ~PAGE_MASK) + size);

        mutex_lock(&handle->lock);
        vmas = &handle->vmas;
        list_for_each_entry(vma_list, vmas, list) {
                struct nvmap_vma_priv *priv;
                u32 vm_size = size;

                vma = vma_list->vma;
                priv = vma->vm_private_data;
                if ((offset + size) > (vma->vm_end - vma->vm_start))
                        vm_size = vma->vm_end - vma->vm_start - offset;
                if (priv->offs || vma->vm_pgoff)
                        /* The vma mapping starts in the middle of the handle
                         * memory, so zapping needs special care; zap the
                         * entire range for now.
                         * FIXME: optimize zapping.
                         */
                        zap_page_range(vma, vma->vm_start,
                                vma->vm_end - vma->vm_start, NULL);
                else
                        zap_page_range(vma, vma->vm_start + offset,
                                vm_size, NULL);
        }
        mutex_unlock(&handle->lock);
}
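
/*
 * Worked example of the size rounding above (illustrative, assuming 4 KiB
 * pages): for offset = 0x1234 and size = 0x100,
 *   (0x1234 & ~PAGE_MASK) + 0x100 = 0x334, and PAGE_ALIGN(0x334) = 0x1000,
 * so the zapped length is rounded up to the number of whole pages the
 * original byte range touches.
 */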

void nvmap_zap_handles(struct nvmap_handle **handles, u32 *offsets,
                       u32 *sizes, u32 nr)
{
        int i;

        for (i = 0; i < nr; i++)
                nvmap_zap_handle(handles[i], offsets[i], sizes[i]);
}

int nvmap_reserve_pages(struct nvmap_handle **handles, u32 *offsets, u32 *sizes,
                        u32 nr, u32 op)
{
        int i;

        for (i = 0; i < nr; i++) {
                u32 size = sizes[i] ? sizes[i] : handles[i]->size;
                u32 offset = sizes[i] ? offsets[i] : 0;

                if (op == NVMAP_PAGES_RESERVE)
                        nvmap_handle_mkreserved(handles[i], offset, size);
                else
                        nvmap_handle_mkunreserved(handles[i], offset, size);
        }

        if (op == NVMAP_PAGES_RESERVE)
                nvmap_zap_handles(handles, offsets, sizes, nr);
        return 0;
}