/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <linux/io-mapping.h>

#include "qxl_drv.h"
#include "qxl_object.h"
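
/*
 * TTM destroy callback, invoked when the last reference to the underlying
 * ttm_buffer_object is dropped: evict any hardware surface, tear down the
 * fence, unlink the BO from the device's GEM object list and free it.
 */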
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = container_of(tbo, struct qxl_bo, tbo);
	qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;

	qxl_surface_evict(qdev, bo, false);
	qxl_fence_fini(&bo->fence);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

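/*
 * A ttm_buffer_object belongs to this driver iff its destroy callback is
 * qxl_ttm_bo_destroy; this check lets TTM callbacks safely container_of()
 * back to a struct qxl_bo.
 */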
bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &qxl_ttm_bo_destroy;
}

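/*
 * Translate a QXL GEM domain into a TTM placement list: VRAM maps to
 * TTM_PL_VRAM, surface memory to the driver-private TTM_PL_PRIV0 range,
 * and anything else (including QXL_GEM_DOMAIN_CPU) falls back to system
 * memory.
 */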
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
{
	u32 c = 0;

	qbo->placement.fpfn = 0;
	qbo->placement.lpfn = 0;
	qbo->placement.placement = qbo->placements;
	qbo->placement.busy_placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM)
		qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM;
	if (domain == QXL_GEM_DOMAIN_SURFACE)
		qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0;
	if (domain == QXL_GEM_DOMAIN_CPU)
		qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (!c)
		qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	qbo->placement.num_placement = c;
	qbo->placement.num_busy_placement = c;
}

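/**
 * qxl_bo_create - allocate and initialise a QXL buffer object
 * @qdev: qxl device
 * @size: requested size in bytes; rounded up to a whole page
 * @kernel: true for a kernel-internal (non user-mappable) BO
 * @domain: initial placement, one of the QXL_GEM_DOMAIN_* values
 * @surf: optional surface parameters copied into the BO
 * @bo_ptr: return slot for the new BO
 *
 * Note that the ttm_bo_init() error path does not free @bo here: on
 * failure TTM drops its reference itself, which ends up in
 * qxl_ttm_bo_destroy().
 *
 * Return: 0 on success, negative errno on failure.
 */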
int qxl_bo_create(struct qxl_device *qdev,
		  unsigned long size, bool kernel, u32 domain,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
		qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->gem_base.driver_private = NULL;
	bo->type = domain;
	bo->pin_count = 0;
	bo->surface_id = 0;
	qxl_fence_init(qdev, &bo->fence);
	INIT_LIST_HEAD(&bo->list);
	atomic_set(&bo->reserve_count, 0);
	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain);

	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, 0, !kernel, NULL, size,
			NULL, &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	*bo_ptr = bo;
	return 0;
}

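/*
 * Map the whole BO into kernel address space, caching the result in
 * bo->kptr so repeated calls are cheap.  Paired with qxl_bo_kunmap().
 */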
int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;
	return 0;
}

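/*
 * Map a single page of the BO for a short, non-sleeping access.  For VRAM
 * and surface memory this uses the device's io_mapping and must be undone
 * with qxl_bo_kunmap_atomic_page(), which mirrors the mem_type check made
 * here; other placements fall back to a (persistent) full kmap.
 */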
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
	void *rptr;
	int ret;
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
		map = qdev->surface_mapping;
	else
		goto fallback;

	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);

	return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_kmap(bo, &rptr);
	if (ret)
		return NULL;

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}

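/* Undo qxl_bo_kmap(); safe to call even if the BO was never mapped. */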
void qxl_bo_kunmap(struct qxl_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

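/*
 * Counterpart of qxl_bo_kmap_atomic_page().  The mem_type test must make
 * the same choice the mapping side made, otherwise an atomic iomap could
 * be torn down with the wrong primitive.
 */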
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM &&
	    bo->tbo.mem.mem_type != TTM_PL_PRIV0)
		goto fallback;

	io_mapping_unmap_atomic(pmap);

	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);
	return;
fallback:
	qxl_bo_kunmap(bo);
}

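/*
 * Drop a reference.  ttm_bo_unref() clears the pointer it is handed once
 * it has taken the reference, which is used here to decide whether the
 * caller's pointer should be cleared as well.
 */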
void qxl_bo_unref(struct qxl_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;
	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

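/* Take an additional reference and return the same BO for convenience. */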
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	ttm_bo_reference(&bo->tbo);
	return bo;
}

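/**
 * qxl_bo_pin - pin a BO into a given domain
 * @bo: buffer object, which must already be reserved
 * @domain: QXL_GEM_DOMAIN_* placement to pin into
 * @gpu_addr: if non-NULL, receives the BO's GPU offset
 *
 * Pinning is reference counted: only the first call validates the BO
 * with TTM_PL_FLAG_NO_EVICT set; later calls just bump pin_count.
 *
 * Return: 0 on success, negative errno on failure.
 */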
int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
	struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = qxl_bo_gpu_offset(bo);
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = qxl_bo_gpu_offset(bo);
	} else {
		dev_err(qdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

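/**
 * qxl_bo_unpin - drop one pin reference
 * @bo: buffer object, which must already be reserved
 *
 * The placement is only re-validated, with TTM_PL_FLAG_NO_EVICT cleared,
 * once the last pin goes away.
 *
 * Return: 0 on success, negative errno on failure.
 */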
int qxl_bo_unpin(struct qxl_bo *bo)
{
	struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
	int r, i;

	if (!bo->pin_count) {
		dev_warn(qdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r != 0))
		dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

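/*
 * Teardown helper for driver unload when userspace has leaked GEM
 * objects: each leftover BO is unlinked and force-unreferenced, which
 * should drop the underlying TTM object via qxl_ttm_bo_destroy().
 */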
void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		mutex_lock(&qdev->ddev->struct_mutex);
		dev_err(qdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&qdev->ddev->struct_mutex);
	}
}

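/* Initialise the TTM backend for buffer objects. */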
int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

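/* Tear down the TTM backend; all BOs should be gone by now. */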
void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}

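/*
 * Surface BOs are created without a hardware surface behind them; give
 * the BO its surface id and QXL hardware surface on first use.
 */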
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;

	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo, NULL);
		if (ret)
			return ret;
	}
	return 0;
}

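/*
 * Unreserve and free every entry collected by qxl_bo_list_add().  The
 * @failed flag is currently unused: everything on the list was reserved,
 * so it is all unreserved regardless of how the caller fared.
 */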
void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed)
{
	struct qxl_bo_list *entry, *sf;

	list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) {
		qxl_bo_unreserve(entry->bo);
		list_del(&entry->lhead);
		kfree(entry);
	}
}

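/*
 * Track @bo in @reloc_list (once), reserving and validating it so the
 * caller can emit commands against it; surface BOs also get a surface id
 * here.  Invariant: everything that lands on the list is reserved, so
 * qxl_bo_list_unreserve() can clean up uniformly.
 */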
int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;
	int ret;

	list_for_each_entry(entry, &reloc_list->bos, lhead) {
		if (entry->bo == bo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	/* reserve before linking the entry, so that a reserve failure
	 * cannot leave an unreserved BO on the list for
	 * qxl_bo_list_unreserve() to unreserve */
	ret = qxl_bo_reserve(bo, false);
	if (ret) {
		kfree(entry);
		return ret;
	}

	entry->bo = bo;
	list_add(&entry->lhead, &reloc_list->bos);

	if (!bo->pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement,
				      true, false);
		if (ret)
			return ret;
	}

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
	if (ret)
		return ret;
	return 0;
}

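/* Evict everything from the surface memory manager (TTM_PL_PRIV0). */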
int qxl_surf_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
}

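/* Evict everything from VRAM. */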
int qxl_vram_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
}