drivers/gpu/drm/qxl/qxl_object.c
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/io-mapping.h>

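/*
 * TTM destroy callback: evict any backing surface, tear down the
 * fence, unlink the BO from the device's GEM object list, then free
 * both the GEM base object and the qxl_bo itself.
 */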
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct qxl_bo *bo;
        struct qxl_device *qdev;

        bo = container_of(tbo, struct qxl_bo, tbo);
        qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;

        qxl_surface_evict(qdev, bo, false);
        qxl_fence_fini(&bo->fence);
        mutex_lock(&qdev->gem.mutex);
        list_del_init(&bo->list);
        mutex_unlock(&qdev->gem.mutex);
        drm_gem_object_release(&bo->gem_base);
        kfree(bo);
}

bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &qxl_ttm_bo_destroy)
                return true;
        return false;
}

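/*
 * Translate a QXL GEM domain into a TTM placement: VRAM maps to
 * TTM_PL_FLAG_VRAM, surface memory to the private TTM_PL_FLAG_PRIV0
 * range and CPU to system memory.  Pinned BOs additionally get
 * TTM_PL_FLAG_NO_EVICT, and an unrecognized domain falls back to
 * evictable system memory.
 */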
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
        u32 c = 0;
        u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;

        qbo->placement.fpfn = 0;
        qbo->placement.lpfn = 0;
        qbo->placement.placement = qbo->placements;
        qbo->placement.busy_placement = qbo->placements;
        if (domain == QXL_GEM_DOMAIN_VRAM)
                qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
        if (domain == QXL_GEM_DOMAIN_SURFACE)
                qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
        if (domain == QXL_GEM_DOMAIN_CPU)
                qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
        if (!c)
                qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        qbo->placement.num_placement = c;
        qbo->placement.num_busy_placement = c;
}

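/*
 * Allocate a qxl_bo of at least @size bytes (rounded up to whole
 * pages), initialize its GEM base object and hand it to TTM with the
 * placement derived from @domain.  On success the new BO is returned
 * through @bo_ptr with one reference held.
 */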
int qxl_bo_create(struct qxl_device *qdev,
                  unsigned long size, bool kernel, bool pinned, u32 domain,
                  struct qxl_surface *surf,
                  struct qxl_bo **bo_ptr)
{
        struct qxl_bo *bo;
        enum ttm_bo_type type;
        int r;

        if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
                qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
        if (kernel)
                type = ttm_bo_type_kernel;
        else
                type = ttm_bo_type_device;
        *bo_ptr = NULL;
        bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        size = roundup(size, PAGE_SIZE);
        r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        bo->gem_base.driver_private = NULL;
        bo->type = domain;
        bo->pin_count = pinned ? 1 : 0;
        bo->surface_id = 0;
        qxl_fence_init(qdev, &bo->fence);
        INIT_LIST_HEAD(&bo->list);

        if (surf)
                bo->surf = *surf;

        qxl_ttm_placement_from_domain(bo, domain, pinned);

        r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, 0, !kernel, NULL, size,
                        NULL, &qxl_ttm_bo_destroy);
        if (unlikely(r != 0)) {
                /* ttm_bo_init() invokes the destroy callback on failure,
                 * so the BO must not be freed again here. */
                if (r != -ERESTARTSYS)
                        dev_err(qdev->dev,
                                "object_init failed for (%lu, 0x%08X)\n",
                                size, domain);
                return r;
        }
        *bo_ptr = bo;
        return 0;
}
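
/*
 * Typical usage (illustrative sketch only, not taken from a caller in
 * this file; assumes a valid qdev and a byte size are in scope):
 * create a pinned, kernel-internal BO in VRAM and map it for CPU
 * access.
 *
 *	struct qxl_bo *bo;
 *	void *ptr;
 *	int r;
 *
 *	r = qxl_bo_create(qdev, size, true, true, QXL_GEM_DOMAIN_VRAM,
 *			  NULL, &bo);
 *	if (r)
 *		return r;
 *	r = qxl_bo_kmap(bo, &ptr);
 *	if (r == 0) {
 *		... fill the buffer through ptr ...
 *		qxl_bo_kunmap(bo);
 *	}
 *
 * The BO would later be released with qxl_bo_unref().
 */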
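/*
 * Map the whole BO into the kernel address space.  The mapping is
 * cached in bo->kptr, so repeated calls just return the cached
 * pointer; qxl_bo_kunmap() tears the mapping down.
 */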
int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
{
        bool is_iomem;
        int r;

        if (bo->kptr) {
                if (ptr)
                        *ptr = bo->kptr;
                return 0;
        }
        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r)
                return r;
        bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
        if (ptr)
                *ptr = bo->kptr;
        return 0;
}

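/*
 * Map a single page of a BO without sleeping.  BOs living in VRAM or
 * surface memory are mapped through the device's io_mapping; anything
 * else falls back to a regular kernel mapping via qxl_bo_kmap().
 */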
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
                              struct qxl_bo *bo, int page_offset)
{
        struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
        void *rptr;
        int ret;
        struct io_mapping *map;

        if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
                map = qdev->vram_mapping;
        else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
                map = qdev->surface_mapping;
        else
                goto fallback;

        (void) ttm_mem_io_lock(man, false);
        ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
        ttm_mem_io_unlock(man);

        return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
        if (bo->kptr) {
                rptr = bo->kptr + (page_offset * PAGE_SIZE);
                return rptr;
        }

        ret = qxl_bo_kmap(bo, &rptr);
        if (ret)
                return NULL;

        rptr += page_offset * PAGE_SIZE;
        return rptr;
}

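/* Tear down the kernel mapping set up by qxl_bo_kmap(), if any. */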
void qxl_bo_kunmap(struct qxl_bo *bo)
{
        if (bo->kptr == NULL)
                return;
        bo->kptr = NULL;
        ttm_bo_kunmap(&bo->kmap);
}

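/*
 * Undo qxl_bo_kmap_atomic_page(): unmap the atomic mapping and release
 * the io reservation, or fall back to qxl_bo_kunmap() for BOs that
 * were mapped through the regular path.
 */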
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
                               struct qxl_bo *bo, void *pmap)
{
        struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
        struct io_mapping *map;

        if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
                map = qdev->vram_mapping;
        else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
                map = qdev->surface_mapping;
        else
                goto fallback;

        io_mapping_unmap_atomic(pmap);

        (void) ttm_mem_io_lock(man, false);
        ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
        ttm_mem_io_unlock(man);
        return;
fallback:
        qxl_bo_kunmap(bo);
}

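/*
 * Drop a reference to the BO and clear the caller's pointer once the
 * underlying TTM reference has been released.
 */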
void qxl_bo_unref(struct qxl_bo **bo)
{
        struct ttm_buffer_object *tbo;

        if ((*bo) == NULL)
                return;
        tbo = &((*bo)->tbo);
        ttm_bo_unref(&tbo);
        if (tbo == NULL)
                *bo = NULL;
}

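/* Take an extra reference on the underlying TTM BO and return @bo. */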
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
        ttm_bo_reference(&bo->tbo);
        return bo;
}

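/*
 * Pin a BO so it cannot be evicted: if it is already pinned just bump
 * the pin count, otherwise revalidate it into @domain with
 * TTM_PL_FLAG_NO_EVICT set.  The GPU offset is optionally returned
 * through @gpu_addr.
 */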
int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
        struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
        int r;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = qxl_bo_gpu_offset(bo);
                return 0;
        }
        qxl_ttm_placement_from_domain(bo, domain, true);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
                        *gpu_addr = qxl_bo_gpu_offset(bo);
        }
        if (unlikely(r != 0))
                dev_err(qdev->dev, "%p pin failed\n", bo);
        return r;
}

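/*
 * Drop one pin reference.  When the count reaches zero, clear
 * TTM_PL_FLAG_NO_EVICT from all placements and revalidate so the BO
 * becomes evictable again.
 */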
int qxl_bo_unpin(struct qxl_bo *bo)
{
        struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
        int r, i;

        if (!bo->pin_count) {
                dev_warn(qdev->dev, "%p unpin not necessary\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (unlikely(r != 0))
                dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
        return r;
}

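/*
 * Teardown helper: reclaim any GEM objects userspace failed to
 * release.  Each leftover BO is unlinked and its GEM reference
 * dropped, which should in turn release the underlying TTM object.
 */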
void qxl_bo_force_delete(struct qxl_device *qdev)
{
        struct qxl_bo *bo, *n;

        if (list_empty(&qdev->gem.objects))
                return;
        dev_err(qdev->dev, "Userspace still has active objects!\n");
        list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
                mutex_lock(&qdev->ddev->struct_mutex);
                dev_err(qdev->dev, "%p %p %lu %lu force free\n",
                        &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
                        *((unsigned long *)&bo->gem_base.refcount));
                mutex_lock(&qdev->gem.mutex);
                list_del_init(&bo->list);
                mutex_unlock(&qdev->gem.mutex);
                /* this should unref the ttm bo */
                drm_gem_object_unreference(&bo->gem_base);
                mutex_unlock(&qdev->ddev->struct_mutex);
        }
}

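/* BO subsystem init/teardown: thin wrappers around the TTM setup. */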
int qxl_bo_init(struct qxl_device *qdev)
{
        return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
        qxl_ttm_fini(qdev);
}

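/*
 * Surface BOs get their surface id and hardware surface lazily: the
 * first time one is actually used, allocate both here.
 */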
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
        int ret;

        if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
                /* allocate a surface id for this surface now */
                ret = qxl_surface_id_alloc(qdev, bo);
                if (ret)
                        return ret;

                ret = qxl_hw_surface_alloc(qdev, bo, NULL);
                if (ret)
                        return ret;
        }
        return 0;
}

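/*
 * The two helpers below empty an entire TTM memory pool (surface
 * memory or VRAM, respectively) by evicting every BO in it.
 */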
int qxl_surf_evict(struct qxl_device *qdev)
{
        return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
}

int qxl_vram_evict(struct qxl_device *qdev)
{
        return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
}