/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude concurrent BO access we rely on bo_reserve exclusion,
 * as all of these functions call it.
 */

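/**
 * radeon_bo_clear_va - remove a BO from all VM address spaces
 * @bo: BO being torn down
 *
 * Unmaps the BO from every VM it is bound to, so that no stale
 * mappings survive the object.
 */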
void radeon_bo_clear_va(struct radeon_bo *bo)
{
        struct radeon_bo_va *bo_va, *tmp;

        list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
                /* remove from all VM address spaces */
                radeon_vm_bo_rmv(bo->rdev, bo_va);
        }
}

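/**
 * radeon_ttm_bo_destroy - TTM destroy callback for radeon BOs
 * @tbo: TTM BO being destroyed
 *
 * Unlinks the BO from the device's GEM list, releases its surface
 * register and VM mappings, drops the GEM object and frees the memory.
 */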
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct radeon_bo *bo;

        bo = container_of(tbo, struct radeon_bo, tbo);
        mutex_lock(&bo->rdev->gem.mutex);
        list_del_init(&bo->list);
        mutex_unlock(&bo->rdev->gem.mutex);
        radeon_bo_clear_surface_reg(bo);
        radeon_bo_clear_va(bo);
        drm_gem_object_release(&bo->gem_base);
        kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
        return bo->destroy == &radeon_ttm_bo_destroy;
}

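/**
 * radeon_ttm_placement_from_domain - build a TTM placement from a domain mask
 * @rbo: BO whose placement list is (re)built
 * @domain: mask of RADEON_GEM_DOMAIN_VRAM/GTT/CPU bits
 *
 * Translates the requested domains into TTM placement flags; falls back
 * to system memory when no domain bit is set.
 */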
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
        u32 c = 0;

        rbo->placement.fpfn = 0;
        rbo->placement.lpfn = 0;
        rbo->placement.placement = rbo->placements;
        rbo->placement.busy_placement = rbo->placements;
        if (domain & RADEON_GEM_DOMAIN_VRAM)
                rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                                        TTM_PL_FLAG_VRAM;
        if (domain & RADEON_GEM_DOMAIN_GTT) {
                if (rbo->rdev->flags & RADEON_IS_AGP) {
                        rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
                } else {
                        rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
                }
        }
        if (domain & RADEON_GEM_DOMAIN_CPU) {
                if (rbo->rdev->flags & RADEON_IS_AGP) {
                        rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
                } else {
                        rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
                }
        }
        if (!c)
                rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        rbo->placement.num_placement = c;
        rbo->placement.num_busy_placement = c;
}

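/**
 * radeon_bo_create - allocate and initialize a radeon BO
 * @rdev: radeon device
 * @size: size in bytes, rounded up to page size
 * @byte_align: byte alignment of the buffer
 * @kernel: true for kernel-internal (uninterruptible) allocations
 * @domain: initial RADEON_GEM_DOMAIN_* placement
 * @sg: optional scatter/gather table for imported buffers
 * @bo_ptr: where to return the new BO
 *
 * Returns 0 on success or a negative error code.
 */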
int radeon_bo_create(struct radeon_device *rdev,
                     unsigned long size, int byte_align, bool kernel, u32 domain,
                     struct sg_table *sg, struct radeon_bo **bo_ptr)
{
        struct radeon_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        size_t acc_size;
        int r;

        size = ALIGN(size, PAGE_SIZE);

        rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
        if (kernel) {
                type = ttm_bo_type_kernel;
        } else if (sg) {
                type = ttm_bo_type_sg;
        } else {
                type = ttm_bo_type_device;
        }
        *bo_ptr = NULL;

        acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
                                       sizeof(struct radeon_bo));

        bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        bo->rdev = rdev;
        bo->gem_base.driver_private = NULL;
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
        INIT_LIST_HEAD(&bo->va);
        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocations are uninterruptible */
        down_read(&rdev->pm.mclk_lock);
        r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, page_align, !kernel, NULL,
                        acc_size, sg, &radeon_ttm_bo_destroy);
        up_read(&rdev->pm.mclk_lock);
        if (unlikely(r != 0)) {
                return r;
        }
        *bo_ptr = bo;

        trace_radeon_bo_create(bo);

        return 0;
}

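/**
 * radeon_bo_kmap - map a BO into kernel address space
 * @bo: BO to map
 * @ptr: optional return of the kernel virtual address
 *
 * Maps the whole object and caches the pointer; repeated calls
 * return the cached mapping.
 */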
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
        bool is_iomem;
        int r;

        if (bo->kptr) {
                if (ptr) {
                        *ptr = bo->kptr;
                }
                return 0;
        }
        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r) {
                return r;
        }
        bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
        if (ptr) {
                *ptr = bo->kptr;
        }
        radeon_bo_check_tiling(bo, false, false);
        return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
        if (bo->kptr == NULL)
                return;
        bo->kptr = NULL;
        radeon_bo_check_tiling(bo, false, false);
        ttm_bo_kunmap(&bo->kmap);
}

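/**
 * radeon_bo_unref - drop a reference to a BO
 * @bo: pointer to the BO reference; cleared when the last reference
 *      is dropped and the object is destroyed
 */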
void radeon_bo_unref(struct radeon_bo **bo)
{
        struct ttm_buffer_object *tbo;
        struct radeon_device *rdev;

        if ((*bo) == NULL)
                return;
        rdev = (*bo)->rdev;
        tbo = &((*bo)->tbo);
        down_read(&rdev->pm.mclk_lock);
        ttm_bo_unref(&tbo);
        up_read(&rdev->pm.mclk_lock);
        if (tbo == NULL)
                *bo = NULL;
}

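/**
 * radeon_bo_pin_restricted - pin a BO, optionally below an offset limit
 * @bo: BO to pin (must be reserved by the caller)
 * @domain: domain to pin into
 * @max_offset: highest acceptable GPU offset, or 0 for no limit
 * @gpu_addr: optional return of the pinned GPU address
 *
 * Repeated pins just bump the pin count. Returns 0 on success.
 */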
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
                             u64 *gpu_addr)
{
        int r, i;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = radeon_bo_gpu_offset(bo);

                if (max_offset != 0) {
                        u64 domain_start;

                        if (domain == RADEON_GEM_DOMAIN_VRAM)
                                domain_start = bo->rdev->mc.vram_start;
                        else
                                domain_start = bo->rdev->mc.gtt_start;
                        WARN_ON_ONCE(max_offset <
                                     (radeon_bo_gpu_offset(bo) - domain_start));
                }

                return 0;
        }
        radeon_ttm_placement_from_domain(bo, domain);
        if (domain == RADEON_GEM_DOMAIN_VRAM) {
                /* force pinning into visible VRAM */
                bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
        }
        if (max_offset) {
                u64 lpfn = max_offset >> PAGE_SHIFT;

                if (!bo->placement.lpfn)
                        bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;

                if (lpfn < bo->placement.lpfn)
                        bo->placement.lpfn = lpfn;
        }
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
                        *gpu_addr = radeon_bo_gpu_offset(bo);
        }
        if (unlikely(r != 0))
                dev_err(bo->rdev->dev, "%p pin failed\n", bo);
        return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
        return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

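/*
 * Illustrative lifecycle of a kernel-owned BO (a sketch only, error
 * handling elided; nothing in this file calls it this way verbatim):
 *
 *      struct radeon_bo *bo;
 *      u64 gpu_addr;
 *      void *cpu_ptr;
 *
 *      radeon_bo_create(rdev, size, PAGE_SIZE, true,
 *                       RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
 *      radeon_bo_reserve(bo, false);
 *      radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *      radeon_bo_kmap(bo, &cpu_ptr);
 *      ...
 *      radeon_bo_kunmap(bo);
 *      radeon_bo_unpin(bo);
 *      radeon_bo_unreserve(bo);
 *      radeon_bo_unref(&bo);
 */
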
int radeon_bo_unpin(struct radeon_bo *bo)
{
        int r, i;

        if (!bo->pin_count) {
                dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (unlikely(r != 0))
                dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
        return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
        if (0 && (rdev->flags & RADEON_IS_IGP)) {
                if (rdev->mc.igp_sideport_enabled == false)
                        /* Useless to evict on IGP chips */
                        return 0;
        }
        return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
        struct radeon_bo *bo, *n;

        if (list_empty(&rdev->gem.objects)) {
                return;
        }
        dev_err(rdev->dev, "Userspace still has active objects!\n");
        list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
                mutex_lock(&rdev->ddev->struct_mutex);
                dev_err(rdev->dev, "%p %p %lu %lu force free\n",
                        &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
                        *((unsigned long *)&bo->gem_base.refcount));
                mutex_lock(&bo->rdev->gem.mutex);
                list_del_init(&bo->list);
                mutex_unlock(&bo->rdev->gem.mutex);
                /* this should unref the ttm bo */
                drm_gem_object_unreference(&bo->gem_base);
                mutex_unlock(&rdev->ddev->struct_mutex);
        }
}

int radeon_bo_init(struct radeon_device *rdev)
{
        /* Add an MTRR for the VRAM */
        if (!rdev->fastfb_working) {
                rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
                                                      rdev->mc.aper_size);
        }
        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
                 rdev->mc.mc_vram_size >> 20,
                 (unsigned long long)rdev->mc.aper_size >> 20);
        DRM_INFO("RAM width %dbits %cDR\n",
                 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
        return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
        radeon_ttm_fini(rdev);
        arch_phys_wc_del(rdev->mc.vram_mtrr);
}

void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
                               struct list_head *head)
{
        if (lobj->written) {
                list_add(&lobj->tv.head, head);
        } else {
                list_add_tail(&lobj->tv.head, head);
        }
}

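/**
 * radeon_bo_list_validate - reserve and place every BO on a list
 * @ticket: ww_mutex acquire context for the reservation
 * @head: list of radeon_bo_list entries to validate
 * @ring: ring index, used to force UVD BOs into their segment
 *
 * Unpinned BOs are validated into their preferred domain, falling back
 * to the alternate domain on failure. Backs off all reservations and
 * returns the error if any BO cannot be placed.
 */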
int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
                            struct list_head *head, int ring)
{
        struct radeon_bo_list *lobj;
        struct radeon_bo *bo;
        u32 domain;
        int r;

        r = ttm_eu_reserve_buffers(ticket, head);
        if (unlikely(r != 0)) {
                return r;
        }
        list_for_each_entry(lobj, head, tv.head) {
                bo = lobj->bo;
                if (!bo->pin_count) {
                        domain = lobj->domain;

                retry:
                        radeon_ttm_placement_from_domain(bo, domain);
                        if (ring == R600_RING_TYPE_UVD_INDEX)
                                radeon_uvd_force_into_uvd_segment(bo);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement,
                                            true, false);
                        if (unlikely(r)) {
                                if (r != -ERESTARTSYS && domain != lobj->alt_domain) {
                                        domain = lobj->alt_domain;
                                        goto retry;
                                }
                                ttm_eu_backoff_reservation(ticket, head);
                                return r;
                        }
                }
                lobj->gpu_offset = radeon_bo_gpu_offset(bo);
                lobj->tiling_flags = bo->tiling_flags;
        }
        return 0;
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
                         struct vm_area_struct *vma)
{
        return ttm_fbdev_mmap(vma, &bo->tbo);
}

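/**
 * radeon_bo_get_surface_reg - bind a tiled BO to a hardware surface register
 * @bo: tiled BO (must be reserved by the caller)
 *
 * Finds a free surface register, stealing one from an unpinned BO if
 * all RADEON_GEM_MAX_SURFACES slots are taken, and programs it with the
 * BO's tiling parameters. Returns -ENOMEM if no register can be freed.
 */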
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;
        struct radeon_bo *old_object;
        int steal;
        int i;

        lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (!bo->tiling_flags)
                return 0;

        if (bo->surface_reg >= 0) {
                reg = &rdev->surface_regs[bo->surface_reg];
                i = bo->surface_reg;
                goto out;
        }

        steal = -1;
        for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
                reg = &rdev->surface_regs[i];
                if (!reg->bo)
                        break;

                old_object = reg->bo;
                if (old_object->pin_count == 0)
                        steal = i;
        }

        /* if we are all out */
        if (i == RADEON_GEM_MAX_SURFACES) {
                if (steal == -1)
                        return -ENOMEM;
                /* find someone with a surface reg and nuke their BO */
                reg = &rdev->surface_regs[steal];
                old_object = reg->bo;
                /* blow away the mapping */
                DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
                ttm_bo_unmap_virtual(&old_object->tbo);
                old_object->surface_reg = -1;
                i = steal;
        }

        bo->surface_reg = i;
        reg->bo = bo;

out:
        radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
                               bo->tbo.mem.start << PAGE_SHIFT,
                               bo->tbo.num_pages << PAGE_SHIFT);
        return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;

        if (bo->surface_reg == -1)
                return;

        reg = &rdev->surface_regs[bo->surface_reg];
        radeon_clear_surface_reg(rdev, bo->surface_reg);

        reg->bo = NULL;
        bo->surface_reg = -1;
}

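/**
 * radeon_bo_set_tiling_flags - validate and store tiling state for a BO
 * @bo: BO to update
 * @tiling_flags: RADEON_TILING_* flags and encoded parameters
 * @pitch: surface pitch
 *
 * On evergreen and newer (CHIP_CEDAR+) the bank width/height, macro
 * tile aspect and tile split fields are range-checked first; invalid
 * values return -EINVAL.
 */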
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
                               uint32_t tiling_flags, uint32_t pitch)
{
        struct radeon_device *rdev = bo->rdev;
        int r;

        if (rdev->family >= CHIP_CEDAR) {
                unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

                bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
                bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
                mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
                tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
                stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
                switch (bankw) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (bankh) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (mtaspect) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                if (tilesplit > 6) {
                        return -EINVAL;
                }
                if (stilesplit > 6) {
                        return -EINVAL;
                }
        }
        r = radeon_bo_reserve(bo, false);
        if (unlikely(r != 0))
                return r;
        bo->tiling_flags = tiling_flags;
        bo->pitch = pitch;
        radeon_bo_unreserve(bo);
        return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
                                uint32_t *tiling_flags,
                                uint32_t *pitch)
{
        lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
        if (pitch)
                *pitch = bo->pitch;
}

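/**
 * radeon_bo_check_tiling - keep a BO's surface register in sync with placement
 * @bo: BO to check
 * @has_moved: true if the BO just changed memory type
 * @force_drop: true to unconditionally release the surface register
 *
 * Tiled BOs need a surface register only while they live in VRAM;
 * this acquires or releases one as the BO moves.
 */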
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
                           bool force_drop)
{
        if (!force_drop)
                lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
                return 0;

        if (force_drop) {
                radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
                if (!has_moved)
                        return 0;

                if (bo->surface_reg >= 0)
                        radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if ((bo->surface_reg >= 0) && !has_moved)
                return 0;

        return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem)
{
        struct radeon_bo *rbo;

        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, false, true);
        radeon_vm_bo_invalidate(rbo->rdev, rbo);
}

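/**
 * radeon_bo_fault_reserve_notify - make a faulting BO CPU-accessible
 * @bo: TTM BO that took a CPU page fault
 *
 * If the BO sits in VRAM beyond the CPU-visible aperture, revalidate
 * it into visible VRAM before the fault is served.
 */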
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct radeon_device *rdev;
        struct radeon_bo *rbo;
        unsigned long offset, size;
        int r;

        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return 0;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, false, false);
        rdev = rbo->rdev;
        if (bo->mem.mem_type == TTM_PL_VRAM) {
                size = bo->mem.num_pages << PAGE_SHIFT;
                offset = bo->mem.start << PAGE_SHIFT;
                if ((offset + size) > rdev->mc.visible_vram_size) {
                        /* the memory is not CPU-visible, move it */
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
                        rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
                        r = ttm_bo_validate(bo, &rbo->placement, false, false);
                        if (unlikely(r != 0))
                                return r;
                        offset = bo->mem.start << PAGE_SHIFT;
                        /* this should not happen */
                        if ((offset + size) > rdev->mc.visible_vram_size)
                                return -EINVAL;
                }
        }
        return 0;
}

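/**
 * radeon_bo_wait - wait for all GPU use of a BO to finish
 * @bo: BO to wait on
 * @mem_type: optional return of the BO's current memory type
 * @no_wait: if true, return -EBUSY rather than blocking
 */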
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
        int r;

        r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
        if (unlikely(r != 0))
                return r;
        spin_lock(&bo->tbo.bdev->fence_lock);
        if (mem_type)
                *mem_type = bo->tbo.mem.mem_type;
        if (bo->tbo.sync_obj)
                r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
        spin_unlock(&bo->tbo.bdev->fence_lock);
        ttm_bo_unreserve(&bo->tbo);
        return r;
}