radeon: Unmap vram pages when reclocking
author     Matthew Garrett <mjg@redhat.com>
           Mon, 26 Apr 2010 19:52:20 +0000 (15:52 -0400)
committer  Dave Airlie <airlied@redhat.com>
           Tue, 18 May 2010 08:21:17 +0000 (18:21 +1000)
Touching vram while the card is reclocking can lead to lockups. Unmap
any pages that could be touched by the CPU and block any accesses to
vram until the reclocking is complete.

Signed-off-by: Matthew Garrett <mjg@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
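
The mechanism is simple: a new rdev->vram_mutex is taken by every path that can set up or tear down a CPU mapping of VRAM (buffer object creation, the final unref, and the page fault handler), and radeon_pm_set_clocks() holds it across the clock switch after unmapping everything the CPU could reach. The following is a minimal user-space sketch of that ordering, assuming nothing beyond pthreads; the names fake_vram, cpu_touch and do_reclock are hypothetical stand-ins for the kernel-side objects, not driver code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t vram_mutex = PTHREAD_MUTEX_INITIALIZER;
static char *fake_vram;                /* stands in for a CPU mapping of VRAM */

/* Mirrors the wrapped fault handler: no CPU access without the lock. */
static void cpu_touch(size_t off, char v)
{
        pthread_mutex_lock(&vram_mutex);
        fake_vram[off] = v;
        pthread_mutex_unlock(&vram_mutex);
}

/* Mirrors radeon_pm_set_clocks(): take the lock, drop the mapping, reclock. */
static void do_reclock(void)
{
        pthread_mutex_lock(&vram_mutex);
        free(fake_vram);               /* plays the role of ttm_bo_unmap_virtual() */
        fake_vram = NULL;
        /* ... the engine/memory clock change would happen here ... */
        fake_vram = calloc(1, 4096);   /* in the driver the mapping comes back lazily on the next fault */
        pthread_mutex_unlock(&vram_mutex);
}

int main(void)
{
        fake_vram = calloc(1, 4096);
        cpu_touch(0, 'a');
        do_reclock();
        cpu_touch(0, 'b');             /* serialized against the reclock above */
        printf("fake_vram[0] = %c\n", fake_vram[0]);
        free(fake_vram);
        return 0;
}

Compile with cc -pthread for a quick demonstration; the point is only the lock ordering: no access can land between the unmap and the completion of the clock change.
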
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_ttm.c

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index cdcf5eaf671455d4f41aeaf50759657e2388005d..bed84b316bba4fdfb79243a946be5dec767dbe56 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1024,6 +1024,7 @@ struct radeon_device {
        struct work_struct hotplug_work;
        int num_crtc; /* number of crtcs */
        struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
+       struct mutex vram_mutex;
 
        /* audio stuff */
        struct timer_list       audio_timer;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 53a2c27dd8fa3556985c1844b07e3d8b41d1b5d3..0372ec96020f02e9c6196e1a1e417b3faf6415e6 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -599,6 +599,7 @@ int radeon_device_init(struct radeon_device *rdev,
                spin_lock_init(&rdev->ih.lock);
        mutex_init(&rdev->gem.mutex);
        mutex_init(&rdev->pm.mutex);
+       mutex_init(&rdev->vram_mutex);
        rwlock_init(&rdev->fence_drv.lock);
        INIT_LIST_HEAD(&rdev->gem.objects);
        init_waitqueue_head(&rdev->irq.vblank_queue);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 6a8617bac1429400105b626d796f86029f7c132d..06def708b014100c6d935ce82b1a4ea92e94f2d0 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -112,9 +112,11 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 
        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocation are uninterruptible */
+       mutex_lock(&rdev->vram_mutex);
        r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, 0, 0, !kernel, NULL, size,
                        &radeon_ttm_bo_destroy);
+       mutex_unlock(&rdev->vram_mutex);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        dev_err(rdev->dev,
@@ -170,7 +172,9 @@ void radeon_bo_unref(struct radeon_bo **bo)
        if ((*bo) == NULL)
                return;
        tbo = &((*bo)->tbo);
+       mutex_lock(&(*bo)->rdev->vram_mutex);
        ttm_bo_unref(&tbo);
+       mutex_unlock(&(*bo)->rdev->vram_mutex);
        if (tbo == NULL)
                *bo = NULL;
 }
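
Note that buffer object creation and the final unref above take the same rdev->vram_mutex around ttm_bo_init() and ttm_bo_unref(), so a buffer object cannot be placed into or torn out of VRAM while the reclock path below holds the lock.
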
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index a61de1f9ff64ec347ae6394d127859e7462b2a20..da35bd7f38dc2f28b3f2b7df031e601091d99d22 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
 static void radeon_pm_idle_work_handler(struct work_struct *work);
 static int radeon_debugfs_pm_init(struct radeon_device *rdev);
 
+static void radeon_unmap_vram_bos(struct radeon_device *rdev)
+{
+       struct radeon_bo *bo, *n;
+
+       if (list_empty(&rdev->gem.objects))
+               return;
+
+       list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
+               if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+                       ttm_bo_unmap_virtual(&bo->tbo);
+       }
+
+       if (rdev->gart.table.vram.robj)
+               ttm_bo_unmap_virtual(&rdev->gart.table.vram.robj->tbo);
+
+       if (rdev->stollen_vga_memory)
+               ttm_bo_unmap_virtual(&rdev->stollen_vga_memory->tbo);
+
+       if (rdev->r600_blit.shader_obj)
+               ttm_bo_unmap_virtual(&rdev->r600_blit.shader_obj->tbo);
+}
+
 static void radeon_pm_set_clocks(struct radeon_device *rdev, int static_switch)
 {
        int i;
@@ -48,6 +70,10 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev, int static_switch)
        rdev->irq.gui_idle = false;
        radeon_irq_set(rdev);
 
+       mutex_lock(&rdev->vram_mutex);
+
+       radeon_unmap_vram_bos(rdev);
+
        if (!static_switch) {
                for (i = 0; i < rdev->num_crtc; i++) {
                        if (rdev->pm.active_crtcs & (1 << i)) {
@@ -67,6 +93,8 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev, int static_switch)
                        }
                }
        }
+
+       mutex_unlock(&rdev->vram_mutex);
        
        /* update display watermarks based on new power state */
        radeon_update_bandwidth_info(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index af98f45954b31884e748a1d922dee33964949424..3aa3a65800abd1354aefafcd9e40287de651cc84 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -607,13 +607,17 @@ static const struct vm_operations_struct *ttm_vm_ops = NULL;
 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct ttm_buffer_object *bo;
+       struct radeon_device *rdev;
        int r;
 
-       bo = (struct ttm_buffer_object *)vma->vm_private_data;
+       bo = (struct ttm_buffer_object *)vma->vm_private_data;  
        if (bo == NULL) {
                return VM_FAULT_NOPAGE;
        }
+       rdev = radeon_get_rdev(bo->bdev);
+       mutex_lock(&rdev->vram_mutex);
        r = ttm_vm_ops->fault(vma, vmf);
+       mutex_unlock(&rdev->vram_mutex);
        return r;
 }
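
Since radeon_ttm_fault() is now just a thin wrapper that takes rdev->vram_mutex around the original ttm_vm_ops->fault(), a process touching an unmapped VRAM page during a reclock simply sleeps on the mutex. Once radeon_pm_set_clocks() drops the lock, the fault proceeds through TTM as usual and the mapping is re-established, so userspace never sees the window in which the clocks are changing.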