]> rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/commitdiff
video: tegra: nvmap: fix potential deadlock
authorKirill Artamonov <kartamonov@nvidia.com>
Fri, 18 Feb 2011 12:58:41 +0000 (14:58 +0200)
committerDan Willemsen <dwillemsen@nvidia.com>
Sat, 14 Sep 2013 08:18:14 +0000 (01:18 -0700)
Enabling mutex debugging revealed potential deadlocks
introduced with compaction.

The handle spin lock is replaced with a mutex. Heap functions cannot be
protected with a spinlock because they call kernel slab allocation
functions, which cannot be called from atomic context.

The nvmap_client ref_lock is also replaced with a mutex; otherwise we
cannot access heap parameters protected by the nvmap_handle mutex lock.

Extra locking for handle->owner removed.

bug 793364

Original-Change-Id: I635ce9ebf259dd7bf8802457567f93b7be5795ea
Reviewed-on: http://git-master/r/19850
Reviewed-by: Kirill Artamonov <kartamonov@nvidia.com>
Tested-by: Kirill Artamonov <kartamonov@nvidia.com>
Reviewed-by: Daniel Willemsen <dwillemsen@nvidia.com>
Rebase-Id: Reaa132703e278d75371d5e2b25426794aa8e0e4e

drivers/video/tegra/nvmap/nvmap.c
drivers/video/tegra/nvmap/nvmap.h
drivers/video/tegra/nvmap/nvmap_dev.c
drivers/video/tegra/nvmap/nvmap_handle.c
drivers/video/tegra/nvmap/nvmap_heap.c
drivers/video/tegra/nvmap/nvmap_ioctl.c

index f12eb0c02bf961478a6e052d57f3589c6fac2f1a..ecc3b49bf60e4119403e54e2627aba63ae217793 100644 (file)
@@ -578,9 +578,9 @@ unsigned long nvmap_handle_address(struct nvmap_client *c, unsigned long id)
        h = nvmap_get_handle_id(c, id);
        if (!h)
                return -EPERM;
-       spin_lock(&h->lock);
+       mutex_lock(&h->lock);
        phys = handle_phys(h);
-       spin_unlock(&h->lock);
+       mutex_unlock(&h->lock);
        nvmap_handle_put(h);
 
        return phys;
index 0d1e1eec48b5564cbd9eebc877c980eb34e60aa7..923ff8fc8d8a8ee15b8b7f09bc99583f0871356b 100644 (file)
@@ -81,7 +81,7 @@ struct nvmap_handle {
        bool secure;            /* zap IOVMM area on unpin */
        bool heap_pgalloc;      /* handle is page allocated (sysmem / iovmm) */
        bool alloc;             /* handle has memory allocated */
-       spinlock_t lock;
+       struct mutex lock;
 };
 
 struct nvmap_share {
@@ -107,7 +107,7 @@ struct nvmap_client {
        struct rb_root                  handle_refs;
        atomic_t                        iovm_commit;
        size_t                          iovm_limit;
-       spinlock_t                      ref_lock;
+       struct mutex                    ref_lock;
        bool                            super;
        atomic_t                        count;
        struct task_struct              *task;
@@ -133,12 +133,12 @@ struct nvmap_vma_priv {
 
 static inline void nvmap_ref_lock(struct nvmap_client *priv)
 {
-       spin_lock(&priv->ref_lock);
+       mutex_lock(&priv->ref_lock);
 }
 
 static inline void nvmap_ref_unlock(struct nvmap_client *priv)
 {
-       spin_unlock(&priv->ref_lock);
+       mutex_unlock(&priv->ref_lock);
 }
 
 struct device *nvmap_client_to_device(struct nvmap_client *client);
index 301366925a1d49e363e24cf4f6918b7e65e67a26..26ac16c375f4f7e548c203bc24ff86fb56c72daa 100644 (file)
@@ -627,7 +627,7 @@ struct nvmap_client *nvmap_create_client(struct nvmap_device *dev,
        task_unlock(current->group_leader);
        client->task = task;
 
-       spin_lock_init(&client->ref_lock);
+       mutex_init(&client->ref_lock);
        atomic_set(&client->count, 1);
 
        return client;
@@ -652,10 +652,8 @@ static void destroy_client(struct nvmap_client *client)
                smp_rmb();
                pins = atomic_read(&ref->pin);
 
-               spin_lock(&ref->handle->lock);
                if (ref->handle->owner == client)
                    ref->handle->owner = NULL;
-               spin_unlock(&ref->handle->lock);
 
                while (pins--)
                        nvmap_unpin_handles(client, &ref->handle, 1);
index c8a2c1fc9c79e645167909ee4912a98aa836ad20..dc3be30ca2f5426ad44969ae4594e034280e839d 100644 (file)
@@ -371,11 +371,11 @@ void nvmap_free_handle_id(struct nvmap_client *client, unsigned long id)
                atomic_sub(h->size, &client->iovm_commit);
 
        if (h->alloc && !h->heap_pgalloc) {
-               spin_lock(&h->lock);
+               mutex_lock(&h->lock);
                nvmap_carveout_commit_subtract(client,
                        nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
                        h->size);
-               spin_unlock(&h->lock);
+               mutex_unlock(&h->lock);
        }
 
        nvmap_ref_unlock(client);
@@ -387,10 +387,8 @@ void nvmap_free_handle_id(struct nvmap_client *client, unsigned long id)
        while (pins--)
                nvmap_unpin_handles(client, &ref->handle, 1);
 
-       spin_lock(&h->lock);
        if (h->owner == client)
                h->owner = NULL;
-       spin_unlock(&h->lock);
 
        kfree(ref);
 
@@ -446,7 +444,7 @@ struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
        BUG_ON(!h->owner);
        h->size = h->orig_size = size;
        h->flags = NVMAP_HANDLE_WRITE_COMBINE;
-       spin_lock_init(&h->lock);
+       mutex_init(&h->lock);
 
        nvmap_handle_add(client->dev, h);
 
@@ -516,11 +514,11 @@ struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
        }
 
        if (!h->heap_pgalloc) {
-               spin_lock(&h->lock);
+               mutex_lock(&h->lock);
                nvmap_carveout_commit_add(client,
                        nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
                        h->size);
-               spin_unlock(&h->lock);
+               mutex_unlock(&h->lock);
        }
 
        atomic_set(&ref->dupes, 1);
index b7a9050938d3a44dfb93cb49fd829e968d693ad7..1342d309270101b9aa8512d7ab47c089b22fb97b 100644 (file)
@@ -697,24 +697,22 @@ static struct nvmap_heap_block *do_heap_relocate_listblock(
        size_t src_align = block->align;
        unsigned int src_prot = block->mem_prot;
        int error = 0;
+       struct nvmap_share *share;
 
        if (!handle) {
                pr_err("INVALID HANDLE!\n");
                return NULL;
        }
 
-       spin_lock(&handle->lock);
+       mutex_lock(&handle->lock);
 
-       if (!handle->owner) {
-               spin_unlock(&handle->lock);
-               return NULL;
-       }
+       share = nvmap_get_share_from_dev(handle->dev);
 
        /* TODO: It is possible to use only handle lock and no share
         * pin_lock, but then we'll need to lock every handle during
         * each pinning operation. Need to estimate performance impact
         * if we decide to simplify locking this way. */
-       mutex_lock(&handle->owner->share->pin_lock);
+       mutex_lock(&share->pin_lock);
 
        /* abort if block is pinned */
        if (atomic_read(&handle->pin))
@@ -756,8 +754,8 @@ static struct nvmap_heap_block *do_heap_relocate_listblock(
        BUG_ON(error);
 
 fail:
-       mutex_unlock(&handle->owner->share->pin_lock);
-       spin_unlock(&handle->lock);
+       mutex_unlock(&share->pin_lock);
+       mutex_unlock(&handle->lock);
        return heap_block_new;
 }
 
@@ -830,9 +828,9 @@ static void nvmap_heap_compact(struct nvmap_heap *heap,
 void nvmap_usecount_inc(struct nvmap_handle *h)
 {
        if (h->alloc && !h->heap_pgalloc) {
-               spin_lock(&h->lock);
+               mutex_lock(&h->lock);
                h->usecount++;
-               spin_unlock(&h->lock);
+               mutex_unlock(&h->lock);
        } else {
                h->usecount++;
        }
index 52cb5439f9ce403e1e77d77f57efd07d456d51e3..91d0a148202bc2800891040daa5044f06076e955 100644 (file)
@@ -311,7 +311,7 @@ int nvmap_ioctl_get_param(struct file *filp, void __user* arg)
                op.result = h->orig_size;
                break;
        case NVMAP_HANDLE_PARAM_ALIGNMENT:
-               spin_lock(&h->lock);
+               mutex_lock(&h->lock);
                if (!h->alloc)
                        op.result = 0;
                else if (h->heap_pgalloc)
@@ -320,15 +320,15 @@ int nvmap_ioctl_get_param(struct file *filp, void __user* arg)
                        op.result = (h->carveout->base & -h->carveout->base);
                else
                        op.result = SZ_4M;
-               spin_unlock(&h->lock);
+               mutex_unlock(&h->lock);
                break;
        case NVMAP_HANDLE_PARAM_BASE:
                if (WARN_ON(!h->alloc || !atomic_add_return(0, &h->pin)))
                        op.result = -1ul;
                else if (!h->heap_pgalloc) {
-                       spin_lock(&h->lock);
+                       mutex_lock(&h->lock);
                        op.result = h->carveout->base;
-                       spin_unlock(&h->lock);
+                       mutex_unlock(&h->lock);
                }
                else if (h->pgalloc.contig)
                        op.result = page_to_phys(h->pgalloc.pages[0]);
@@ -341,9 +341,9 @@ int nvmap_ioctl_get_param(struct file *filp, void __user* arg)
                if (!h->alloc)
                        op.result = 0;
                else if (!h->heap_pgalloc) {
-                       spin_lock(&h->lock);
+                       mutex_lock(&h->lock);
                        op.result = nvmap_carveout_usage(client, h->carveout);
-                       spin_unlock(&h->lock);
+                       mutex_unlock(&h->lock);
                }
                else if (h->pgalloc.contig)
                        op.result = NVMAP_HEAP_SYSMEM;