From 57abd0b4e27d9bc6a84b18ce28f8aa9200304ec3 Mon Sep 17 00:00:00 2001
From: Maneet Singh
Date: Thu, 11 Sep 2014 20:12:33 -0700
Subject: [PATCH] video: tegra: nvmap: fix use-after-free race condition

Incremented the nvmap_handle ref count in the utility function
nvmap_get_id_from_dmabuf_fd() before the function releases its
reference to the dma buffer. This is required to avoid race conditions
in nvmap code where the nvmap_handle returned by this function could be
freed concurrently while the caller is still using it.

As a side effect of the above change, every caller of this utility
function must decrement the nvmap_handle ref count after using the
returned nvmap_handle.

Bug 1553082

Change-Id: Iffc2e5819f8b493d5ed95a9d0c422ccd52438965
Signed-off-by: Maneet Singh
Reviewed-on: http://git-master/r/498135
(cherry picked from commit afddea745cc4f4a824be501ecbbb50f55e7e6f04)
Reviewed-on: http://git-master/r/556843
Reviewed-by: Mitch Luban
Tested-by: Mitch Luban
---
 drivers/video/tegra/nvmap/nvmap_dmabuf.c |  8 +++
 drivers/video/tegra/nvmap/nvmap_handle.c |  7 ++-
 drivers/video/tegra/nvmap/nvmap_ioctl.c  | 69 +++++++++++++-----------
 3 files changed, 52 insertions(+), 32 deletions(-)

diff --git a/drivers/video/tegra/nvmap/nvmap_dmabuf.c b/drivers/video/tegra/nvmap/nvmap_dmabuf.c
index 2a165946704..06e8ba14e34 100644
--- a/drivers/video/tegra/nvmap/nvmap_dmabuf.c
+++ b/drivers/video/tegra/nvmap/nvmap_dmabuf.c
@@ -680,6 +680,11 @@ struct dma_buf *__nvmap_dmabuf_export_from_ref(struct nvmap_handle_ref *ref)
 /*
  * Returns the nvmap handle ID associated with the passed dma_buf's fd. This
  * does not affect the ref count of the dma_buf.
+ * NOTE: Callers of this utility function must invoke nvmap_handle_put after
+ * using the returned nvmap_handle. Call to nvmap_handle_get is required in
+ * this utility function to avoid race conditions in code where nvmap_handle
+ * returned by this function is freed concurrently while the caller is still
+ * using it.
  */
 struct nvmap_handle *nvmap_get_id_from_dmabuf_fd(struct nvmap_client *client,
						 int fd)
@@ -694,6 +699,8 @@ struct nvmap_handle *nvmap_get_id_from_dmabuf_fd(struct nvmap_client *client,
 	if (dmabuf->ops == &nvmap_dma_buf_ops) {
 		info = dmabuf->priv;
 		handle = info->handle;
+		if (!nvmap_handle_get(handle))
+			handle = ERR_PTR(-EINVAL);
 	}
 	dma_buf_put(dmabuf);
 	return handle;
@@ -715,6 +722,7 @@ int nvmap_ioctl_share_dmabuf(struct file *filp, void __user *arg)
 		return -EINVAL;
 
 	op.fd = nvmap_get_dmabuf_fd(client, handle);
+	nvmap_handle_put(handle);
 	if (op.fd < 0)
 		return op.fd;
 
diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c
index da26f985403..ae7e0b387dd 100644
--- a/drivers/video/tegra/nvmap/nvmap_handle.c
+++ b/drivers/video/tegra/nvmap/nvmap_handle.c
@@ -464,7 +464,11 @@ EXPORT_SYMBOL(nvmap_free_handle);
 void nvmap_free_handle_user_id(struct nvmap_client *client,
			       unsigned long user_id)
 {
-	nvmap_free_handle(client, unmarshal_user_handle(user_id));
+	struct nvmap_handle *handle = unmarshal_user_handle(user_id);
+	if (handle) {
+		nvmap_free_handle(client, handle);
+		nvmap_handle_put(handle);
+	}
 }
 
 static void add_handle_ref(struct nvmap_client *client,
@@ -638,6 +642,7 @@ struct nvmap_handle_ref *nvmap_create_handle_from_fd(
 	if (IS_ERR(handle))
 		return ERR_CAST(handle);
 	ref = nvmap_duplicate_handle(client, handle, 1);
+	nvmap_handle_put(handle);
 	return ref;
 }
 
diff --git a/drivers/video/tegra/nvmap/nvmap_ioctl.c b/drivers/video/tegra/nvmap/nvmap_ioctl.c
index 2eafb680e8f..4e70335cadd 100644
--- a/drivers/video/tegra/nvmap/nvmap_ioctl.c
+++ b/drivers/video/tegra/nvmap/nvmap_ioctl.c
@@ -46,6 +46,9 @@ static ssize_t rw_handle(struct nvmap_client *client, struct nvmap_handle *h,
			 unsigned long sys_stride, unsigned long elem_size,
			 unsigned long count);
 
+/* NOTE: Callers of this utility function must invoke nvmap_handle_put after
+ * using the returned nvmap_handle.
+ */
 struct nvmap_handle *unmarshal_user_handle(__u32 handle)
 {
 	struct nvmap_handle *h;
@@ -103,8 +106,8 @@ int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg,
 	struct nvmap_handle *on_stack[16];
 	struct nvmap_handle **refs;
 	unsigned long __user *output;
-	unsigned int i;
 	int err = 0;
+	u32 i, n_unmarshal_handles = 0;
 
 #ifdef CONFIG_COMPAT
 	if (is32) {
@@ -147,6 +150,7 @@ int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg,
				err = -EINVAL;
				goto out;
			}
+			n_unmarshal_handles++;
		}
 	} else {
		refs = on_stack;
@@ -157,6 +161,7 @@ int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg,
			err = -EINVAL;
			goto out;
		}
+		n_unmarshal_handles++;
 	}
 
 	trace_nvmap_ioctl_pinop(filp->private_data, is_pin, op.count, refs);
@@ -221,6 +226,9 @@ int nvmap_ioctl_pinop(struct file *filp, bool is_pin, void __user *arg,
		nvmap_unpin_ids(filp->private_data, op.count, refs);
 
 out:
+	for (i = 0; i < n_unmarshal_handles; i++)
+		nvmap_handle_put(refs[i]);
+
 	if (refs != on_stack)
		kfree(refs);
 
@@ -240,11 +248,6 @@ int nvmap_ioctl_getid(struct file *filp, void __user *arg)
 	if (!h)
		return -EINVAL;
 
-	h = nvmap_handle_get(h);
-
-	if (!h)
-		return -EPERM;
-
 	op.id = marshal_id(h);
 	if (client == h->owner)
		h->global = true;
@@ -289,6 +292,7 @@ int nvmap_ioctl_getfd(struct file *filp, void __user *arg)
		return -EINVAL;
 
 	op.fd = nvmap_get_dmabuf_fd(client, handle);
+	nvmap_handle_put(handle);
 	if (op.fd < 0)
		return op.fd;
 
@@ -304,15 +308,16 @@ int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
 	struct nvmap_alloc_handle op;
 	struct nvmap_client *client = filp->private_data;
 	struct nvmap_handle *handle;
+	int err;
 
 	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;
 
-	handle = unmarshal_user_handle(op.handle);
-	if (!handle)
+	if (op.align & (op.align - 1))
		return -EINVAL;
 
-	if (op.align & (op.align - 1))
+	handle = unmarshal_user_handle(op.handle);
+	if (!handle)
		return -EINVAL;
 
 	/* user-space handles are aligned to page boundaries, to prevent
@@ -322,9 +327,11 @@ int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
 	op.flags |= NVMAP_HANDLE_ZEROED_PAGES;
 #endif
 
-	return nvmap_alloc_handle(client, handle, op.heap_mask, op.align,
+	err = nvmap_alloc_handle(client, handle, op.heap_mask, op.align,
				  0, /* no kind */
				  op.flags & (~NVMAP_HANDLE_KIND_SPECIFIED));
+	nvmap_handle_put(handle);
+	return err;
 }
 
 int nvmap_ioctl_alloc_kind(struct file *filp, void __user *arg)
@@ -332,15 +339,16 @@
 	struct nvmap_alloc_kind_handle op;
 	struct nvmap_client *client = filp->private_data;
 	struct nvmap_handle *handle;
+	int err;
 
 	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;
 
-	handle = unmarshal_user_handle(op.handle);
-	if (!handle)
+	if (op.align & (op.align - 1))
		return -EINVAL;
 
-	if (op.align & (op.align - 1))
+	handle = unmarshal_user_handle(op.handle);
+	if (!handle)
		return -EINVAL;
 
 	/* user-space handles are aligned to page boundaries, to prevent
@@ -350,11 +358,13 @@ int nvmap_ioctl_alloc_kind(struct file *filp, void __user *arg)
 	op.flags |= NVMAP_HANDLE_ZEROED_PAGES;
 #endif
 
-	return nvmap_alloc_handle(client, handle,
+	err = nvmap_alloc_handle(client, handle,
				  op.heap_mask,
				  op.align,
				  op.kind,
				  op.flags);
+	nvmap_handle_put(handle);
+	return err;
 }
 
 int nvmap_create_fd(struct nvmap_handle *h)
@@ -447,15 +457,9 @@ int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg, bool is32)
		return -EFAULT;
 
 	h = unmarshal_user_handle(op.handle);
-
 	if (!h)
		return -EINVAL;
 
-	h = nvmap_handle_get(h);
-
-	if (!h)
-		return -EPERM;
-
 	if(!h->alloc) {
		nvmap_handle_put(h);
		return -EFAULT;
@@ -548,10 +552,6 @@ int nvmap_ioctl_get_param(struct file *filp, void __user *arg, bool is32)
 	if (!h)
		return -EINVAL;
 
-	h = nvmap_handle_get(h);
-	if (!h)
-		return -EINVAL;
-
 	nvmap_ref_lock(client);
 	ref = __nvmap_validate_locked(client, h);
 	if (IS_ERR_OR_NULL(ref)) {
@@ -604,13 +604,12 @@ int nvmap_ioctl_rw_handle(struct file *filp, int is_read, void __user *arg,
 	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;
 
-	h = unmarshal_user_handle(op.handle);
-	if (!h || !op.addr || !op.count || !op.elem_size)
+	if (!op.addr || !op.count || !op.elem_size)
		return -EINVAL;
 
-	h = nvmap_handle_get(h);
+	h = unmarshal_user_handle(op.handle);
 	if (!h)
-		return -EPERM;
+		return -EINVAL;
 
 	nvmap_kmaps_inc(h);
 	trace_nvmap_ioctl_rw_handle(client, h, is_read, op.offset,
@@ -649,11 +648,14 @@ static int __nvmap_cache_maint(struct nvmap_client *client,
 	unsigned long end;
 	int err = 0;
 
-	handle = unmarshal_user_handle(op->handle);
-	if (!handle || !op->addr || op->op < NVMAP_CACHE_OP_WB ||
+	if (!op->addr || op->op < NVMAP_CACHE_OP_WB ||
	    op->op > NVMAP_CACHE_OP_WB_INV)
		return -EINVAL;
 
+	handle = unmarshal_user_handle(op->handle);
+	if (!handle)
+		return -EINVAL;
+
 	down_read(&current->mm->mmap_sem);
 
 	vma = find_vma(current->active_mm, (unsigned long)op->addr);
@@ -680,6 +682,7 @@ static int __nvmap_cache_maint(struct nvmap_client *client,
			    false);
 out:
 	up_read(&current->mm->mmap_sem);
+	nvmap_handle_put(handle);
 	return err;
 }
 
@@ -1147,7 +1150,8 @@ int nvmap_ioctl_cache_maint_list(struct file *filp, void __user *arg,
 	u32 *offset_ptr;
 	u32 *size_ptr;
 	struct nvmap_handle **refs;
-	int i, err = 0;
+	int err = 0;
+	u32 i, n_unmarshal_handles = 0;
 
 	if (copy_from_user(&op, arg, sizeof(op)))
		return -EFAULT;
@@ -1189,6 +1193,7 @@ int nvmap_ioctl_cache_maint_list(struct file *filp, void __user *arg,
			err = -EINVAL;
			goto free_mem;
		}
+		n_unmarshal_handles++;
 	}
 
 	if (is_reserve_ioctl)
@@ -1199,6 +1204,8 @@ int nvmap_ioctl_cache_maint_list(struct file *filp, void __user *arg,
					   op.op, op.nr);
 
 free_mem:
+	for (i = 0; i < n_unmarshal_handles; i++)
+		nvmap_handle_put(refs[i]);
 	kfree(refs);
 	return err;
 }
-- 
2.39.2
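
A minimal sketch of the caller discipline this change establishes, for review context only (it is not part of the patch): unmarshal_user_handle() and nvmap_get_id_from_dmabuf_fd() now return a handle whose ref count has already been incremented, so every caller has to drop that reference with nvmap_handle_put() on each exit path once it is done with the handle. The ioctl handler below is hypothetical and assumes it sits in nvmap_ioctl.c where these helpers are visible; only unmarshal_user_handle(), nvmap_handle_put() and copy_from_user() are interfaces actually used by the patch above.

static int nvmap_ioctl_example(struct file *filp, void __user *arg)
{
	struct nvmap_handle *handle;
	__u32 id;

	if (copy_from_user(&id, arg, sizeof(id)))
		return -EFAULT;

	/* Takes a reference on success, so the handle cannot be freed
	 * concurrently while this handler is still using it. */
	handle = unmarshal_user_handle(id);
	if (!handle)
		return -EINVAL;

	/* ... operate on the handle here ... */

	/* Every exit path after a successful unmarshal must drop the ref. */
	nvmap_handle_put(handle);
	return 0;
}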