unsigned long sys_stride, unsigned long elem_size,
unsigned long count);
+/* NOTE: On success this returns a handle with a reference held; callers
+ * must invoke nvmap_handle_put() on the returned nvmap_handle when done.
+ * Returns NULL if the handle could not be resolved. */
struct nvmap_handle *unmarshal_user_handle(__u32 handle)
{
struct nvmap_handle *h;
struct nvmap_handle *on_stack[16];
struct nvmap_handle **refs;
unsigned long __user *output;
- unsigned int i;
int err = 0;
+ u32 i, n_unmarshal_handles = 0;
#ifdef CONFIG_COMPAT
if (is32) {
err = -EINVAL;
goto out;
}
+ n_unmarshal_handles++;
}
} else {
refs = on_stack;
err = -EINVAL;
goto out;
}
+ n_unmarshal_handles++;
}
trace_nvmap_ioctl_pinop(filp->private_data, is_pin, op.count, refs);
nvmap_unpin_ids(filp->private_data, op.count, refs);
out:
+ for (i = 0; i < n_unmarshal_handles; i++)
+ nvmap_handle_put(refs[i]);
+
if (refs != on_stack)
kfree(refs);
if (!h)
return -EINVAL;
- h = nvmap_handle_get(h);
-
- if (!h)
- return -EPERM;
-
op.id = marshal_id(h);
if (client == h->owner)
h->global = true;
return -EINVAL;
op.fd = nvmap_get_dmabuf_fd(client, handle);
+ nvmap_handle_put(handle);
if (op.fd < 0)
return op.fd;
struct nvmap_alloc_handle op;
struct nvmap_client *client = filp->private_data;
struct nvmap_handle *handle;
+ int err;
if (copy_from_user(&op, arg, sizeof(op)))
return -EFAULT;
- handle = unmarshal_user_handle(op.handle);
- if (!handle)
+ if (op.align & (op.align - 1))
return -EINVAL;
- if (op.align & (op.align - 1))
+ handle = unmarshal_user_handle(op.handle);
+ if (!handle)
return -EINVAL;
/* user-space handles are aligned to page boundaries, to prevent
op.flags |= NVMAP_HANDLE_ZEROED_PAGES;
#endif
- return nvmap_alloc_handle(client, handle, op.heap_mask, op.align,
+ err = nvmap_alloc_handle(client, handle, op.heap_mask, op.align,
0, /* no kind */
op.flags & (~NVMAP_HANDLE_KIND_SPECIFIED));
+ nvmap_handle_put(handle);
+ return err;
}
int nvmap_ioctl_alloc_kind(struct file *filp, void __user *arg)
struct nvmap_alloc_kind_handle op;
struct nvmap_client *client = filp->private_data;
struct nvmap_handle *handle;
+ int err;
if (copy_from_user(&op, arg, sizeof(op)))
return -EFAULT;
- handle = unmarshal_user_handle(op.handle);
- if (!handle)
+ if (op.align & (op.align - 1))
return -EINVAL;
- if (op.align & (op.align - 1))
+ handle = unmarshal_user_handle(op.handle);
+ if (!handle)
return -EINVAL;
/* user-space handles are aligned to page boundaries, to prevent
op.flags |= NVMAP_HANDLE_ZEROED_PAGES;
#endif
- return nvmap_alloc_handle(client, handle,
+ err = nvmap_alloc_handle(client, handle,
op.heap_mask,
op.align,
op.kind,
op.flags);
+ nvmap_handle_put(handle);
+ return err;
}
int nvmap_create_fd(struct nvmap_handle *h)
return -EFAULT;
h = unmarshal_user_handle(op.handle);
-
if (!h)
return -EINVAL;
- h = nvmap_handle_get(h);
-
- if (!h)
- return -EPERM;
-
if(!h->alloc) {
nvmap_handle_put(h);
return -EFAULT;
if (!h)
return -EINVAL;
- h = nvmap_handle_get(h);
- if (!h)
- return -EINVAL;
-
nvmap_ref_lock(client);
ref = __nvmap_validate_locked(client, h);
if (IS_ERR_OR_NULL(ref)) {
if (copy_from_user(&op, arg, sizeof(op)))
return -EFAULT;
- h = unmarshal_user_handle(op.handle);
- if (!h || !op.addr || !op.count || !op.elem_size)
+ if (!op.addr || !op.count || !op.elem_size)
return -EINVAL;
- h = nvmap_handle_get(h);
+ h = unmarshal_user_handle(op.handle);
if (!h)
- return -EPERM;
+ return -EINVAL;
nvmap_kmaps_inc(h);
trace_nvmap_ioctl_rw_handle(client, h, is_read, op.offset,
unsigned long end;
int err = 0;
- handle = unmarshal_user_handle(op->handle);
- if (!handle || !op->addr || op->op < NVMAP_CACHE_OP_WB ||
+ if (!op->addr || op->op < NVMAP_CACHE_OP_WB ||
op->op > NVMAP_CACHE_OP_WB_INV)
return -EINVAL;
+ handle = unmarshal_user_handle(op->handle);
+ if (!handle)
+ return -EINVAL;
+
down_read(¤t->mm->mmap_sem);
vma = find_vma(current->active_mm, (unsigned long)op->addr);
false);
out:
up_read(¤t->mm->mmap_sem);
+ nvmap_handle_put(handle);
return err;
}
u32 *offset_ptr;
u32 *size_ptr;
struct nvmap_handle **refs;
- int i, err = 0;
+ int err = 0;
+ u32 i, n_unmarshal_handles = 0;
if (copy_from_user(&op, arg, sizeof(op)))
return -EFAULT;
err = -EINVAL;
goto free_mem;
}
+ n_unmarshal_handles++;
}
if (is_reserve_ioctl)
op.op, op.nr);
free_mem:
+ for (i = 0; i < n_unmarshal_handles; i++)
+ nvmap_handle_put(refs[i]);
kfree(refs);
return err;
}