err = nvmap_ioctl_cache_maint(filp, uarg, false);
break;
+ case NVMAP_IOC_CACHE_LIST:
+ err = nvmap_ioctl_cache_maint_list(filp, uarg);
+ break;
+
case NVMAP_IOC_SHARE:
err = nvmap_ioctl_share_dmabuf(filp, uarg);
break;
nvmap_free_pte(nvmap_dev, pte);
return ret ?: copied;
}
+
+/*
+ * NVMAP_IOC_CACHE_LIST handler: run one cache maintenance operation over
+ * a list of handles.  The ioctl argument carries user pointers to three
+ * parallel u32 arrays (handles, offsets, sizes) plus a count and an op.
+ *
+ * Returns 0 on success, -EFAULT on bad user pointers, -EINVAL on an
+ * empty/oversized list, a NULL array pointer or an unknown handle, and
+ * -ENOMEM on allocation failure.
+ */
+int nvmap_ioctl_cache_maint_list(struct file *filp, void __user *arg)
+{
+	struct nvmap_cache_op_list op;
+	u32 __user *handle_ptr;
+	u32 *offsets = NULL;
+	u32 *sizes = NULL;
+	struct nvmap_handle **refs = NULL;
+	u32 i;
+	int err = 0;
+
+	if (copy_from_user(&op, arg, sizeof(op)))
+		return -EFAULT;
+
+	/*
+	 * Reject an empty list, and counts for which nr * sizeof(u32)
+	 * would wrap a 32-bit size_t and defeat the bounds checking done
+	 * by copy_from_user() below.
+	 */
+	if (!op.nr || op.nr > UINT_MAX / sizeof(u32))
+		return -EINVAL;
+
+	/* All three arrays are mandatory -- including op.handles. */
+	if (!op.handles || !op.offsets || !op.sizes)
+		return -EINVAL;
+
+	refs = kcalloc(op.nr, sizeof(*refs), GFP_KERNEL);
+	offsets = kcalloc(op.nr, sizeof(*offsets), GFP_KERNEL);
+	sizes = kcalloc(op.nr, sizeof(*sizes), GFP_KERNEL);
+	if (!refs || !offsets || !sizes) {
+		err = -ENOMEM;
+		goto free_mem;
+	}
+
+	/*
+	 * Snapshot the offset/size arrays into kernel memory so that the
+	 * maintenance loop never dereferences user pointers directly;
+	 * copy_from_user() also performs the access_ok() validation.
+	 */
+	if (copy_from_user(offsets, (void __user *)(uintptr_t)op.offsets,
+			   op.nr * sizeof(u32)) ||
+	    copy_from_user(sizes, (void __user *)(uintptr_t)op.sizes,
+			   op.nr * sizeof(u32))) {
+		err = -EFAULT;
+		goto free_mem;
+	}
+
+	handle_ptr = (u32 __user *)(uintptr_t)op.handles;
+
+	for (i = 0; i < op.nr; i++) {
+		u32 handle;
+
+		if (copy_from_user(&handle, &handle_ptr[i], sizeof(handle))) {
+			err = -EFAULT;
+			goto free_mem;
+		}
+
+		/* Translate the user-space handle id to a kernel handle. */
+		refs[i] = unmarshal_user_handle(handle);
+		if (!refs[i]) {
+			err = -EINVAL;
+			goto free_mem;
+		}
+	}
+
+	err = nvmap_do_cache_maint_list(refs, offsets, sizes, op.op, op.nr);
+
+free_mem:
+	kfree(sizes);
+	kfree(offsets);
+	kfree(refs);
+	return err;
+}
+/*
+ * nvmap_do_cache_maint_list - apply one cache maintenance op to each
+ * handle in the list.  A zero sizes[i] selects the whole handle, with
+ * the offset forced to 0.  Returns 0 on success, or the first error
+ * from __nvmap_do_cache_maint.
+ *
+ * NOTE(review): this revision stops writing the normalized offset/size
+ * values back into the caller's arrays -- normalization now happens in
+ * per-iteration locals, so offsets[]/sizes[] are treated as read-only.
+ */
int nvmap_do_cache_maint_list(struct nvmap_handle **handles, u32 *offsets,
u32 *sizes, int op, int nr)
{
- int i, err = 0;
+ int i;
u64 total = 0;
+ /* Sum the effective sizes; a large total triggers the full flush. */
- for (i = 0; i < nr; i++) {
- offsets[i] = sizes[i] ? offsets[i] : 0;
- sizes[i] = sizes[i] ? sizes[i] : handles[i]->size;
- total += sizes[i];
- }
+ for (i = 0; i < nr; i++)
+ total += sizes[i] ? sizes[i] : handles[i]->size;
/* Full flush in the case the passed list is bigger than our
 * threshold. */
nvmap_stats_read(NS_CFLUSH_DONE));
} else {
for (i = 0; i < nr; i++) {
- err = __nvmap_do_cache_maint(handles[i]->owner,
- handles[i], offsets[i],
- offsets[i] + sizes[i],
- op, false);
+ /* Normalize per entry: size 0 => whole handle at offset 0. */
+ u32 size = sizes[i] ? sizes[i] : handles[i]->size;
+ u32 offset = sizes[i] ? offsets[i] : 0;
+ int err = __nvmap_do_cache_maint(handles[i]->owner,
+ handles[i], offset,
+ offset + size,
+ op, false);
if (err)
- break;
+ return err;
}
}
- return err;
+ return 0;
}
void nvmap_zap_handle(struct nvmap_handle *handle,
#endif
+/*
+ * Argument for NVMAP_IOC_CACHE_LIST.  User pointers are carried as
+ * __u64 so the struct layout is identical for 32- and 64-bit user
+ * space, replacing the previous CONFIG_COMPAT-dependent layout.
+ */
struct nvmap_cache_op_list {
-#ifdef CONFIG_COMPAT
- __u32 handles; /* Uspace ptr to list of handles */
-#else
- struct nvmap_handle **handles;
-#endif
+ __u64 handles; /* Ptr to u32 type array, holding handles */
+ __u64 offsets; /* Ptr to u32 type array, holding offsets
+ * into handle mem */
+ __u64 sizes; /* Ptr to u32 type array, holding sizes of memory
+ * regions within each handle */
+ __u32 nr; /* Number of handles */
+ __s32 op; /* wb/wb_inv/inv */
};
#define NVMAP_IOC_MAGIC 'N'