int memmgr_fd,
ulong mem_id,
u64 *offset_align,
- u32 flags /*NVHOST_AS_MAP_BUFFER_FLAGS_*/)
+ u32 flags, /*NVHOST_AS_MAP_BUFFER_FLAGS_*/
+ u32 kind)
{
int err = 0;
struct vm_gk20a *vm = (struct vm_gk20a *)as_share->priv;
}
ret_va = gk20a_vm_map(vm, memmgr, r, *offset_align,
- flags, NV_KIND_DEFAULT, NULL, true,
+ flags, kind, NULL, true,
mem_flag_none);
*offset_align = ret_va;
if (!ret_va) {
err = nvhost_as_ioctl_map_buffer(as_share,
(struct nvhost_as_map_buffer_args *)buf);
break;
+ case NVHOST_AS_IOCTL_MAP_BUFFER_EX:
+ trace_nvhost_as_ioctl_map_buffer(dev_name(&ch->dev->dev));
+ err = nvhost_as_ioctl_map_buffer_ex(as_share,
+ (struct nvhost_as_map_buffer_ex_args *)buf);
+ break;
case NVHOST_AS_IOCTL_UNMAP_BUFFER:
trace_nvhost_as_ioctl_unmap_buffer(dev_name(&ch->dev->dev));
err = nvhost_as_ioctl_unmap_buffer(as_share,
return pdata->as_ops->free_space(as_share, args);
}
+/*
+ * NVHOST_AS_IOCTL_MAP_BUFFER_EX handler: map a dmabuf fd into this
+ * address space with an explicit page 'kind', delegating the actual
+ * mapping to the per-device as_ops->map_buffer hook.
+ *
+ * Returns 0 on success, -EINVAL if any reserved padding word is
+ * non-zero, or whatever error the map_buffer hook reports.
+ */
+int nvhost_as_ioctl_map_buffer_ex(struct nvhost_as_share *as_share,
+				struct nvhost_as_map_buffer_ex_args *args)
+{
+	struct nvhost_device_data *pdata =
+		nvhost_get_devdata(as_share->ch->dev);
+	int i;
+
+	nvhost_dbg_fn("");
+
+	/* Reject requests with any reserved padding word set, so those
+	 * words can safely be given meaning by a future ABI revision
+	 * without breaking old user space. */
+	for (i = 0; i < ARRAY_SIZE(args->padding); i++)
+		if (args->padding[i])
+			return -EINVAL;
+
+	/* memmgr_fd is 0: the dmabuf fd alone identifies the buffer.
+	 * NOTE(review): args->kind is __s32 but the hook takes u32, so
+	 * NV_KIND_DEFAULT (-1) arrives as 0xffffffff -- confirm the
+	 * implementation interprets it as signed. args->page_size is
+	 * not consumed or written back here -- confirm the hook (or a
+	 * later patch) honors it. */
+	return pdata->as_ops->map_buffer(as_share, 0, args->dmabuf_fd,
+					 &args->offset, args->flags,
+					 args->kind);
+}
+
int nvhost_as_ioctl_map_buffer(struct nvhost_as_share *as_share,
struct nvhost_as_map_buffer_args *args)
{
nvhost_get_devdata(as_share->ch->dev);
nvhost_dbg_fn("");
- return pdata->as_ops->map_buffer(as_share,
- args->nvmap_fd, args->nvmap_handle,
- &args->o_a.align, args->flags);
+ return pdata->as_ops->map_buffer(as_share, args->nvmap_fd,
+ args->nvmap_handle, &args->o_a.align,
+ args->flags, NV_KIND_DEFAULT);
/* args->o_a.offset will be set if !err */
}
*
* Tegra Host Address Space
*
- * Copyright (c) 2011-2013, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
int memmgr_fd,
ulong mem_id,
u64 *offset_align,
- u32 flags /*NVHOST_AS_MAP_BUFFER_FLAGS_*/);
+ u32 flags, /*NVHOST_AS_MAP_BUFFER_FLAGS_*/
+ u32 kind);
int (*unmap_buffer)(struct nvhost_as_share *, u64 offset);
};
struct nvhost_as_bind_channel_args *args);
int nvhost_as_ioctl_map_buffer(struct nvhost_as_share *as_share,
struct nvhost_as_map_buffer_args *args);
+int nvhost_as_ioctl_map_buffer_ex(struct nvhost_as_share *as_share,
+ struct nvhost_as_map_buffer_ex_args *args);
int nvhost_as_ioctl_unmap_buffer(struct nvhost_as_share *as_share,
struct nvhost_as_unmap_buffer_args *args);
} o_a;
};
+/*
+ * Mapping dmabuf fds into an address space:
+ *
+ * The caller requests a mapping to a particular page 'kind'.
+ *
+ * If 'page_size' is set to 0, the dmabuf's alignment/sizing will be used to
+ * determine the page size (largest possible). The page size chosen will be
+ * returned to the caller in the 'page_size' field in that case.
+ *
+ * NOTE(review): the MAP_BUFFER_EX ioctl handler in this patch rejects
+ * non-zero padding but does not itself write 'page_size' back -- confirm
+ * the as_ops->map_buffer implementation fills it in.
+ */
+struct nvhost_as_map_buffer_ex_args {
+	__u32 flags; /* in/out, NVHOST_AS_MAP_BUFFER_FLAGS_* */
+#define NV_KIND_DEFAULT -1
+	__s32 kind; /* in (-1 represents default) */
+	__u32 dmabuf_fd; /* in */
+	__u32 page_size; /* inout, 0:= best fit to buffer */
+
+	__u32 padding[4]; /* reserved for future usage; must be zero */
+
+	__u64 offset; /* in/out, we use this address if flag
+		       * FIXED_OFFSET is set. This will fail
+		       * if space is not properly allocated. The
+		       * actual virtual address to which we mapped
+		       * the buffer is returned in this field. */
+};
+
/*
* Unmapping a buffer:
*
_IOWR(NVHOST_AS_IOCTL_MAGIC, 5, struct nvhost_as_unmap_buffer_args)
#define NVHOST_AS_IOCTL_ALLOC_SPACE \
_IOWR(NVHOST_AS_IOCTL_MAGIC, 6, struct nvhost_as_alloc_space_args)
+#define NVHOST_AS_IOCTL_MAP_BUFFER_EX \
+ _IOWR(NVHOST_AS_IOCTL_MAGIC, 7, struct nvhost_as_map_buffer_ex_args)
#define NVHOST_AS_IOCTL_LAST \
- _IOC_NR(NVHOST_AS_IOCTL_ALLOC_SPACE)
+ _IOC_NR(NVHOST_AS_IOCTL_MAP_BUFFER_EX)
#define NVHOST_AS_IOCTL_MAX_ARG_SIZE \
- sizeof(struct nvhost_as_map_buffer_args)
+ sizeof(struct nvhost_as_map_buffer_ex_args)
#endif