return (struct mem_handle *) ((uintptr_t)h | mem_mgr_type_dmabuf);
}
-int nvhost_dmabuf_get_param(struct mem_mgr *memmgr, struct mem_handle *handle,
+int nvhost_dmabuf_get_param(struct mem_handle *handle,
u32 param, u64 *result)
{
/* TBD: find a way to associate size, kind, etc */
void nvhost_dmabuf_put_mgr(struct mem_mgr *mgr);
struct mem_mgr *nvhost_dmabuf_get_mgr(struct mem_mgr *mgr);
struct mem_mgr *nvhost_dmabuf_get_mgr_file(int fd);
-struct mem_handle *nvhost_dmabuf_alloc(struct mem_mgr *mgr,
- size_t size, size_t align, int flags);
+struct mem_handle *nvhost_dmabuf_alloc(size_t size, size_t align, int flags);
void nvhost_dmabuf_put(struct mem_handle *handle);
struct sg_table *nvhost_dmabuf_pin(struct mem_handle *handle);
void nvhost_dmabuf_unpin(struct mem_handle *handle, struct sg_table *sgt);
void nvhost_dmabuf_kunmap(struct mem_handle *handle, unsigned int pagenum,
void *addr);
struct mem_handle *nvhost_dmabuf_get(ulong id, struct platform_device *dev);
-int nvhost_dmabuf_get_param(struct mem_mgr *memmgr, struct mem_handle *handle,
+int nvhost_dmabuf_get_param(struct mem_handle *handle,
u32 param, u64 *result);
size_t nvhost_dmabuf_size(struct mem_handle *handle);
#endif
if (args->padding[i])
return -EINVAL;
- return gk20a_vm_map_buffer(as_share, 0, args->dmabuf_fd,
+ return gk20a_vm_map_buffer(as_share, args->dmabuf_fd,
&args->offset, args->flags,
args->kind);
}
struct nvhost_as_map_buffer_args *args)
{
nvhost_dbg_fn("");
- return gk20a_vm_map_buffer(as_share, args->nvmap_fd,
- args->nvmap_handle, &args->o_a.align,
+ return gk20a_vm_map_buffer(as_share, args->nvmap_handle,
+ &args->o_a.align,
args->flags, NV_KIND_DEFAULT);
/* args->o_a.offset will be set if !err */
}
#include <linux/scatterlist.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
+#include <linux/dma-buf.h>
#include "dev.h"
#include "debug.h"
-#include "nvhost_memmgr.h"
#include "nvhost_sync.h"
#include "gk20a.h"
#include "dbg_gpu_gk20a.h"
+#include "nvhost_memmgr.h"
#include "hw_ram_gk20a.h"
#include "hw_fifo_gk20a.h"
#include "hw_pbdma_gk20a.h"
static void gk20a_free_cycle_stats_buffer(struct channel_gk20a *ch)
{
- struct mem_mgr *memmgr = gk20a_channel_mem_mgr(ch);
/* disable existing cyclestats buffer */
mutex_lock(&ch->cyclestate.cyclestate_buffer_mutex);
if (ch->cyclestate.cyclestate_buffer_handler) {
- nvhost_memmgr_munmap(ch->cyclestate.cyclestate_buffer_handler,
+ dma_buf_vunmap(ch->cyclestate.cyclestate_buffer_handler,
ch->cyclestate.cyclestate_buffer);
- nvhost_memmgr_put(memmgr,
- ch->cyclestate.cyclestate_buffer_handler);
+ dma_buf_put(ch->cyclestate.cyclestate_buffer_handler);
ch->cyclestate.cyclestate_buffer_handler = NULL;
ch->cyclestate.cyclestate_buffer = NULL;
ch->cyclestate.cyclestate_buffer_size = 0;
static int gk20a_channel_cycle_stats(struct channel_gk20a *ch,
struct nvhost_cycle_stats_args *args)
{
- struct mem_mgr *memmgr = gk20a_channel_mem_mgr(ch);
- struct mem_handle *handle_ref;
+ struct dma_buf *dmabuf;
void *virtual_address;
- u64 cyclestate_buffer_size;
- struct platform_device *dev = ch->ch->dev;
if (args->nvmap_handle && !ch->cyclestate.cyclestate_buffer_handler) {
/* set up new cyclestats buffer */
- handle_ref = nvhost_memmgr_get(memmgr,
- args->nvmap_handle, dev);
- if (IS_ERR(handle_ref))
- return PTR_ERR(handle_ref);
- virtual_address = nvhost_memmgr_mmap(handle_ref);
+ dmabuf = dma_buf_get(args->nvmap_handle);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+ virtual_address = dma_buf_vmap(dmabuf);
if (!virtual_address)
return -ENOMEM;
- nvhost_memmgr_get_param(memmgr, handle_ref,
- NVMAP_HANDLE_PARAM_SIZE,
- &cyclestate_buffer_size);
-
- ch->cyclestate.cyclestate_buffer_handler = handle_ref;
+ ch->cyclestate.cyclestate_buffer_handler = dmabuf;
ch->cyclestate.cyclestate_buffer = virtual_address;
- ch->cyclestate.cyclestate_buffer_size = cyclestate_buffer_size;
+ ch->cyclestate.cyclestate_buffer_size = dmabuf->size;
return 0;
} else if (!args->nvmap_handle &&
static int gk20a_init_error_notifier(struct channel_gk20a *ch,
struct nvhost_set_error_notifier *args) {
- struct platform_device *dev = ch->g->dev;
void *va;
- struct mem_mgr *memmgr;
- struct mem_handle *handle_ref;
+ struct dma_buf *dmabuf;
if (!args->mem) {
pr_err("gk20a_init_error_notifier: invalid memory handle\n");
return -EINVAL;
}
- memmgr = gk20a_channel_mem_mgr(ch);
- handle_ref = nvhost_memmgr_get(memmgr, args->mem, dev);
+ dmabuf = dma_buf_get(args->mem);
if (ch->error_notifier_ref)
gk20a_free_error_notifiers(ch);
- if (IS_ERR(handle_ref)) {
+ if (IS_ERR(dmabuf)) {
pr_err("Invalid handle: %d\n", args->mem);
return -EINVAL;
}
/* map handle */
- va = nvhost_memmgr_mmap(handle_ref);
+ va = dma_buf_vmap(dmabuf);
if (!va) {
- nvhost_memmgr_put(memmgr, handle_ref);
+ dma_buf_put(dmabuf);
pr_err("Cannot map notifier handle\n");
return -ENOMEM;
}
/* set channel notifiers pointer */
- ch->error_notifier_ref = handle_ref;
+ ch->error_notifier_ref = dmabuf;
ch->error_notifier = va + args->offset;
ch->error_notifier_va = va;
memset(ch->error_notifier, 0, sizeof(struct nvhost_notification));
static void gk20a_free_error_notifiers(struct channel_gk20a *ch)
{
if (ch->error_notifier_ref) {
- struct mem_mgr *memmgr = gk20a_channel_mem_mgr(ch);
- nvhost_memmgr_munmap(ch->error_notifier_ref,
- ch->error_notifier_va);
- nvhost_memmgr_put(memmgr, ch->error_notifier_ref);
+ dma_buf_vunmap(ch->error_notifier_ref, ch->error_notifier_va);
+ dma_buf_put(ch->error_notifier_ref);
ch->error_notifier_ref = 0;
ch->error_notifier = 0;
ch->error_notifier_va = 0;
{
struct channel_gk20a *ch = (struct channel_gk20a *)filp->private_data;
struct gk20a *g = ch->g;
- struct mem_mgr *memmgr = gk20a_channel_mem_mgr(ch);
trace_nvhost_channel_release(dev_name(&g->dev->dev));
gk20a_channel_idle(ch->g->dev);
gk20a_put_client(g);
- if (memmgr)
- nvhost_memmgr_put_mgr(memmgr);
filp->private_data = NULL;
return 0;
}
void *mem = NULL;
unsigned int words;
u64 offset;
- struct mem_handle *r = NULL;
+ struct dma_buf *dmabuf = NULL;
if (nvhost_debug_trace_cmdbuf) {
u64 gpu_va = (u64)g->entry0 |
(u64)((u64)pbdma_gp_entry1_get_hi_v(g->entry1) << 32);
- struct mem_mgr *memmgr = NULL;
int err;
words = pbdma_gp_entry1_length_v(g->entry1);
- err = gk20a_vm_find_buffer(c->vm, gpu_va, &memmgr, &r,
- &offset);
+ err = gk20a_vm_find_buffer(c->vm, gpu_va, &dmabuf, &offset);
if (!err)
- mem = nvhost_memmgr_mmap(r);
+ mem = dma_buf_vmap(dmabuf);
}
if (mem) {
offset + i * sizeof(u32),
mem);
}
- nvhost_memmgr_munmap(r, mem);
+ dma_buf_vunmap(dmabuf, mem);
}
}
u32 payload, long timeout)
{
struct platform_device *pdev = ch->ch->dev;
- struct mem_mgr *memmgr = gk20a_channel_mem_mgr(ch);
- struct mem_handle *handle_ref;
+ struct dma_buf *dmabuf;
void *data;
u32 *semaphore;
int ret = 0;
if (ch->has_timedout)
return -ETIMEDOUT;
- handle_ref = nvhost_memmgr_get(memmgr, id, pdev);
- if (IS_ERR(handle_ref)) {
+ dmabuf = dma_buf_get(id);
+ if (IS_ERR(dmabuf)) {
nvhost_err(&pdev->dev, "invalid notifier nvmap handle 0x%lx",
id);
return -EINVAL;
}
- data = nvhost_memmgr_kmap(handle_ref, offset >> PAGE_SHIFT);
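+ /* kmap just the page that contains the target word; offset >> PAGE_SHIFT is its page index */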
+ data = dma_buf_kmap(dmabuf, offset >> PAGE_SHIFT);
if (!data) {
nvhost_err(&pdev->dev, "failed to map notifier memory");
ret = -EINVAL;
else if (remain < 0)
ret = remain;
- nvhost_memmgr_kunmap(handle_ref, offset >> PAGE_SHIFT, data);
+ dma_buf_kunmap(dmabuf, offset >> PAGE_SHIFT, data);
cleanup_put:
- nvhost_memmgr_put(memmgr, handle_ref);
+ dma_buf_put(dmabuf);
return ret;
}
struct nvhost_wait_args *args)
{
struct device *d = dev_from_gk20a(ch->g);
- struct platform_device *dev = ch->ch->dev;
- struct mem_mgr *memmgr = gk20a_channel_mem_mgr(ch);
- struct mem_handle *handle_ref;
+ struct dma_buf *dmabuf;
struct notification *notif;
struct timespec tv;
u64 jiffies;
id = args->condition.notifier.nvmap_handle;
offset = args->condition.notifier.offset;
- handle_ref = nvhost_memmgr_get(memmgr, id, dev);
- if (IS_ERR(handle_ref)) {
+ dmabuf = dma_buf_get(id);
+ if (IS_ERR(dmabuf)) {
nvhost_err(d, "invalid notifier nvmap handle 0x%lx",
id);
return -EINVAL;
}
- notif = nvhost_memmgr_mmap(handle_ref);
+ notif = dma_buf_vmap(dmabuf);
if (!notif) {
nvhost_err(d, "failed to map notifier memory");
return -ENOMEM;
notif->info16 = ch->hw_chid; /* should be method offset */
notif_clean_up:
- nvhost_memmgr_munmap(handle_ref, notif);
+ dma_buf_vunmap(dmabuf, notif);
return ret;
case NVHOST_WAIT_TYPE_SEMAPHORE:
break;
}
case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
- {
- int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
- struct mem_mgr *new_client = nvhost_memmgr_get_mgr_file(fd);
-
- if (IS_ERR(new_client)) {
- err = PTR_ERR(new_client);
- break;
- }
- if (ch->memmgr)
- nvhost_memmgr_put_mgr(ch->memmgr);
- ch->memmgr = new_client;
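+ /* nothing to do: buffers are now passed to each ioctl as dma-buf fds */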
break;
- }
case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
gk20a_channel_busy(dev);
err = gk20a_alloc_obj_ctx(ch,
#include <linux/nvhost_ioctl.h>
struct gk20a;
struct gr_gk20a;
-struct mem_mgr;
-struct mem_handle;
struct dbg_session_gk20a;
#include "nvhost_channel.h"
bool vpr;
pid_t pid;
- struct mem_mgr *memmgr;
struct nvhost_channel *ch;
struct list_head jobs;
struct {
void *cyclestate_buffer;
u32 cyclestate_buffer_size;
- struct mem_handle *cyclestate_buffer_handler;
+ struct dma_buf *cyclestate_buffer_handler;
struct mutex cyclestate_buffer_mutex;
} cyclestate;
#endif
u32 timeout_ms_max;
bool timeout_debug_dump;
- struct mem_handle *error_notifier_ref;
+ struct dma_buf *error_notifier_ref;
struct nvhost_notification *error_notifier;
void *error_notifier_va;
};
int gk20a_channel_suspend(struct gk20a *g);
int gk20a_channel_resume(struct gk20a *g);
-static inline
-struct mem_mgr *gk20a_channel_mem_mgr(struct channel_gk20a *ch)
-{
- return ch->memmgr;
-}
/* Channel file operations */
int gk20a_channel_open(struct inode *inode, struct file *filp);
long gk20a_channel_ioctl(struct file *filp,
if (err)
goto clean_up;
- gold_ptr = nvhost_memmgr_mmap(gr->global_ctx_buffer[GOLDEN_CTX].ref);
+ gold_ptr = dma_buf_vmap(gr->global_ctx_buffer[GOLDEN_CTX].ref);
if (!gold_ptr)
goto clean_up;
nvhost_dbg_fn("done");
if (gold_ptr)
- nvhost_memmgr_munmap(gr->global_ctx_buffer[GOLDEN_CTX].ref,
- gold_ptr);
+ dma_buf_vunmap(gr->global_ctx_buffer[GOLDEN_CTX].ref,
+ gold_ptr);
if (ctx_ptr)
vunmap(ctx_ptr);
static int gr_gk20a_alloc_global_ctx_buffers(struct gk20a *g)
{
struct gr_gk20a *gr = &g->gr;
- struct mem_mgr *memmgr = mem_mgr_from_g(g);
- struct mem_handle *mem;
+ struct dma_buf *dmabuf;
int i, attr_buffer_size;
u32 cb_buffer_size = gr->bundle_cb_default_size *
nvhost_dbg_info("cb_buffer_size : %d", cb_buffer_size);
- mem = nvhost_memmgr_alloc(memmgr, cb_buffer_size,
- DEFAULT_ALLOC_ALIGNMENT,
- DEFAULT_ALLOC_FLAGS,
- 0);
- if (IS_ERR(mem))
+ dmabuf = (struct dma_buf *)
+ nvhost_memmgr_alloc(cb_buffer_size,
+ DEFAULT_ALLOC_ALIGNMENT,
+ DEFAULT_ALLOC_FLAGS,
+ 0);
+ if (IS_ERR(dmabuf))
goto clean_up;
- gr->global_ctx_buffer[CIRCULAR].ref = mem;
+ gr->global_ctx_buffer[CIRCULAR].ref = dmabuf;
gr->global_ctx_buffer[CIRCULAR].size = cb_buffer_size;
- mem = nvhost_memmgr_alloc(memmgr, cb_buffer_size,
- DEFAULT_ALLOC_ALIGNMENT,
- DEFAULT_ALLOC_FLAGS,
- NVMAP_HEAP_CARVEOUT_VPR);
- if (!IS_ERR(mem)) {
- gr->global_ctx_buffer[CIRCULAR_VPR].ref = mem;
+ dmabuf = (struct dma_buf *)
+ nvhost_memmgr_alloc(cb_buffer_size,
+ DEFAULT_ALLOC_ALIGNMENT,
+ DEFAULT_ALLOC_FLAGS,
+ NVMAP_HEAP_CARVEOUT_VPR);
+ if (!IS_ERR(dmabuf)) {
+ gr->global_ctx_buffer[CIRCULAR_VPR].ref = dmabuf;
gr->global_ctx_buffer[CIRCULAR_VPR].size = cb_buffer_size;
}
nvhost_dbg_info("pagepool_buffer_size : %d", pagepool_buffer_size);
- mem = nvhost_memmgr_alloc(memmgr, pagepool_buffer_size,
- DEFAULT_ALLOC_ALIGNMENT,
- DEFAULT_ALLOC_FLAGS,
- 0);
- if (IS_ERR(mem))
+ dmabuf = (struct dma_buf *)
+ nvhost_memmgr_alloc(pagepool_buffer_size,
+ DEFAULT_ALLOC_ALIGNMENT,
+ DEFAULT_ALLOC_FLAGS,
+ 0);
+ if (IS_ERR(dmabuf))
goto clean_up;
- gr->global_ctx_buffer[PAGEPOOL].ref = mem;
+ gr->global_ctx_buffer[PAGEPOOL].ref = dmabuf;
gr->global_ctx_buffer[PAGEPOOL].size = pagepool_buffer_size;
- mem = nvhost_memmgr_alloc(memmgr, pagepool_buffer_size,
- DEFAULT_ALLOC_ALIGNMENT,
- DEFAULT_ALLOC_FLAGS,
- NVMAP_HEAP_CARVEOUT_VPR);
- if (!IS_ERR(mem)) {
- gr->global_ctx_buffer[PAGEPOOL_VPR].ref = mem;
+ dmabuf = (struct dma_buf *)
+ nvhost_memmgr_alloc(pagepool_buffer_size,
+ DEFAULT_ALLOC_ALIGNMENT,
+ DEFAULT_ALLOC_FLAGS,
+ NVMAP_HEAP_CARVEOUT_VPR);
+ if (!IS_ERR(dmabuf)) {
+ gr->global_ctx_buffer[PAGEPOOL_VPR].ref = dmabuf;
gr->global_ctx_buffer[PAGEPOOL_VPR].size = pagepool_buffer_size;
}
nvhost_dbg_info("attr_buffer_size : %d", attr_buffer_size);
- mem = nvhost_memmgr_alloc(memmgr, attr_buffer_size,
- DEFAULT_ALLOC_ALIGNMENT,
- DEFAULT_ALLOC_FLAGS,
- 0);
- if (IS_ERR(mem))
+ dmabuf = (struct dma_buf *)
+ nvhost_memmgr_alloc(attr_buffer_size,
+ DEFAULT_ALLOC_ALIGNMENT,
+ DEFAULT_ALLOC_FLAGS,
+ 0);
+ if (IS_ERR(dmabuf))
goto clean_up;
- gr->global_ctx_buffer[ATTRIBUTE].ref = mem;
+ gr->global_ctx_buffer[ATTRIBUTE].ref = dmabuf;
gr->global_ctx_buffer[ATTRIBUTE].size = attr_buffer_size;
- mem = nvhost_memmgr_alloc(memmgr, attr_buffer_size,
- DEFAULT_ALLOC_ALIGNMENT,
- DEFAULT_ALLOC_FLAGS,
- NVMAP_HEAP_CARVEOUT_VPR);
- if (!IS_ERR(mem)) {
- gr->global_ctx_buffer[ATTRIBUTE_VPR].ref = mem;
+ dmabuf = (struct dma_buf *)
+ nvhost_memmgr_alloc(attr_buffer_size,
+ DEFAULT_ALLOC_ALIGNMENT,
+ DEFAULT_ALLOC_FLAGS,
+ NVMAP_HEAP_CARVEOUT_VPR);
+ if (!IS_ERR(dmabuf)) {
+ gr->global_ctx_buffer[ATTRIBUTE_VPR].ref = dmabuf;
gr->global_ctx_buffer[ATTRIBUTE_VPR].size = attr_buffer_size;
}
nvhost_dbg_info("golden_image_size : %d",
gr->ctx_vars.golden_image_size);
- mem = nvhost_memmgr_alloc(memmgr, gr->ctx_vars.golden_image_size,
- DEFAULT_ALLOC_ALIGNMENT,
- DEFAULT_ALLOC_FLAGS,
- 0);
- if (IS_ERR(mem))
+ dmabuf = (struct dma_buf *)
+ nvhost_memmgr_alloc(gr->ctx_vars.golden_image_size,
+ DEFAULT_ALLOC_ALIGNMENT,
+ DEFAULT_ALLOC_FLAGS,
+ 0);
+ if (IS_ERR(dmabuf))
goto clean_up;
- gr->global_ctx_buffer[GOLDEN_CTX].ref = mem;
+ gr->global_ctx_buffer[GOLDEN_CTX].ref = dmabuf;
gr->global_ctx_buffer[GOLDEN_CTX].size =
gr->ctx_vars.golden_image_size;
nvhost_err(dev_from_gk20a(g), "fail");
for (i = 0; i < NR_GLOBAL_CTX_BUF; i++) {
if (gr->global_ctx_buffer[i].ref) {
- nvhost_memmgr_put(memmgr,
- gr->global_ctx_buffer[i].ref);
+ dma_buf_put(gr->global_ctx_buffer[i].ref);
memset(&gr->global_ctx_buffer[i],
0, sizeof(struct mem_desc));
}
static void gr_gk20a_free_global_ctx_buffers(struct gk20a *g)
{
struct gr_gk20a *gr = &g->gr;
- struct mem_mgr *memmgr = mem_mgr_from_g(g);
u32 i;
for (i = 0; i < NR_GLOBAL_CTX_BUF; i++) {
- nvhost_memmgr_put(memmgr, gr->global_ctx_buffer[i].ref);
+ dma_buf_put(gr->global_ctx_buffer[i].ref);
memset(&gr->global_ctx_buffer[i], 0, sizeof(struct mem_desc));
}
struct channel_gk20a *c)
{
struct vm_gk20a *ch_vm = c->vm;
- struct mem_mgr *memmgr = mem_mgr_from_g(g);
- struct mem_handle *handle_ref;
+ struct dma_buf *handle_ref;
u64 *g_bfr_va = c->ch_ctx.global_ctx_buffer_va;
struct gr_gk20a *gr = &g->gr;
u64 gpu_va;
else
handle_ref = gr->global_ctx_buffer[CIRCULAR_VPR].ref;
- gpu_va = gk20a_vm_map(ch_vm, memmgr, handle_ref,
+ gpu_va = gk20a_vm_map(ch_vm, handle_ref,
/*offset_align, flags, kind*/
0, NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
gmmu_pte_kind_pitch_v(), NULL, false,
else
handle_ref = gr->global_ctx_buffer[ATTRIBUTE_VPR].ref;
- gpu_va = gk20a_vm_map(ch_vm, memmgr, handle_ref,
+ gpu_va = gk20a_vm_map(ch_vm, handle_ref,
/*offset_align, flags, kind*/
0, NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
gmmu_pte_kind_pitch_v(), NULL, false,
else
handle_ref = gr->global_ctx_buffer[PAGEPOOL_VPR].ref;
- gpu_va = gk20a_vm_map(ch_vm, memmgr, handle_ref,
+ gpu_va = gk20a_vm_map(ch_vm, handle_ref,
/*offset_align, flags, kind*/
0, NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
gmmu_pte_kind_pitch_v(), NULL, false,
g_bfr_va[PAGEPOOL_VA] = gpu_va;
/* Golden Image */
- gpu_va = gk20a_vm_map(ch_vm, memmgr,
- gr->global_ctx_buffer[GOLDEN_CTX].ref,
+ gpu_va = gk20a_vm_map(ch_vm, gr->global_ctx_buffer[GOLDEN_CTX].ref,
/*offset_align, flags, kind*/
0, 0, gmmu_pte_kind_pitch_v(), NULL, false,
mem_flag_none);
static struct mapped_buffer_node *find_mapped_buffer_locked(
struct rb_root *root, u64 addr);
static struct mapped_buffer_node *find_mapped_buffer_reverse_locked(
- struct rb_root *root, struct mem_handle *r,
+ struct rb_root *root, struct dma_buf *dmabuf,
u32 kind);
static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
enum gmmu_pgsz_gk20a pgsz_idx,
0x1ffffLL };
static const u64 gmmu_page_masks[gmmu_nr_page_sizes] = { ~0xfffLL, ~0x1ffffLL };
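+/* comptag (compression tag) line range allocated for a buffer */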
+struct gk20a_comptags {
+ u32 offset;
+ u32 lines;
+};
+
+struct gk20a_dmabuf_priv {
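+ /* protects attach, sgt and pin_count below */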
+ struct mutex lock;
+
+ struct nvhost_allocator *comptag_allocator;
+ struct gk20a_comptags comptags;
+
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+
+ int pin_count;
+};
+
+static void gk20a_mm_delete_priv(void *_priv)
+{
+ struct gk20a_dmabuf_priv *priv = _priv;
+ if (!priv)
+ return;
+
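+ /* return any comptag lines this buffer held back to the allocator */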
+ if (priv->comptags.lines) {
+ BUG_ON(!priv->comptag_allocator);
+ priv->comptag_allocator->free(priv->comptag_allocator,
+ priv->comptags.offset,
+ priv->comptags.lines);
+ }
+
+ kfree(priv);
+}
+
+static struct sg_table *gk20a_mm_pin(struct device *dev,
+ struct dma_buf *dmabuf)
+{
+ struct gk20a_dmabuf_priv *priv;
+ static DEFINE_MUTEX(priv_lock);
+
+ /* create the nvhost priv if needed */
+ priv = dma_buf_get_drvdata(dmabuf, dev);
+ if (!priv) {
+ mutex_lock(&priv_lock);
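+ /* re-check under the lock in case another thread created the priv first */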
+ priv = dma_buf_get_drvdata(dmabuf, dev);
+ if (priv)
+ goto priv_exist_or_err;
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ priv = ERR_PTR(-ENOMEM);
+ goto priv_exist_or_err;
+ }
+ mutex_init(&priv->lock);
+ dma_buf_set_drvdata(dmabuf, dev, priv, gk20a_mm_delete_priv);
+priv_exist_or_err:
+ mutex_unlock(&priv_lock);
+ }
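+ /* priv is an ERR_PTR if the allocation above failed; pass it back as the sg_table return */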
+ if (IS_ERR(priv))
+ return (struct sg_table *)priv;
+
+ mutex_lock(&priv->lock);
+
+ if (priv->pin_count == 0) {
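+ /* first pin: attach and map the buffer for DMA; later pins reuse the cached sg_table */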
+ priv->attach = dma_buf_attach(dmabuf, dev);
+ if (IS_ERR(priv->attach)) {
+ mutex_unlock(&priv->lock);
+ return (struct sg_table *)priv->attach;
+ }
+
+ priv->sgt = dma_buf_map_attachment(priv->attach,
+ DMA_BIDIRECTIONAL);
+ if (IS_ERR(priv->sgt)) {
+ dma_buf_detach(dmabuf, priv->attach);
+ mutex_unlock(&priv->lock);
+ return priv->sgt;
+ }
+ }
+
+ priv->pin_count++;
+ mutex_unlock(&priv->lock);
+ return priv->sgt;
+}
+
+static void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf,
+ struct sg_table *sgt)
+{
+ struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
+
+ if (IS_ERR(priv) || !priv)
+ return;
+
+ mutex_lock(&priv->lock);
+ WARN_ON(priv->sgt != sgt);
+ priv->pin_count--;
+ WARN_ON(priv->pin_count < 0);
+ if (priv->pin_count == 0) {
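+ /* last unpin: release the DMA mapping and detach from the dma-buf */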
+ dma_buf_unmap_attachment(priv->attach, priv->sgt,
+ DMA_BIDIRECTIONAL);
+ dma_buf_detach(dmabuf, priv->attach);
+ }
+ mutex_unlock(&priv->lock);
+}
+
+
+static void gk20a_get_comptags(struct device *dev,
+ struct dma_buf *dmabuf,
+ struct gk20a_comptags *comptags)
+{
+ struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
+
+ if (!comptags)
+ return;
+
+ if (!priv) {
+ comptags->lines = 0;
+ comptags->offset = 0;
+ return;
+ }
+
+ *comptags = priv->comptags;
+}
+
+static int gk20a_alloc_comptags(struct device *dev,
+ struct dma_buf *dmabuf,
+ struct nvhost_allocator *allocator,
+ int lines)
+{
+ struct gk20a_dmabuf_priv *priv = dma_buf_get_drvdata(dmabuf, dev);
+ u32 offset = 0;
+ int err;
+
+ if (!priv)
+ return -ENOSYS;
+
+ if (!lines)
+ return -EINVAL;
+
+ /* store the allocator so we can use it when we free the ctags */
+ priv->comptag_allocator = allocator;
+ err = allocator->alloc(allocator, &offset, lines);
+ if (!err) {
+ priv->comptags.lines = lines;
+ priv->comptags.offset = offset;
+ }
+ return err;
+}
+
+
+
+
static int gk20a_init_mm_reset_enable_hw(struct gk20a *g)
{
nvhost_dbg_fn("");
}
static struct mapped_buffer_node *find_mapped_buffer_reverse_locked(
- struct rb_root *root, struct mem_handle *r,
+ struct rb_root *root, struct dma_buf *dmabuf,
u32 kind)
{
struct rb_node *node = rb_first(root);
while (node) {
struct mapped_buffer_node *mapped_buffer =
container_of(node, struct mapped_buffer_node, node);
- if (mapped_buffer->handle_ref == r &&
+ if (mapped_buffer->dmabuf == dmabuf &&
kind == mapped_buffer->kind)
return mapped_buffer;
node = rb_next(&mapped_buffer->node);
}
static u64 gk20a_vm_map_duplicate_locked(struct vm_gk20a *vm,
- struct mem_mgr *memmgr,
- struct mem_handle *r,
+ struct dma_buf *dmabuf,
u64 offset_align,
u32 flags,
int kind,
/* fall-back to default kind if no kind is provided */
if (kind < 0) {
u64 nvmap_param;
- nvhost_memmgr_get_param(memmgr, r, NVMAP_HANDLE_PARAM_KIND,
+ nvhost_memmgr_get_param((struct mem_handle *)dmabuf,
+ NVMAP_HANDLE_PARAM_KIND,
&nvmap_param);
kind = nvmap_param;
}
- mapped_buffer = find_mapped_buffer_reverse_locked(
- &vm->mapped_buffers, r, kind);
+ mapped_buffer =
+ find_mapped_buffer_reverse_locked(&vm->mapped_buffers,
+ dmabuf, kind);
if (!mapped_buffer)
return 0;
mapped_buffer->addr != offset_align)
return 0;
- WARN_ON(mapped_buffer->memmgr != memmgr);
BUG_ON(mapped_buffer->vm != vm);
/* mark the buffer as used */
* existing mapping here, we need to give back those
* refs once in order not to leak.
*/
- if (mapped_buffer->own_mem_ref) {
- nvhost_memmgr_put(mapped_buffer->memmgr,
- mapped_buffer->handle_ref);
- nvhost_memmgr_put_mgr(mapped_buffer->memmgr);
- } else
+ if (mapped_buffer->own_mem_ref)
+ dma_buf_put(mapped_buffer->dmabuf);
+ else
mapped_buffer->own_mem_ref = true;
-
- mapped_buffer->memmgr = memmgr;
}
kref_get(&mapped_buffer->ref);
}
u64 gk20a_vm_map(struct vm_gk20a *vm,
- struct mem_mgr *memmgr,
- struct mem_handle *r,
+ struct dma_buf *dmabuf,
u64 offset_align,
u32 flags /*NVHOST_AS_MAP_BUFFER_FLAGS_*/,
int kind,
u64 map_offset = 0;
int err = 0;
struct buffer_attrs bfr = {0};
- struct nvhost_comptags comptags;
+ struct gk20a_comptags comptags;
mutex_lock(&vm->update_gmmu_lock);
/* check if this buffer is already mapped */
- map_offset = gk20a_vm_map_duplicate_locked(vm, memmgr, r, offset_align,
+ map_offset = gk20a_vm_map_duplicate_locked(vm, dmabuf, offset_align,
flags, kind, sgt,
user_mapped, rw_flag);
if (map_offset) {
}
/* pin buffer to get phys/iovmm addr */
- bfr.sgt = nvhost_memmgr_pin(memmgr, r, d, rw_flag);
+ bfr.sgt = gk20a_mm_pin(d, dmabuf);
if (IS_ERR(bfr.sgt)) {
/* Falling back to physical is actually possible
* here in many cases if we use 4K phys pages in the
if (kind < 0) {
u64 value;
- err = nvhost_memmgr_get_param(memmgr, r,
+ err = nvhost_memmgr_get_param((struct mem_handle *)dmabuf,
NVMAP_HANDLE_PARAM_KIND,
&value);
if (err) {
}
bfr.kind_v = kind;
- bfr.size = nvhost_memmgr_size(r);
+ bfr.size = dmabuf->size;
bfr.align = 1 << __ffs((u64)sg_dma_address(bfr.sgt->sgl));
bfr.pgsz_idx = -1;
if (!vm->enable_ctag)
bfr.ctag_lines = 0;
- nvhost_memmgr_get_comptags(d, r, &comptags);
+ gk20a_get_comptags(d, dmabuf, &comptags);
if (bfr.ctag_lines && !comptags.lines) {
/* allocate compression resources if needed */
- err = nvhost_memmgr_alloc_comptags(d, r,
- ctag_allocator, bfr.ctag_lines);
+ err = gk20a_alloc_comptags(d, dmabuf, ctag_allocator,
+ bfr.ctag_lines);
if (err) {
/* ok to fall back here if we ran out */
/* TBD: we can partially alloc ctags as well... */
bfr.ctag_lines = bfr.ctag_offset = 0;
bfr.kind_v = bfr.uc_kind_v;
} else {
- nvhost_memmgr_get_comptags(d, r, &comptags);
+ gk20a_get_comptags(d, dmabuf, &comptags);
/* init/clear the ctag buffer */
g->ops.ltc.clear_comptags(g,
nvhost_warn(d, "oom allocating tracking buffer");
goto clean_up;
}
- mapped_buffer->memmgr = memmgr;
- mapped_buffer->handle_ref = r;
+ mapped_buffer->dmabuf = dmabuf;
mapped_buffer->sgt = bfr.sgt;
mapped_buffer->addr = map_offset;
mapped_buffer->size = bfr.size;
if (va_allocated)
gk20a_vm_free_va(vm, map_offset, bfr.size, bfr.pgsz_idx);
if (!IS_ERR(bfr.sgt))
- nvhost_memmgr_unpin(memmgr, r, d, bfr.sgt);
+ gk20a_mm_unpin(d, dmabuf, bfr.sgt);
mutex_unlock(&vm->update_gmmu_lock);
nvhost_dbg_info("err=%d\n", err);
hi32(mapped_buffer->addr), lo32(mapped_buffer->addr),
mapped_buffer->own_mem_ref);
- nvhost_memmgr_unpin(mapped_buffer->memmgr,
- mapped_buffer->handle_ref,
- dev_from_vm(vm),
- mapped_buffer->sgt);
+ gk20a_mm_unpin(dev_from_vm(vm), mapped_buffer->dmabuf,
+ mapped_buffer->sgt);
/* remove from mapped buffer tree and remove list, free */
rb_erase(&mapped_buffer->node, &vm->mapped_buffers);
if (mapped_buffer->user_mapped)
vm->num_user_mapped_buffers--;
- if (mapped_buffer->own_mem_ref) {
- nvhost_memmgr_put(mapped_buffer->memmgr,
- mapped_buffer->handle_ref);
- nvhost_memmgr_put_mgr(mapped_buffer->memmgr);
- }
+ if (mapped_buffer->own_mem_ref)
+ dma_buf_put(mapped_buffer->dmabuf);
kfree(mapped_buffer);
}
int gk20a_vm_map_buffer(struct gk20a_as_share *as_share,
- int memmgr_fd,
- ulong mem_id,
+ int dmabuf_fd,
u64 *offset_align,
u32 flags, /*NVHOST_AS_MAP_BUFFER_FLAGS_*/
int kind)
{
int err = 0;
struct vm_gk20a *vm = as_share->vm;
- struct gk20a *g = gk20a_from_vm(vm);
- struct mem_mgr *memmgr;
- struct mem_handle *r;
+ struct dma_buf *dmabuf;
u64 ret_va;
nvhost_dbg_fn("");
- /* get ref to the memmgr (released on unmap_locked) */
- memmgr = nvhost_memmgr_get_mgr_file(memmgr_fd);
- if (IS_ERR(memmgr))
- return 0;
-
- /* get ref to the mem handle (released on unmap_locked) */
+ /* get ref to the dma-buf (released on unmap_locked) */
- r = nvhost_memmgr_get(memmgr, mem_id, g->dev);
- if (!r) {
- nvhost_memmgr_put_mgr(memmgr);
+ dmabuf = dma_buf_get(dmabuf_fd);
+ if (IS_ERR(dmabuf))
return 0;
- }
- ret_va = gk20a_vm_map(vm, memmgr, r, *offset_align,
+ ret_va = gk20a_vm_map(vm, dmabuf, *offset_align,
flags, kind, NULL, true,
mem_flag_none);
*offset_align = ret_va;
if (!ret_va) {
- nvhost_memmgr_put(memmgr, r);
- nvhost_memmgr_put_mgr(memmgr);
+ dma_buf_put(dmabuf);
err = -EINVAL;
}
int gk20a_vm_find_buffer(struct vm_gk20a *vm, u64 gpu_va,
- struct mem_mgr **mgr, struct mem_handle **r,
+ struct dma_buf **dmabuf,
u64 *offset)
{
struct mapped_buffer_node *mapped_buffer;
return -EINVAL;
}
- *mgr = mapped_buffer->memmgr;
- *r = mapped_buffer->handle_ref;
+ *dmabuf = mapped_buffer->dmabuf;
*offset = gpu_va - mapped_buffer->addr;
mutex_unlock(&vm->update_gmmu_lock);
#define NV_GMMU_VA_IS_UPPER(x) ((x) >= ((u64)0x1 << (NV_GMMU_VA_RANGE-1)))
struct mem_desc {
- struct mem_handle *ref;
+ struct dma_buf *ref;
struct sg_table *sgt;
u32 size;
};
struct vm_reserved_va_node *va_node;
u64 addr;
u64 size;
- struct mem_mgr *memmgr;
- struct mem_handle *handle_ref;
+ struct dma_buf *dmabuf;
struct sg_table *sgt;
struct kref ref;
u32 user_mapped;
#define gk20a_from_mm(mm) ((mm)->g)
#define gk20a_from_vm(vm) ((vm)->mm->g)
-#define mem_mgr_from_mm(mm) (gk20a_from_mm(mm)->host->memmgr)
-#define mem_mgr_from_vm(vm) (gk20a_from_vm(vm)->host->memmgr)
#define dev_from_vm(vm) dev_from_gk20a(vm->mm->g)
#define DEFAULT_ALLOC_FLAGS (mem_mgr_flag_uncacheable)
int rw_flag);
u64 gk20a_vm_map(struct vm_gk20a *vm,
- struct mem_mgr *memmgr,
- struct mem_handle *r,
- u64 offset_align,
- u32 flags /*NVHOST_MAP_BUFFER_FLAGS_*/,
- int kind,
- struct sg_table **sgt,
- bool user_mapped,
- int rw_flag);
+ struct dma_buf *dmabuf,
+ u64 offset_align,
+ u32 flags /*NVHOST_AS_MAP_BUFFER_FLAGS_*/,
+ int kind,
+ struct sg_table **sgt,
+ bool user_mapped,
+ int rw_flag);
/* unmap handle from kernel */
void gk20a_vm_unmap(struct vm_gk20a *vm, u64 offset);
/* find buffer corresponding to va */
int gk20a_vm_find_buffer(struct vm_gk20a *vm, u64 gpu_va,
- struct mem_mgr **memmgr, struct mem_handle **r,
+ struct dma_buf **dmabuf,
u64 *offset);
void gk20a_vm_get(struct vm_gk20a *vm);
int gk20a_vm_bind_channel(struct gk20a_as_share *as_share,
struct channel_gk20a *ch);
int gk20a_vm_map_buffer(struct gk20a_as_share *as_share,
- int memmgr_fd,
- ulong mem_id,
+ int dmabuf_fd,
u64 *offset_align,
- u32 flags /*NVHOST_AS_MAP_BUFFER_FLAGS_*/,
+ u32 flags, /*NVHOST_AS_MAP_BUFFER_FLAGS_*/
int kind);
int gk20a_vm_unmap_buffer(struct gk20a_as_share *, u64 offset);
struct nvhost_channel;
struct nvhost_cdma;
struct mem_mgr;
+struct dma_buf;
struct nvhost_dbg_session;
struct nvhost_hwctx {
return mgr;
}
-struct mem_handle *nvhost_memmgr_alloc(struct mem_mgr *mgr,
- size_t size, size_t align, int flags, unsigned int heap_mask)
+struct mem_handle *nvhost_memmgr_alloc(size_t size, size_t align,
+ int flags, unsigned int heap_mask)
{
struct mem_handle *h = NULL;
#ifdef CONFIG_TEGRA_GRHOST_USE_NVMAP
- h = nvhost_nvmap_alloc(mgr, size, align, flags, heap_mask);
+ h = nvhost_nvmap_alloc(size, align, flags, heap_mask);
#else
#ifdef CONFIG_TEGRA_GRHOST_USE_DMABUF
- h = nvhost_dmabuf_alloc(mgr, size, align, flags);
+ h = nvhost_dmabuf_alloc(size, align, flags);
#endif
#endif
}
}
-int nvhost_memmgr_get_param(struct mem_mgr *mem_mgr,
- struct mem_handle *mem_handle,
+int nvhost_memmgr_get_param(struct mem_handle *mem_handle,
u32 param, u64 *result)
{
#ifndef CONFIG_ARM64
#endif
#ifdef CONFIG_TEGRA_GRHOST_USE_NVMAP
case mem_mgr_type_nvmap:
- return nvhost_nvmap_get_param(mem_mgr, mem_handle,
+ return nvhost_nvmap_get_param(mem_handle,
param, result);
break;
#endif
#ifdef CONFIG_TEGRA_GRHOST_USE_DMABUF
case mem_mgr_type_dmabuf:
- return nvhost_dmabuf_get_param(mem_mgr, mem_handle,
+ return nvhost_dmabuf_get_param(mem_handle,
param, result);
break;
#endif
void nvhost_memmgr_put_mgr(struct mem_mgr *);
struct mem_mgr *nvhost_memmgr_get_mgr(struct mem_mgr *);
struct mem_mgr *nvhost_memmgr_get_mgr_file(int fd);
-struct mem_handle *nvhost_memmgr_alloc(struct mem_mgr *,
- size_t size, size_t align,
- int flags, unsigned int heap_mask);
+struct mem_handle *nvhost_memmgr_alloc(size_t size, size_t align,
+ int flags, unsigned int heap_mask);
struct mem_handle *nvhost_memmgr_get(struct mem_mgr *,
ulong id, struct platform_device *dev);
void nvhost_memmgr_put(struct mem_mgr *mgr, struct mem_handle *handle);
static inline int nvhost_memmgr_type(ulong id) { return id & MEMMGR_TYPE_MASK; }
static inline int nvhost_memmgr_id(ulong id) { return id & MEMMGR_ID_MASK; }
-int nvhost_memmgr_get_param(struct mem_mgr *mem_mgr,
- struct mem_handle *mem_handle,
+int nvhost_memmgr_get_param(struct mem_handle *mem_handle,
u32 param, u64 *result);
void nvhost_memmgr_get_comptags(struct device *dev,
return (struct mem_mgr *)0x1;
}
-struct mem_handle *nvhost_nvmap_alloc(struct mem_mgr *mgr,
- size_t size, size_t align, int flags, unsigned int heap_mask)
+struct mem_handle *nvhost_nvmap_alloc(size_t size, size_t align,
+ int flags, unsigned int heap_mask)
{
return (struct mem_handle *)nvmap_alloc_dmabuf(
size, align, flags, heap_mask);
return (struct mem_handle *)dma_buf_get(id);
}
-int nvhost_nvmap_get_param(struct mem_mgr *mgr, struct mem_handle *handle,
- u32 param, u64 *result)
+int nvhost_nvmap_get_param(struct mem_handle *handle,
+ u32 param, u64 *result)
{
return nvmap_get_dmabuf_param(
(struct dma_buf *)handle,
void nvhost_nvmap_put_mgr(struct mem_mgr *mgr);
struct mem_mgr *nvhost_nvmap_get_mgr(struct mem_mgr *mgr);
struct mem_mgr *nvhost_nvmap_get_mgr_file(int fd);
-struct mem_handle *nvhost_nvmap_alloc(struct mem_mgr *mgr,
- size_t size, size_t align, int flags, unsigned int heap_flags);
+struct mem_handle *nvhost_nvmap_alloc(size_t size, size_t align,
+ int flags, unsigned int heap_flags);
void nvhost_nvmap_put(struct mem_mgr *mgr, struct mem_handle *handle);
struct sg_table *nvhost_nvmap_pin(struct mem_mgr *mgr,
struct mem_handle *handle, struct device *dev, int rw_flag);
void *addr);
struct mem_handle *nvhost_nvmap_get(struct mem_mgr *mgr,
ulong id, struct platform_device *dev);
-int nvhost_nvmap_get_param(struct mem_mgr *mgr, struct mem_handle *handle,
+int nvhost_nvmap_get_param(struct mem_handle *handle,
u32 param, u64 *result);
phys_addr_t nvhost_nvmap_get_addr_from_id(ulong id);