#include "gk20a.h"
#include "dbg_gpu_gk20a.h"
-#include "nvhost_memmgr.h"
#include "hw_ram_gk20a.h"
#include "hw_fifo_gk20a.h"
#include "hw_pbdma_gk20a.h"
/* free gpfifo */
if (ch->gpfifo.gpu_va)
gk20a_gmmu_unmap(ch_vm, ch->gpfifo.gpu_va,
- ch->gpfifo.size, mem_flag_none);
+ ch->gpfifo.size, gk20a_mem_flag_none);
if (ch->gpfifo.cpu_va)
dma_free_coherent(d, ch->gpfifo.size,
ch->gpfifo.cpu_va, ch->gpfifo.iova);
q->base_gpuva = gk20a_gmmu_map(ch_vm, &sgt,
size,
0, /* flags */
- mem_flag_none);
+ gk20a_mem_flag_none);
if (!q->base_gpuva) {
nvhost_err(d, "ch %d : failed to map gpu va"
"for priv cmd buffer", c->hw_chid);
if (q->base_gpuva)
gk20a_gmmu_unmap(ch_vm, q->base_gpuva,
- q->mem.size, mem_flag_none);
+ q->mem.size, gk20a_mem_flag_none);
if (q->mem.base_cpuva)
dma_free_coherent(d, q->mem.size,
q->mem.base_cpuva, q->mem.base_iova);
&sgt,
c->gpfifo.size,
0, /* flags */
- mem_flag_none);
+ gk20a_mem_flag_none);
if (!c->gpfifo.gpu_va) {
nvhost_err(d, "channel %d : failed to map"
" gpu_va for gpfifo", c->hw_chid);
clean_up_unmap:
gk20a_gmmu_unmap(ch_vm, c->gpfifo.gpu_va,
- c->gpfifo.size, mem_flag_none);
+ c->gpfifo.size, gk20a_mem_flag_none);
clean_up_sgt:
gk20a_free_sgtable(&sgt);
clean_up:
/*
* drivers/video/tegra/host/t20/debug_gk20a.c
*
- * Copyright (C) 2011-2013 NVIDIA Corporation. All rights reserved.
+ * Copyright (C) 2011-2014 NVIDIA Corporation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
#include "dev.h"
#include "debug.h"
-#include "nvhost_memmgr.h"
#include "nvhost_cdma.h"
#include "nvhost_acm.h"
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <trace/events/nvhost.h>
+#include <linux/dma-mapping.h>
#include "../dev.h"
-#include "nvhost_memmgr.h"
#include "gk20a.h"
#include "hw_fifo_gk20a.h"
gk20a_gmmu_unmap(&g->mm.bar1.vm,
f->userd.gpu_va,
f->userd.size,
- mem_flag_none);
+ gk20a_mem_flag_none);
if (f->userd.sgt)
gk20a_free_sgtable(&f->userd.sgt);
&f->userd.sgt,
f->userd_total_size,
0, /* flags */
- mem_flag_none);
+ gk20a_mem_flag_none);
if (!f->userd.gpu_va) {
dev_err(d, "gmmu mapping failed\n");
goto clean_up;
gk20a_gmmu_unmap(&g->mm.bar1.vm,
f->userd.gpu_va,
f->userd.size,
- mem_flag_none);
+ gk20a_mem_flag_none);
if (f->userd.sgt)
gk20a_free_sgtable(&f->userd.sgt);
if (f->userd.cpuva)
{
return &(nvhost_get_host(g->dev)->syncpt);
}
-static inline struct mem_mgr *mem_mgr_from_g(struct gk20a* g)
-{
- return nvhost_get_host(g->dev)->memmgr;
-}
static inline struct gk20a *gk20a_from_as(struct gk20a_as *as)
{
return container_of(as, struct gk20a, as);
*
* GK20A Graphics Context for Simulation
*
- * Copyright (c) 2011-2013, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#include "hw_therm_gk20a.h"
#include "hw_pbdma_gk20a.h"
#include "chip_support.h"
-#include "nvhost_memmgr.h"
#include "gk20a_gating_reglist.h"
#include "gr_pri_gk20a.h"
#include "regops_gk20a.h"
&ucode_info->surface_desc.sgt,
ucode_info->surface_desc.size,
0, /* flags */
- mem_flag_read_only);
+ gk20a_mem_flag_read_only);
if (!ucode_info->ucode_gpuva) {
nvhost_err(d, "failed to update gmmu ptes\n");
return -ENOMEM;
clean_up:
if (ucode_info->ucode_gpuva)
gk20a_gmmu_unmap(vm, ucode_info->ucode_gpuva,
- ucode_info->surface_desc.size, mem_flag_none);
+ ucode_info->surface_desc.size, gk20a_mem_flag_none);
if (ucode_info->surface_desc.sgt)
gk20a_free_sgtable(&ucode_info->surface_desc.sgt);
if (ucode_info->surface_desc.cpuva)
gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
- mem_flag_none);
+ gk20a_mem_flag_none);
if (!gpu_va)
goto clean_up;
g_bfr_va[CIRCULAR_VA] = gpu_va;
gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
- mem_flag_none);
+ gk20a_mem_flag_none);
if (!gpu_va)
goto clean_up;
g_bfr_va[ATTRIBUTE_VA] = gpu_va;
gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
- mem_flag_none);
+ gk20a_mem_flag_none);
if (!gpu_va)
goto clean_up;
g_bfr_va[PAGEPOOL_VA] = gpu_va;
sgt = gr->global_ctx_buffer[GOLDEN_CTX].sgt;
size = gr->global_ctx_buffer[GOLDEN_CTX].size;
gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size, 0,
- mem_flag_none);
+ gk20a_mem_flag_none);
if (!gpu_va)
goto clean_up;
g_bfr_va[GOLDEN_CTX_VA] = gpu_va;
if (g_bfr_va[i]) {
gk20a_gmmu_unmap(ch_vm, g_bfr_va[i],
gr->global_ctx_buffer[i].size,
- mem_flag_none);
+ gk20a_mem_flag_none);
g_bfr_va[i] = 0;
}
}
if (g_bfr_va[i]) {
gk20a_gmmu_unmap(ch_vm, g_bfr_va[i],
gr->global_ctx_buffer[i].size,
- mem_flag_none);
+ gk20a_mem_flag_none);
g_bfr_va[i] = 0;
}
}
gr_ctx->gpu_va = gk20a_gmmu_map(ch_vm, &sgt, gr_ctx->size,
NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
- mem_flag_none);
+ gk20a_mem_flag_none);
if (!gr_ctx->gpu_va)
goto err_free_sgt;
nvhost_dbg_fn("");
gk20a_gmmu_unmap(ch_vm, ch_ctx->gr_ctx.gpu_va,
- ch_ctx->gr_ctx.size, mem_flag_none);
+ ch_ctx->gr_ctx.size, gk20a_mem_flag_none);
dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
dma_free_attrs(d, ch_ctx->gr_ctx.size,
ch_ctx->gr_ctx.pages, ch_ctx->gr_ctx.iova, &attrs);
goto err_free;
patch_ctx->gpu_va = gk20a_gmmu_map(ch_vm, &sgt, patch_ctx->size,
- 0, mem_flag_none);
+ 0, gk20a_mem_flag_none);
if (!patch_ctx->gpu_va)
goto err_free_sgtable;
if (patch_ctx->gpu_va)
gk20a_gmmu_unmap(ch_vm, patch_ctx->gpu_va,
- patch_ctx->size, mem_flag_none);
+ patch_ctx->size, gk20a_mem_flag_none);
patch_ctx->gpu_va = 0;
patch_ctx->data_count = 0;
}
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include "gk20a.h"
#include "gr_gk20a.h"
#include "dev.h"
-#include "nvhost_memmgr.h"
static int gk20a_determine_L2_size_bytes(struct gk20a *g)
{
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
+#ifdef CONFIG_TEGRA_NVMAP
+#include <linux/nvmap.h>
+#endif
+
#include "dev.h"
-#include "nvhost_memmgr.h"
#include "gk20a.h"
#include "mm_gk20a.h"
#include "hw_gmmu_gk20a.h"
* - Mappings to the same allocations are reused and refcounted.
* - This path does not support deferred unmapping (i.e. kernel must wait for
* all hw operations on the buffer to complete before unmapping).
- * - References to memmgr and mem_handle are owned and managed by the (kernel)
- * clients of the gk20a_vm layer.
+ * - References to dmabuf are owned and managed by the (kernel) clients of
+ * the gk20a_vm layer.
*
*
* User space mappings
* - Mappings to the same allocations are reused and refcounted.
* - This path supports deferred unmapping (i.e. we delay the actual unmapping
* until all hw operations have completed).
- * - References to memmgr and mem_handle are owned and managed by the vm_gk20a
+ * - References to dmabuf are owned and managed by the vm_gk20a
* layer itself. vm.map acquires these refs, and sets
* mapped_buffer->own_mem_ref to record that we must release the refs when we
* actually unmap.
{
struct mapped_buffer_node *mapped_buffer = 0;
+#ifdef CONFIG_TEGRA_NVMAP
/* fall-back to default kind if no kind is provided */
if (kind < 0) {
u64 nvmap_param;
- nvhost_memmgr_get_param((struct mem_handle *)dmabuf,
- NVMAP_HANDLE_PARAM_KIND,
- &nvmap_param);
+ int err;
+ err = nvmap_get_dmabuf_param(dmabuf, NVMAP_HANDLE_PARAM_KIND,
+ &nvmap_param);
+ if (err)
+ return 0;
kind = nvmap_param;
}
+#endif
+
+ if (kind < 0)
+ return 0;
mapped_buffer =
find_mapped_buffer_reverse_locked(&vm->mapped_buffers,
mapped_buffer->user_mapped++;
/* If the mapping comes from user space, we own
- * the memmgr and handle refs. Since we reuse an
+ * the handle ref. Since we reuse an
* existing mapping here, we need to give back those
* refs once in order not to leak.
*/
if (sgt)
*sgt = bfr.sgt;
+#ifdef CONFIG_TEGRA_NVMAP
if (kind < 0) {
u64 value;
- err = nvhost_memmgr_get_param((struct mem_handle *)dmabuf,
- NVMAP_HANDLE_PARAM_KIND,
- &value);
+ err = nvmap_get_dmabuf_param(dmabuf, NVMAP_HANDLE_PARAM_KIND,
+ &value);
if (err) {
nvhost_err(d, "failed to get nvmap buffer kind (err=%d)",
err);
}
kind = value;
}
+#endif
+
+ if (kind < 0) {
+ err = -EINVAL;
+ goto clean_up;
+ }
bfr.kind_v = kind;
bfr.size = dmabuf->size;
gmmu_pte_kind_f(kind_v) |
gmmu_pte_comptagline_f(ctag);
- if (rw_flag == mem_flag_read_only) {
+ if (rw_flag == gk20a_mem_flag_read_only) {
pte_w[0] |= gmmu_pte_read_only_true_f();
pte_w[1] |=
gmmu_pte_write_disable_true_f();
- } else if (rw_flag == mem_flag_write_only) {
+ } else if (rw_flag ==
+ gk20a_mem_flag_write_only) {
pte_w[1] |=
gmmu_pte_read_disable_true_f();
}
u64 page_vaddr = __locked_gmmu_map(vm, vaddr,
vm->zero_page_sgt, pgsz, pgsz_idx, 0, 0,
NVHOST_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET,
- mem_flag_none);
+ gk20a_mem_flag_none);
if (!page_vaddr) {
nvhost_err(dev_from_vm(vm), "failed to remap clean buffers!");
while (i--) {
vaddr -= pgsz;
__locked_gmmu_unmap(vm, vaddr, pgsz, pgsz_idx, 0,
- mem_flag_none);
+ gk20a_mem_flag_none);
}
return -EINVAL;
}
-/* return mem_mgr and mem_handle to caller. If the mem_handle is a kernel dup
- from user space (as_ioctl), caller releases the kernel duplicated handle */
/* NOTE! mapped_buffers lock must be held */
static void gk20a_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer)
{
mapped_buffer->size,
mapped_buffer->pgsz_idx,
mapped_buffer->va_allocated,
- mem_flag_none);
+ gk20a_mem_flag_none);
nvhost_dbg(dbg_map, "as=%d pgsz=%d gv=0x%x,%08x own_mem_ref=%d",
vm_aspace_id(vm), gmmu_page_sizes[mapped_buffer->pgsz_idx],
va_node->size,
va_node->pgsz_idx,
false,
- mem_flag_none);
+ gk20a_mem_flag_none);
kfree(va_node);
}
mutex_unlock(&vm->update_gmmu_lock);
ret_va = gk20a_vm_map(vm, dmabuf, *offset_align,
flags, kind, NULL, true,
- mem_flag_none);
+ gk20a_mem_flag_none);
*offset_align = ret_va;
if (!ret_va) {
dma_buf_put(dmabuf);
size_t size;
};
+/*
+ * Read/write access flags passed to gk20a_gmmu_map()/gk20a_gmmu_unmap().
+ * gk20a_mem_flag_none applies no access restriction; read_only sets the
+ * PTE write-disable bits, write_only sets the read-disable bit.
+ *
+ * Guarded so this definition is skipped while nvhost_memmgr.h (which
+ * previously supplied the equivalent mem_flag_* values) is still in use.
+ */
+#ifndef _NVHOST_MEM_MGR_H
+enum gk20a_mem_rw_flag {
+	gk20a_mem_flag_none = 0,
+	gk20a_mem_flag_read_only = 1,
+	gk20a_mem_flag_write_only = 2,
+};
+#endif
+
enum gmmu_pgsz_gk20a {
gmmu_page_size_small = 0,
gmmu_page_size_big = 1,
#define dev_from_vm(vm) dev_from_gk20a(vm->mm->g)
-#define DEFAULT_ALLOC_FLAGS (mem_mgr_flag_uncacheable)
#define DEFAULT_ALLOC_ALIGNMENT (4*1024)
static inline int bar1_aperture_size_mb_gk20a(void)
#include "../dev.h"
#include "../bus_client.h"
-#include "nvhost_memmgr.h"
#include "nvhost_acm.h"
#include "gk20a.h"
pmu->ucode.pmu_va = gk20a_gmmu_map(vm, &sgt_pmu_ucode,
GK20A_PMU_UCODE_SIZE_MAX,
0, /* flags */
- mem_flag_read_only);
+ gk20a_mem_flag_read_only);
if (!pmu->ucode.pmu_va) {
nvhost_err(d, "failed to map pmu ucode memory!!");
goto err_free_ucode_sgt;
pmu->seq_buf.pmu_va = gk20a_gmmu_map(vm, &sgt_seq_buf,
GK20A_PMU_SEQ_BUF_SIZE,
0, /* flags */
- mem_flag_none);
+ gk20a_mem_flag_none);
if (!pmu->seq_buf.pmu_va) {
nvhost_err(d, "failed to map pmu ucode memory!!");
goto err_free_seq_buf_sgt;
err_unmap_seq_buf:
gk20a_gmmu_unmap(vm, pmu->seq_buf.pmu_va,
- GK20A_PMU_SEQ_BUF_SIZE, mem_flag_none);
+ GK20A_PMU_SEQ_BUF_SIZE, gk20a_mem_flag_none);
err_free_seq_buf_sgt:
gk20a_free_sgtable(&sgt_seq_buf);
err_unmap_ucode:
gk20a_gmmu_unmap(vm, pmu->ucode.pmu_va,
- GK20A_PMU_UCODE_SIZE_MAX, mem_flag_none);
+ GK20A_PMU_UCODE_SIZE_MAX, gk20a_mem_flag_none);
err_free_ucode_sgt:
gk20a_free_sgtable(&sgt_pmu_ucode);
err_free_seq_buf:
&sgt_pg_buf,
size,
0, /* flags */
- mem_flag_none);
+ gk20a_mem_flag_none);
if (!pmu->pg_buf.pmu_va) {
nvhost_err(d, "failed to map fecs pg buffer");
err = -ENOMEM;