rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/commitdiff
video: tegra: host: gk20a: Remove memmgr refs
author: Arto Merilainen <amerilainen@nvidia.com>
Fri, 21 Feb 2014 16:46:14 +0000 (18:46 +0200)
committer: Terje Bergstrom <tbergstrom@nvidia.com>
Thu, 27 Feb 2014 06:37:01 +0000 (22:37 -0800)
This patch removes remaining references to nvhost memmgr from
gk20a generic code.

Bug 1450489

Change-Id: I4ef3825024b1bf9a050254e57dbfd9d985001c16
Signed-off-by: Arto Merilainen <amerilainen@nvidia.com>
Reviewed-on: http://git-master/r/372966
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
drivers/video/tegra/host/gk20a/channel_gk20a.c
drivers/video/tegra/host/gk20a/debug_gk20a.c
drivers/video/tegra/host/gk20a/fifo_gk20a.c
drivers/video/tegra/host/gk20a/gk20a.h
drivers/video/tegra/host/gk20a/gr_ctx_gk20a_sim.c
drivers/video/tegra/host/gk20a/gr_gk20a.c
drivers/video/tegra/host/gk20a/ltc_common.c
drivers/video/tegra/host/gk20a/mm_gk20a.c
drivers/video/tegra/host/gk20a/mm_gk20a.h
drivers/video/tegra/host/gk20a/pmu_gk20a.c

index 0ddeded1fd9aebb2051b135aaa27fd1637bb6fe2..9b0c98342066fb212bb27c5b7b18740b3fc82f22 100644 (file)
@@ -35,7 +35,6 @@
 #include "gk20a.h"
 #include "dbg_gpu_gk20a.h"
 
-#include "nvhost_memmgr.h"
 #include "hw_ram_gk20a.h"
 #include "hw_fifo_gk20a.h"
 #include "hw_pbdma_gk20a.h"
@@ -643,7 +642,7 @@ void gk20a_free_channel(struct channel_gk20a *ch, bool finish)
        /* free gpfifo */
        if (ch->gpfifo.gpu_va)
                gk20a_gmmu_unmap(ch_vm, ch->gpfifo.gpu_va,
-                       ch->gpfifo.size, mem_flag_none);
+                       ch->gpfifo.size, gk20a_mem_flag_none);
        if (ch->gpfifo.cpu_va)
                dma_free_coherent(d, ch->gpfifo.size,
                        ch->gpfifo.cpu_va, ch->gpfifo.iova);
@@ -834,7 +833,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
        q->base_gpuva = gk20a_gmmu_map(ch_vm, &sgt,
                                        size,
                                        0, /* flags */
-                                       mem_flag_none);
+                                       gk20a_mem_flag_none);
        if (!q->base_gpuva) {
                nvhost_err(d, "ch %d : failed to map gpu va"
                           "for priv cmd buffer", c->hw_chid);
@@ -884,7 +883,7 @@ static void channel_gk20a_free_priv_cmdbuf(struct channel_gk20a *c)
 
        if (q->base_gpuva)
                gk20a_gmmu_unmap(ch_vm, q->base_gpuva,
-                               q->mem.size, mem_flag_none);
+                               q->mem.size, gk20a_mem_flag_none);
        if (q->mem.base_cpuva)
                dma_free_coherent(d, q->mem.size,
                        q->mem.base_cpuva, q->mem.base_iova);
@@ -1135,7 +1134,7 @@ static int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
                                        &sgt,
                                        c->gpfifo.size,
                                        0, /* flags */
-                                       mem_flag_none);
+                                       gk20a_mem_flag_none);
        if (!c->gpfifo.gpu_va) {
                nvhost_err(d, "channel %d : failed to map"
                           " gpu_va for gpfifo", c->hw_chid);
@@ -1170,7 +1169,7 @@ static int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
 
 clean_up_unmap:
        gk20a_gmmu_unmap(ch_vm, c->gpfifo.gpu_va,
-               c->gpfifo.size, mem_flag_none);
+               c->gpfifo.size, gk20a_mem_flag_none);
 clean_up_sgt:
        gk20a_free_sgtable(&sgt);
 clean_up:
index d288a8afe5862d55d2a3f3425eeb9e72ff61b34d..31530d61c3f67e73178523b0e34017e85681d5f1 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * drivers/video/tegra/host/t20/debug_gk20a.c
  *
- * Copyright (C) 2011-2013 NVIDIA Corporation.  All rights reserved.
+ * Copyright (C) 2011-2014 NVIDIA Corporation.  All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -21,7 +21,6 @@
 
 #include "dev.h"
 #include "debug.h"
-#include "nvhost_memmgr.h"
 #include "nvhost_cdma.h"
 #include "nvhost_acm.h"
 
index 78ec6ef011b221b60a137563f6f243781592ba4b..d3c5af1b00a613dffbe25e37296e85632232312e 100644 (file)
@@ -22,9 +22,9 @@
 #include <linux/slab.h>
 #include <linux/scatterlist.h>
 #include <trace/events/nvhost.h>
+#include <linux/dma-mapping.h>
 
 #include "../dev.h"
-#include "nvhost_memmgr.h"
 
 #include "gk20a.h"
 #include "hw_fifo_gk20a.h"
@@ -177,7 +177,7 @@ void gk20a_remove_fifo_support(struct fifo_gk20a *f)
                gk20a_gmmu_unmap(&g->mm.bar1.vm,
                                f->userd.gpu_va,
                                f->userd.size,
-                               mem_flag_none);
+                               gk20a_mem_flag_none);
 
        if (f->userd.sgt)
                gk20a_free_sgtable(&f->userd.sgt);
@@ -526,7 +526,7 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
                                        &f->userd.sgt,
                                        f->userd_total_size,
                                        0, /* flags */
-                                       mem_flag_none);
+                                       gk20a_mem_flag_none);
        if (!f->userd.gpu_va) {
                dev_err(d, "gmmu mapping failed\n");
                goto clean_up;
@@ -585,7 +585,7 @@ clean_up:
                gk20a_gmmu_unmap(&g->mm.bar1.vm,
                                        f->userd.gpu_va,
                                        f->userd.size,
-                                       mem_flag_none);
+                                       gk20a_mem_flag_none);
        if (f->userd.sgt)
                gk20a_free_sgtable(&f->userd.sgt);
        if (f->userd.cpuva)
index 62a33b7df438849debff892f76ba6b764f172ead..4183156f25d8b26eaec52c467fcee887240d59b6 100644 (file)
@@ -283,10 +283,6 @@ static inline struct nvhost_syncpt *syncpt_from_gk20a(struct gk20a* g)
 {
        return &(nvhost_get_host(g->dev)->syncpt);
 }
-static inline struct mem_mgr *mem_mgr_from_g(struct gk20a* g)
-{
-       return nvhost_get_host(g->dev)->memmgr;
-}
 static inline struct gk20a *gk20a_from_as(struct gk20a_as *as)
 {
        return container_of(as, struct gk20a, as);
index b54f1e291439fab90bb9bb7dc7ac0bce78e0ece4..fbb3a922b544b480c6179a16e1602b4e58598948 100644 (file)
@@ -3,7 +3,7 @@
  *
  * GK20A Graphics Context for Simulation
  *
- * Copyright (c) 2011-2013, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2011-2014, NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
index 9d828675f7e97deac06747770467cfb9321f9db1..d13701acef50c466ba4ec39cf9b8fce702447f5f 100644 (file)
@@ -52,7 +52,6 @@
 #include "hw_therm_gk20a.h"
 #include "hw_pbdma_gk20a.h"
 #include "chip_support.h"
-#include "nvhost_memmgr.h"
 #include "gk20a_gating_reglist.h"
 #include "gr_pri_gk20a.h"
 #include "regops_gk20a.h"
@@ -1766,7 +1765,7 @@ static int gr_gk20a_init_ctxsw_ucode_vaspace(struct gk20a *g)
                                        &ucode_info->surface_desc.sgt,
                                        ucode_info->surface_desc.size,
                                        0, /* flags */
-                                       mem_flag_read_only);
+                                       gk20a_mem_flag_read_only);
        if (!ucode_info->ucode_gpuva) {
                nvhost_err(d, "failed to update gmmu ptes\n");
                return -ENOMEM;
@@ -1909,7 +1908,7 @@ static int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
  clean_up:
        if (ucode_info->ucode_gpuva)
                gk20a_gmmu_unmap(vm, ucode_info->ucode_gpuva,
-                       ucode_info->surface_desc.size, mem_flag_none);
+                       ucode_info->surface_desc.size, gk20a_mem_flag_none);
        if (ucode_info->surface_desc.sgt)
                gk20a_free_sgtable(&ucode_info->surface_desc.sgt);
        if (ucode_info->surface_desc.cpuva)
@@ -2337,7 +2336,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 
        gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
                                NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
-                               mem_flag_none);
+                               gk20a_mem_flag_none);
        if (!gpu_va)
                goto clean_up;
        g_bfr_va[CIRCULAR_VA] = gpu_va;
@@ -2353,7 +2352,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 
        gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
                                NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
-                               mem_flag_none);
+                               gk20a_mem_flag_none);
        if (!gpu_va)
                goto clean_up;
        g_bfr_va[ATTRIBUTE_VA] = gpu_va;
@@ -2369,7 +2368,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 
        gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size,
                                NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
-                               mem_flag_none);
+                               gk20a_mem_flag_none);
        if (!gpu_va)
                goto clean_up;
        g_bfr_va[PAGEPOOL_VA] = gpu_va;
@@ -2378,7 +2377,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
        sgt = gr->global_ctx_buffer[GOLDEN_CTX].sgt;
        size = gr->global_ctx_buffer[GOLDEN_CTX].size;
        gpu_va = gk20a_gmmu_map(ch_vm, &sgt, size, 0,
-                               mem_flag_none);
+                               gk20a_mem_flag_none);
        if (!gpu_va)
                goto clean_up;
        g_bfr_va[GOLDEN_CTX_VA] = gpu_va;
@@ -2391,7 +2390,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
                if (g_bfr_va[i]) {
                        gk20a_gmmu_unmap(ch_vm, g_bfr_va[i],
                                         gr->global_ctx_buffer[i].size,
-                                        mem_flag_none);
+                                        gk20a_mem_flag_none);
                        g_bfr_va[i] = 0;
                }
        }
@@ -2411,7 +2410,7 @@ static void gr_gk20a_unmap_global_ctx_buffers(struct channel_gk20a *c)
                if (g_bfr_va[i]) {
                        gk20a_gmmu_unmap(ch_vm, g_bfr_va[i],
                                         gr->global_ctx_buffer[i].size,
-                                        mem_flag_none);
+                                        gk20a_mem_flag_none);
                        g_bfr_va[i] = 0;
                }
        }
@@ -2454,7 +2453,7 @@ static int gr_gk20a_alloc_channel_gr_ctx(struct gk20a *g,
 
        gr_ctx->gpu_va = gk20a_gmmu_map(ch_vm, &sgt, gr_ctx->size,
                                NVHOST_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
-                               mem_flag_none);
+                               gk20a_mem_flag_none);
        if (!gr_ctx->gpu_va)
                goto err_free_sgt;
 
@@ -2484,7 +2483,7 @@ static void gr_gk20a_free_channel_gr_ctx(struct channel_gk20a *c)
        nvhost_dbg_fn("");
 
        gk20a_gmmu_unmap(ch_vm, ch_ctx->gr_ctx.gpu_va,
-                       ch_ctx->gr_ctx.size, mem_flag_none);
+                       ch_ctx->gr_ctx.size, gk20a_mem_flag_none);
        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
        dma_free_attrs(d, ch_ctx->gr_ctx.size,
                ch_ctx->gr_ctx.pages, ch_ctx->gr_ctx.iova, &attrs);
@@ -2520,7 +2519,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
                goto err_free;
 
        patch_ctx->gpu_va = gk20a_gmmu_map(ch_vm, &sgt, patch_ctx->size,
-                                       0, mem_flag_none);
+                                       0, gk20a_mem_flag_none);
        if (!patch_ctx->gpu_va)
                goto err_free_sgtable;
 
@@ -2549,7 +2548,7 @@ static void gr_gk20a_unmap_channel_patch_ctx(struct channel_gk20a *c)
 
        if (patch_ctx->gpu_va)
                gk20a_gmmu_unmap(ch_vm, patch_ctx->gpu_va,
-                       patch_ctx->size, mem_flag_none);
+                       patch_ctx->size, gk20a_mem_flag_none);
        patch_ctx->gpu_va = 0;
        patch_ctx->data_count = 0;
 }
index 609e00e62bc71af3db9844f4fe60036179ec8df2..85c2e5d9489d740c5f138c452f20ac44926e7ea5 100644 (file)
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/dma-mapping.h>
 #include <linux/delay.h>
 
 #include "gk20a.h"
 #include "gr_gk20a.h"
 
 #include "dev.h"
-#include "nvhost_memmgr.h"
 
 static int gk20a_determine_L2_size_bytes(struct gk20a *g)
 {
index 240e9a51a5254b5080aaf07a453860b899f0675a..5dd773e43081463743b36c5defcb4aaa4822c310 100644 (file)
 #include <linux/vmalloc.h>
 #include <asm/cacheflush.h>
 
+#ifdef CONFIG_TEGRA_NVMAP
+#include <linux/nvmap.h>
+#endif
+
 #include "dev.h"
-#include "nvhost_memmgr.h"
 #include "gk20a.h"
 #include "mm_gk20a.h"
 #include "hw_gmmu_gk20a.h"
@@ -61,8 +64,8 @@
  *  - Mappings to the same allocations are reused and refcounted.
  *  - This path does not support deferred unmapping (i.e. kernel must wait for
  *    all hw operations on the buffer to complete before unmapping).
- *  - References to memmgr and mem_handle are owned and managed by the (kernel)
- *    clients of the gk20a_vm layer.
+ *  - References to dmabuf are owned and managed by the (kernel) clients of
+ *    the gk20a_vm layer.
  *
  *
  * User space mappings
@@ -73,7 +76,7 @@
  *  - Mappings to the same allocations are reused and refcounted.
  *  - This path supports deferred unmapping (i.e. we delay the actual unmapping
  *    until all hw operations have completed).
- *  - References to memmgr and mem_handle are owned and managed by the vm_gk20a
+ *  - References to dmabuf are owned and managed by the vm_gk20a
  *    layer itself. vm.map acquires these refs, and sets
  *    mapped_buffer->own_mem_ref to record that we must release the refs when we
  *    actually unmap.
@@ -1183,14 +1186,21 @@ static u64 gk20a_vm_map_duplicate_locked(struct vm_gk20a *vm,
 {
        struct mapped_buffer_node *mapped_buffer = 0;
 
+#ifdef CONFIG_TEGRA_NVMAP
        /* fall-back to default kind if no kind is provided */
        if (kind < 0) {
                u64 nvmap_param;
-               nvhost_memmgr_get_param((struct mem_handle *)dmabuf,
-                                       NVMAP_HANDLE_PARAM_KIND,
-                                       &nvmap_param);
+               int err;
+               err = nvmap_get_dmabuf_param(dmabuf, NVMAP_HANDLE_PARAM_KIND,
+                                             &nvmap_param);
+               if (err)
+                       return 0;
                kind = nvmap_param;
        }
+#endif
+
+       if (kind < 0)
+               return 0;
 
        mapped_buffer =
                find_mapped_buffer_reverse_locked(&vm->mapped_buffers,
@@ -1214,7 +1224,7 @@ static u64 gk20a_vm_map_duplicate_locked(struct vm_gk20a *vm,
                mapped_buffer->user_mapped++;
 
                /* If the mapping comes from user space, we own
-                * the memmgr and handle refs. Since we reuse an
+                * the handle ref. Since we reuse an
                 * existing mapping here, we need to give back those
                 * refs once in order not to leak.
                 */
@@ -1294,11 +1304,11 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
        if (sgt)
                *sgt = bfr.sgt;
 
+#ifdef CONFIG_TEGRA_NVMAP
        if (kind < 0) {
                u64 value;
-               err = nvhost_memmgr_get_param((struct mem_handle *)dmabuf,
-                                             NVMAP_HANDLE_PARAM_KIND,
-                                             &value);
+               err = nvmap_get_dmabuf_param(dmabuf, NVMAP_HANDLE_PARAM_KIND,
+                                            &value);
                if (err) {
                        nvhost_err(d, "failed to get nvmap buffer kind (err=%d)",
                                   err);
@@ -1306,6 +1316,12 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
                }
                kind = value;
        }
+#endif
+
+       if (kind < 0) {
+               err = -EINVAL;
+               goto clean_up;
+       }
 
        bfr.kind_v = kind;
        bfr.size = dmabuf->size;
@@ -1712,11 +1728,12 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
                                        gmmu_pte_kind_f(kind_v) |
                                        gmmu_pte_comptagline_f(ctag);
 
-                               if (rw_flag == mem_flag_read_only) {
+                               if (rw_flag == gk20a_mem_flag_read_only) {
                                        pte_w[0] |= gmmu_pte_read_only_true_f();
                                        pte_w[1] |=
                                                gmmu_pte_write_disable_true_f();
-                               } else if (rw_flag == mem_flag_write_only) {
+                               } else if (rw_flag ==
+                                          gk20a_mem_flag_write_only) {
                                        pte_w[1] |=
                                                gmmu_pte_read_disable_true_f();
                                }
@@ -1911,7 +1928,7 @@ static int gk20a_vm_put_empty(struct vm_gk20a *vm, u64 vaddr,
                u64 page_vaddr = __locked_gmmu_map(vm, vaddr,
                        vm->zero_page_sgt, pgsz, pgsz_idx, 0, 0,
                        NVHOST_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET,
-                       mem_flag_none);
+                       gk20a_mem_flag_none);
 
                if (!page_vaddr) {
                        nvhost_err(dev_from_vm(vm), "failed to remap clean buffers!");
@@ -1931,14 +1948,12 @@ err_unmap:
        while (i--) {
                vaddr -= pgsz;
                __locked_gmmu_unmap(vm, vaddr, pgsz, pgsz_idx, 0,
-                                   mem_flag_none);
+                                   gk20a_mem_flag_none);
        }
 
        return -EINVAL;
 }
 
-/* return mem_mgr and mem_handle to caller. If the mem_handle is a kernel dup
-   from user space (as_ioctl), caller releases the kernel duplicated handle */
 /* NOTE! mapped_buffers lock must be held */
 static void gk20a_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer)
 {
@@ -1960,7 +1975,7 @@ static void gk20a_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer)
                                mapped_buffer->size,
                                mapped_buffer->pgsz_idx,
                                mapped_buffer->va_allocated,
-                               mem_flag_none);
+                               gk20a_mem_flag_none);
 
        nvhost_dbg(dbg_map, "as=%d pgsz=%d gv=0x%x,%08x own_mem_ref=%d",
                   vm_aspace_id(vm), gmmu_page_sizes[mapped_buffer->pgsz_idx],
@@ -2336,7 +2351,7 @@ int gk20a_vm_free_space(struct gk20a_as_share *as_share,
                                va_node->size,
                                va_node->pgsz_idx,
                                false,
-                               mem_flag_none);
+                               gk20a_mem_flag_none);
                kfree(va_node);
        }
        mutex_unlock(&vm->update_gmmu_lock);
@@ -2381,7 +2396,7 @@ int gk20a_vm_map_buffer(struct gk20a_as_share *as_share,
 
        ret_va = gk20a_vm_map(vm, dmabuf, *offset_align,
                        flags, kind, NULL, true,
-                       mem_flag_none);
+                       gk20a_mem_flag_none);
        *offset_align = ret_va;
        if (!ret_va) {
                dma_buf_put(dmabuf);
index 60499673e8d8abc11fb51f0cce2ddf7a81558d96..1e71f067a5ac8bb9db90a8ad4a2553914353985d 100644 (file)
@@ -170,6 +170,14 @@ struct page_table_gk20a {
        size_t size;
 };
 
+#ifndef _NVHOST_MEM_MGR_H
+enum gk20a_mem_rw_flag {
+       gk20a_mem_flag_none = 0,
+       gk20a_mem_flag_read_only = 1,
+       gk20a_mem_flag_write_only = 2,
+};
+#endif
+
 enum gmmu_pgsz_gk20a {
        gmmu_page_size_small = 0,
        gmmu_page_size_big   = 1,
@@ -330,7 +338,6 @@ int gk20a_mm_init(struct mm_gk20a *mm);
 
 #define dev_from_vm(vm) dev_from_gk20a(vm->mm->g)
 
-#define DEFAULT_ALLOC_FLAGS (mem_mgr_flag_uncacheable)
 #define DEFAULT_ALLOC_ALIGNMENT (4*1024)
 
 static inline int bar1_aperture_size_mb_gk20a(void)
index cbeafa0033aa21a141bcf1c6d9d588febb30b416..fc616963c1fd7266c28d7a9a7dda11f5dad86766 100644 (file)
@@ -27,7 +27,6 @@
 
 #include "../dev.h"
 #include "../bus_client.h"
-#include "nvhost_memmgr.h"
 #include "nvhost_acm.h"
 
 #include "gk20a.h"
@@ -1099,7 +1098,7 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
        pmu->ucode.pmu_va = gk20a_gmmu_map(vm, &sgt_pmu_ucode,
                                        GK20A_PMU_UCODE_SIZE_MAX,
                                        0, /* flags */
-                                       mem_flag_read_only);
+                                       gk20a_mem_flag_read_only);
        if (!pmu->ucode.pmu_va) {
                nvhost_err(d, "failed to map pmu ucode memory!!");
                goto err_free_ucode_sgt;
@@ -1117,7 +1116,7 @@ int gk20a_init_pmu_setup_sw(struct gk20a *g)
        pmu->seq_buf.pmu_va = gk20a_gmmu_map(vm, &sgt_seq_buf,
                                        GK20A_PMU_SEQ_BUF_SIZE,
                                        0, /* flags */
-                                       mem_flag_none);
+                                       gk20a_mem_flag_none);
        if (!pmu->seq_buf.pmu_va) {
                nvhost_err(d, "failed to map pmu ucode memory!!");
                goto err_free_seq_buf_sgt;
@@ -1163,12 +1162,12 @@ skip_init:
 
  err_unmap_seq_buf:
        gk20a_gmmu_unmap(vm, pmu->seq_buf.pmu_va,
-               GK20A_PMU_SEQ_BUF_SIZE, mem_flag_none);
+               GK20A_PMU_SEQ_BUF_SIZE, gk20a_mem_flag_none);
  err_free_seq_buf_sgt:
        gk20a_free_sgtable(&sgt_seq_buf);
  err_unmap_ucode:
        gk20a_gmmu_unmap(vm, pmu->ucode.pmu_va,
-               GK20A_PMU_UCODE_SIZE_MAX, mem_flag_none);
+               GK20A_PMU_UCODE_SIZE_MAX, gk20a_mem_flag_none);
  err_free_ucode_sgt:
        gk20a_free_sgtable(&sgt_pmu_ucode);
  err_free_seq_buf:
@@ -1312,7 +1311,7 @@ int gk20a_init_pmu_setup_hw2(struct gk20a *g)
                                        &sgt_pg_buf,
                                        size,
                                        0, /* flags */
-                                       mem_flag_none);
+                                       gk20a_mem_flag_none);
                if (!pmu->pg_buf.pmu_va) {
                        nvhost_err(d, "failed to map fecs pg buffer");
                        err = -ENOMEM;