video: tegra: nvmap: remove deferred cache flush
Author:     Krishna Reddy <vdumpa@nvidia.com>
AuthorDate: Wed, 15 Jan 2014 19:57:42 +0000 (11:57 -0800)
Commit:     Krishna Reddy <vdumpa@nvidia.com>
CommitDate: Fri, 17 Jan 2014 20:06:28 +0000 (12:06 -0800)
Remove deferred cache flush, as pin/unpin would not be
called with the dmabuf interface. User space wouldn't call
pin/unpin going forward either.

Change-Id: I4cf1896f5696cb18cf6f63a93efb71b40739e015
Signed-off-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-on: http://git-master/r/356126
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit

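With dmabuf, cache maintenance is tied to bracketing CPU access rather than
to pin/unpin, so there is nothing left to defer. As a purely hypothetical
illustration (standard 3.10-era dma-buf API calls, not code from this patch;
the function and variable names are made up), an importer touching an
nvmap-exported buffer would bracket its CPU access like this:

    #include <linux/dma-buf.h>

    /* Hypothetical sketch, not part of this patch: begin/end_cpu_access is
     * where any required cache maintenance can now be done synchronously. */
    static int cpu_touch_buffer(struct dma_buf *buf, size_t len)
    {
            int err;

            /* Make the buffer coherent for the CPU before reading it. */
            err = dma_buf_begin_cpu_access(buf, 0, len, DMA_FROM_DEVICE);
            if (err)
                    return err;

            /* ... CPU reads/writes via dma_buf_kmap()/mmap() go here ... */

            /* Hand the (possibly dirtied) lines back to the device. */
            dma_buf_end_cpu_access(buf, 0, len, DMA_TO_DEVICE);
            return 0;
    }
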
drivers/video/tegra/nvmap/nvmap.c
drivers/video/tegra/nvmap/nvmap_dev.c
drivers/video/tegra/nvmap/nvmap_handle.c
drivers/video/tegra/nvmap/nvmap_ioctl.c
drivers/video/tegra/nvmap/nvmap_priv.h

diff --git a/drivers/video/tegra/nvmap/nvmap.c b/drivers/video/tegra/nvmap/nvmap.c
index 66f09022139c4242c7233057e3b55f8653c97bfa..ed66264f2ff24dd33cc3cc14d6f9806ebc35cc2b 100644
--- a/drivers/video/tegra/nvmap/nvmap.c
+++ b/drivers/video/tegra/nvmap/nvmap.c
@@ -3,7 +3,7 @@
  *
  * Memory manager for Tegra GPU
  *
- * Copyright (c) 2009-2013, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2009-2014, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -233,15 +233,6 @@ void __nvmap_kunmap(struct nvmap_handle *h, unsigned int pagenum,
        if (WARN_ON(pagenum >= h->size >> PAGE_SHIFT))
                return;
 
-       if (nvmap_find_cache_maint_op(h->dev, h)) {
-               struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
-               /* acquire pin lock to ensure maintenance is done before
-                * handle is pinned */
-               mutex_lock(&share->pin_lock);
-               nvmap_cache_maint_ops_flush(h->dev, h);
-               mutex_unlock(&share->pin_lock);
-       }
-
        if (h->heap_pgalloc)
                paddr = page_to_phys(h->pgalloc.pages[pagenum]);
        else
@@ -337,15 +328,6 @@ void __nvmap_munmap(struct nvmap_handle *h, void *addr)
            WARN_ON(!addr))
                return;
 
-       if (nvmap_find_cache_maint_op(h->dev, h)) {
-               struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
-               /* acquire pin lock to ensure maintenance is done before
-                * handle is pinned */
-               mutex_lock(&share->pin_lock);
-               nvmap_cache_maint_ops_flush(h->dev, h);
-               mutex_unlock(&share->pin_lock);
-       }
-
        /* Handle can be locked by cache maintenance in
         * separate thread */
        if (h->heap_pgalloc) {
@@ -489,14 +471,6 @@ struct sg_table *__nvmap_sg_table(struct nvmap_client *client,
                if (err)
                        goto err;
        }
-       if (atomic_read(&h->disable_deferred_cache) <= 1) {
-               /* disable deferred cache maint */
-               atomic_set(&h->disable_deferred_cache, 1);
-               if (nvmap_find_cache_maint_op(nvmap_dev, h))
-                       nvmap_cache_maint_ops_flush(nvmap_dev, h);
-               /* avoid unnecessary check for deferred cache maint */
-               atomic_set(&h->disable_deferred_cache, 2);
-       }
        nvmap_handle_put(h);
        return sgt;
 
diff --git a/drivers/video/tegra/nvmap/nvmap_dev.c b/drivers/video/tegra/nvmap/nvmap_dev.c
index b5873598a79a518927df38c22438f6ac415d0a9b..e9957b7298dadd63ea62206e0b3e52d81f787201 100644
--- a/drivers/video/tegra/nvmap/nvmap_dev.c
+++ b/drivers/video/tegra/nvmap/nvmap_dev.c
@@ -135,12 +135,6 @@ struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev)
        return &dev->iovmm_master;
 }
 
-struct nvmap_deferred_ops *nvmap_get_deferred_ops_from_dev(
-               struct nvmap_device *dev)
-{
-       return &dev->deferred_ops;
-}
-
 /* allocates a PTE for the caller's use; returns the PTE pointer or
  * a negative errno. not safe from IRQs */
 pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr)
@@ -1115,23 +1109,6 @@ ulong nvmap_iovmm_get_used_pages(void)
        return total >> PAGE_SHIFT;
 }
 
-static void nvmap_deferred_ops_init(struct nvmap_deferred_ops *deferred_ops)
-{
-       INIT_LIST_HEAD(&deferred_ops->ops_list);
-       spin_lock_init(&deferred_ops->deferred_ops_lock);
-
-#ifdef CONFIG_NVMAP_DEFERRED_CACHE_MAINT
-       deferred_ops->enable_deferred_cache_maintenance = 1;
-#else
-       deferred_ops->enable_deferred_cache_maintenance = 0;
-#endif /* CONFIG_NVMAP_DEFERRED_CACHE_MAINT */
-
-       deferred_ops->deferred_maint_inner_requested = 0;
-       deferred_ops->deferred_maint_inner_flushed = 0;
-       deferred_ops->deferred_maint_outer_requested = 0;
-       deferred_ops->deferred_maint_outer_flushed = 0;
-}
-
 static int nvmap_probe(struct platform_device *pdev)
 {
        struct nvmap_platform_data *plat = pdev->dev.platform_data;
@@ -1180,8 +1157,6 @@ static int nvmap_probe(struct platform_device *pdev)
 
        init_waitqueue_head(&dev->iovmm_master.pin_wait);
 
-       nvmap_deferred_ops_init(&dev->deferred_ops);
-
        mutex_init(&dev->iovmm_master.pin_lock);
 #ifdef CONFIG_NVMAP_PAGE_POOLS
        for (i = 0; i < NVMAP_NUM_POOLS; i++)
@@ -1255,29 +1230,9 @@ static int nvmap_probe(struct platform_device *pdev)
        if (IS_ERR_OR_NULL(nvmap_debug_root))
                dev_err(&pdev->dev, "couldn't create debug files\n");
 
-       debugfs_create_bool("enable_deferred_cache_maintenance",
-               S_IRUGO|S_IWUSR, nvmap_debug_root,
-               (u32 *)&dev->deferred_ops.enable_deferred_cache_maintenance);
-
        debugfs_create_u32("max_handle_count", S_IRUGO,
                        nvmap_debug_root, &nvmap_max_handle_count);
 
-       debugfs_create_u64("deferred_maint_inner_requested", S_IRUGO|S_IWUSR,
-                       nvmap_debug_root,
-                       &dev->deferred_ops.deferred_maint_inner_requested);
-
-       debugfs_create_u64("deferred_maint_inner_flushed", S_IRUGO|S_IWUSR,
-                       nvmap_debug_root,
-                       &dev->deferred_ops.deferred_maint_inner_flushed);
-#ifdef CONFIG_OUTER_CACHE
-       debugfs_create_u64("deferred_maint_outer_requested", S_IRUGO|S_IWUSR,
-                       nvmap_debug_root,
-                       &dev->deferred_ops.deferred_maint_outer_requested);
-
-       debugfs_create_u64("deferred_maint_outer_flushed", S_IRUGO|S_IWUSR,
-                       nvmap_debug_root,
-                       &dev->deferred_ops.deferred_maint_outer_flushed);
-#endif /* CONFIG_OUTER_CACHE */
        for (i = 0; i < plat->nr_carveouts; i++) {
                struct nvmap_carveout_node *node = &dev->heaps[dev->nr_carveouts];
                const struct nvmap_platform_carveout *co = &plat->carveouts[i];
diff --git a/drivers/video/tegra/nvmap/nvmap_handle.c b/drivers/video/tegra/nvmap/nvmap_handle.c
index 5381e79af446ef3dba57973b21fed40cc98c1f01..8bfc7d3e3e1ff98f520aa4796f243cb9bf02b95f 100644
--- a/drivers/video/tegra/nvmap/nvmap_handle.c
+++ b/drivers/video/tegra/nvmap/nvmap_handle.c
@@ -3,7 +3,7 @@
  *
  * Handle allocation and freeing routines for nvmap
  *
- * Copyright (c) 2009-2013, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2009-2014, NVIDIA CORPORATION. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -476,8 +476,6 @@ void nvmap_free_handle_id(struct nvmap_client *client, unsigned long id)
 
 out:
        BUG_ON(!atomic_read(&h->ref));
-       if (nvmap_find_cache_maint_op(h->dev, h))
-               nvmap_cache_maint_ops_flush(h->dev, h);
        nvmap_handle_put(h);
 }
 EXPORT_SYMBOL(nvmap_free_handle_id);
diff --git a/drivers/video/tegra/nvmap/nvmap_ioctl.c b/drivers/video/tegra/nvmap/nvmap_ioctl.c
index 9b7cf23d21dfe8faf774c853cc0ca06d3d416128..a14a500fef750630281feacd9e5b3fb23b7dd07f 100644
--- a/drivers/video/tegra/nvmap/nvmap_ioctl.c
+++ b/drivers/video/tegra/nvmap/nvmap_ioctl.c
@@ -803,80 +803,16 @@ static inline bool fast_cache_maint(struct nvmap_handle *h,
 }
 #endif
 
-static void debug_count_requested_op(struct nvmap_deferred_ops *deferred_ops,
-               unsigned long size, unsigned int flags)
-{
-       unsigned long inner_flush_size = size;
-       unsigned long outer_flush_size = size;
-       (void) outer_flush_size;
-
-#ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
-       inner_flush_size = min(size, (unsigned long)
-               cache_maint_inner_threshold);
-#endif
-
-#if defined(CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS)
-       outer_flush_size = min(size, (unsigned long)
-               cache_maint_outer_threshold);
-#endif
-
-       if (flags == NVMAP_HANDLE_INNER_CACHEABLE)
-               deferred_ops->deferred_maint_inner_requested +=
-                               inner_flush_size;
-
-       if (flags == NVMAP_HANDLE_CACHEABLE) {
-               deferred_ops->deferred_maint_inner_requested +=
-                               inner_flush_size;
-#ifdef CONFIG_OUTER_CACHE
-               deferred_ops->deferred_maint_outer_requested +=
-                               outer_flush_size;
-#endif /* CONFIG_OUTER_CACHE */
-       }
-}
-
-static void debug_count_flushed_op(struct nvmap_deferred_ops *deferred_ops,
-               unsigned long size, unsigned int flags)
-{
-       unsigned long inner_flush_size = size;
-       unsigned long outer_flush_size = size;
-       (void) outer_flush_size;
-
-#ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
-       inner_flush_size = min(size, (unsigned long)
-               cache_maint_inner_threshold);
-#endif
-
-#if defined(CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS)
-       outer_flush_size = min(size, (unsigned long)
-               cache_maint_outer_threshold);
-#endif
-
-       if (flags == NVMAP_HANDLE_INNER_CACHEABLE)
-               deferred_ops->deferred_maint_inner_flushed +=
-                               inner_flush_size;
-
-       if (flags == NVMAP_HANDLE_CACHEABLE) {
-               deferred_ops->deferred_maint_inner_flushed +=
-                               inner_flush_size;
-#ifdef CONFIG_OUTER_CACHE
-               deferred_ops->deferred_maint_outer_flushed +=
-                               outer_flush_size;
-#endif /* CONFIG_OUTER_CACHE */
-       }
-}
-
 struct cache_maint_op {
-       struct list_head list_data;
        phys_addr_t start;
        phys_addr_t end;
        unsigned int op;
        struct nvmap_handle *h;
-       int error;
        bool inner;
        bool outer;
 };
 
-static void cache_maint_work_funct(struct cache_maint_op *cache_work)
+static int do_cache_maint(struct cache_maint_op *cache_work)
 {
        pgprot_t prot;
        pte_t **pte = NULL;
@@ -889,17 +825,8 @@ static void cache_maint_work_funct(struct cache_maint_op *cache_work)
        struct nvmap_client *client = h->owner;
        unsigned int op = cache_work->op;
 
-       BUG_ON(!h);
-
-       h = nvmap_handle_get(h);
-       if (!h) {
-               cache_work->error = -EFAULT;
-               return;
-       }
-       if (!h->alloc) {
-               cache_work->error = -EFAULT;
-               goto out;
-       }
+       if (!h || !h->alloc)
+               return -EFAULT;
 
        if (client)
                trace_cache_maint(client, h, pstart, pend, op);
@@ -929,13 +856,12 @@ static void cache_maint_work_funct(struct cache_maint_op *cache_work)
 
        if (pstart > h->size || pend > h->size) {
                pr_warn("cache maintenance outside handle\n");
-               cache_work->error = -EINVAL;
+               err = -EINVAL;
                goto out;
        }
 
        pstart += h->carveout->base;
        pend += h->carveout->base;
-
        loop = pstart;
 
        while (loop < pend) {
@@ -957,151 +883,7 @@ static void cache_maint_work_funct(struct cache_maint_op *cache_work)
 out:
        if (pte)
                nvmap_free_pte(h->dev, pte);
-       nvmap_handle_put(h);
-       return;
-}
-
-int nvmap_find_cache_maint_op(struct nvmap_device *dev,
-               struct nvmap_handle *h) {
-       struct nvmap_deferred_ops *deferred_ops =
-                       nvmap_get_deferred_ops_from_dev(dev);
-       struct cache_maint_op *cache_op = NULL;
-       spin_lock(&deferred_ops->deferred_ops_lock);
-       list_for_each_entry(cache_op, &deferred_ops->ops_list, list_data) {
-               if (cache_op->h == h) {
-                       spin_unlock(&deferred_ops->deferred_ops_lock);
-                       return true;
-               }
-       }
-       spin_unlock(&deferred_ops->deferred_ops_lock);
-       return false;
-}
-
-void nvmap_cache_maint_ops_flush(struct nvmap_device *dev,
-               struct nvmap_handle *h) {
-
-       struct nvmap_deferred_ops *deferred_ops =
-               nvmap_get_deferred_ops_from_dev(dev);
-
-       struct cache_maint_op *cache_op = NULL;
-       struct cache_maint_op *temp = NULL;
-
-       size_t flush_size_outer_inner = 0;
-       size_t flush_size_inner = 0;
-#ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
-       bool allow_outer_flush_by_ways;
-#endif
-       struct list_head flushed_ops;
-
-       (void) flush_size_outer_inner;
-       (void) flush_size_inner;
-       INIT_LIST_HEAD(&flushed_ops);
-
-#ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS
-       /* go through deferred ops, check if we can just do full L1/L2 flush
-        we only do list operation inside lock, actual maintenance shouldn't
-        block list operations */
-       spin_lock(&deferred_ops->deferred_ops_lock);
-
-#ifdef CONFIG_NVMAP_OUTER_CACHE_MAINT_BY_SET_WAYS
-       allow_outer_flush_by_ways =
-                       cache_maint_outer_threshold >
-                               cache_maint_inner_threshold;
-#else
-       allow_outer_flush_by_ways = false;
-#endif
-
-       if (list_empty(&deferred_ops->ops_list)) {
-               spin_unlock(&deferred_ops->deferred_ops_lock);
-               return;
-       }
-
-       /* count sum of inner and outer flush ranges */
-       list_for_each_entry(cache_op, &deferred_ops->ops_list, list_data) {
-               if (cache_op->op == NVMAP_CACHE_OP_WB_INV) {
-                       unsigned long range =
-                                       cache_op->end - cache_op->start;
-                       if (allow_outer_flush_by_ways &&
-                               cache_op->outer && cache_op->inner)
-                               flush_size_outer_inner += range;
-                       else
-                       if (cache_op->inner && !cache_op->outer)
-                               flush_size_inner += range;
-               }
-       }
-       /* collect all flush operations */
-       if (flush_size_outer_inner > cache_maint_outer_threshold) {
-               list_for_each_entry_safe(cache_op, temp,
-                                       &deferred_ops->ops_list, list_data) {
-                       if (cache_op->op == NVMAP_CACHE_OP_WB_INV &&
-                                       (cache_op->outer && cache_op->inner))
-                               list_move(&cache_op->list_data, &flushed_ops);
-               }
-               debug_count_flushed_op(deferred_ops,
-                               cache_maint_outer_threshold,
-                               NVMAP_HANDLE_CACHEABLE);
-               debug_count_flushed_op(deferred_ops,
-                               cache_maint_inner_threshold,
-                               NVMAP_HANDLE_INNER_CACHEABLE);
-       } else if (flush_size_inner > cache_maint_inner_threshold) {
-               list_for_each_entry_safe(cache_op, temp,
-                               &deferred_ops->ops_list, list_data) {
-                       if (cache_op->op == NVMAP_CACHE_OP_WB_INV &&
-                                       (cache_op->inner && !cache_op->outer))
-                               list_move(&cache_op->list_data, &flushed_ops);
-               }
-               debug_count_flushed_op(deferred_ops,
-                               cache_maint_inner_threshold,
-                               NVMAP_HANDLE_INNER_CACHEABLE);
-       }
-       spin_unlock(&deferred_ops->deferred_ops_lock);
-
-       /* do actual maintenance outside spinlock */
-       if (flush_size_outer_inner > cache_maint_outer_threshold) {
-               inner_flush_cache_all();
-               outer_flush_all();
-               /* cleanup finished ops */
-               list_for_each_entry_safe(cache_op, temp,
-                               &flushed_ops, list_data) {
-                       list_del(&cache_op->list_data);
-                       nvmap_handle_put(cache_op->h);
-                       kfree(cache_op);
-               }
-       } else if (flush_size_inner > cache_maint_inner_threshold) {
-               /* Flush only inner-cached entries */
-               inner_flush_cache_all();
-               /* cleanup finished ops */
-               list_for_each_entry_safe(cache_op, temp,
-                               &flushed_ops, list_data) {
-                       list_del(&cache_op->list_data);
-                       nvmap_handle_put(cache_op->h);
-                       kfree(cache_op);
-               }
-       }
-#endif
-       /* Flush other handles (all or only requested) */
-       spin_lock(&deferred_ops->deferred_ops_lock);
-       list_for_each_entry_safe(cache_op, temp,
-                       &deferred_ops->ops_list, list_data) {
-               if (!h || cache_op->h == h)
-                       list_move(&cache_op->list_data, &flushed_ops);
-       }
-       spin_unlock(&deferred_ops->deferred_ops_lock);
-
-       list_for_each_entry_safe(cache_op, temp,
-                       &flushed_ops, list_data) {
-
-               cache_maint_work_funct(cache_op);
-
-               if (cache_op->op == NVMAP_CACHE_OP_WB_INV)
-                       debug_count_flushed_op(deferred_ops,
-                               cache_op->end - cache_op->start,
-                               cache_op->h->flags);
-
-               list_del(&cache_op->list_data);
-               nvmap_handle_put(cache_op->h);
-               kfree(cache_op);
-       }
+       return err;
 }
 
 int __nvmap_cache_maint(struct nvmap_client *client,
@@ -1109,87 +891,24 @@ int __nvmap_cache_maint(struct nvmap_client *client,
                        unsigned long start, unsigned long end,
                        unsigned int op, unsigned int allow_deferred)
 {
-       int err = 0;
-       struct nvmap_deferred_ops *deferred_ops =
-               nvmap_get_deferred_ops_from_dev(nvmap_dev);
-       bool inner_maint = false;
-       bool outer_maint = false;
+       int err;
+       struct cache_maint_op cache_op;
 
        h = nvmap_handle_get(h);
        if (!h)
                return -EFAULT;
 
-       /* count requested flush ops */
-       if (op == NVMAP_CACHE_OP_WB_INV) {
-               spin_lock(&deferred_ops->deferred_ops_lock);
-               debug_count_requested_op(deferred_ops,
-                               end - start, h->flags);
-               spin_unlock(&deferred_ops->deferred_ops_lock);
-       }
-
-       inner_maint = h->flags == NVMAP_HANDLE_CACHEABLE ||
-                       h->flags == NVMAP_HANDLE_INNER_CACHEABLE;
-
-#ifdef CONFIG_OUTER_CACHE
-       outer_maint = h->flags == NVMAP_HANDLE_CACHEABLE;
-#endif
-
-       /* Finish deferred maintenance for the handle before invalidating */
-       if (op == NVMAP_CACHE_OP_INV &&
-                       nvmap_find_cache_maint_op(h->dev, h)) {
-               struct nvmap_share *share = nvmap_get_share_from_dev(h->dev);
-               mutex_lock(&share->pin_lock);
-               nvmap_cache_maint_ops_flush(h->dev, h);
-               mutex_unlock(&share->pin_lock);
-       }
-
-       if (op == NVMAP_CACHE_OP_WB_INV &&
-                       (inner_maint || outer_maint) &&
-                       allow_deferred == CACHE_MAINT_ALLOW_DEFERRED &&
-                       atomic_read(&h->pin) == 0 &&
-                       atomic_read(&h->disable_deferred_cache) == 0 &&
-                       !h->nvhost_priv &&
-                       deferred_ops->enable_deferred_cache_maintenance) {
-
-               struct cache_maint_op *cache_op;
-
-               cache_op = (struct cache_maint_op *)
-                               kmalloc(sizeof(struct cache_maint_op),
-                                       GFP_KERNEL);
-               cache_op->h = h;
-               cache_op->start = start;
-               cache_op->end = end;
-               cache_op->op = op;
-               cache_op->inner = inner_maint;
-               cache_op->outer = outer_maint;
-
-               spin_lock(&deferred_ops->deferred_ops_lock);
-                       list_add_tail(&cache_op->list_data,
-                               &deferred_ops->ops_list);
-               spin_unlock(&deferred_ops->deferred_ops_lock);
-       } else {
-               struct cache_maint_op cache_op;
-
-               cache_op.h = h;
-               cache_op.start = start;
-               cache_op.end = end;
-               cache_op.op = op;
-               cache_op.inner = inner_maint;
-               cache_op.outer = outer_maint;
-
-               cache_maint_work_funct(&cache_op);
-
-               if (op == NVMAP_CACHE_OP_WB_INV) {
-                       spin_lock(&deferred_ops->deferred_ops_lock);
-                       debug_count_flushed_op(deferred_ops,
-                               end - start, h->flags);
-                       spin_unlock(&deferred_ops->deferred_ops_lock);
-               }
+       cache_op.h = h;
+       cache_op.start = start;
+       cache_op.end = end;
+       cache_op.op = op;
+       cache_op.inner = h->flags == NVMAP_HANDLE_CACHEABLE ||
+                        h->flags == NVMAP_HANDLE_INNER_CACHEABLE;
+       cache_op.outer = h->flags == NVMAP_HANDLE_CACHEABLE;
 
-               err = cache_op.error;
-               nvmap_handle_put(h);
-       }
-       return 0;
+       err = do_cache_maint(&cache_op);
+       nvmap_handle_put(h);
+       return err;
 }
 
 static int rw_handle_page(struct nvmap_handle *h, int is_read,
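After the hunk above, __nvmap_cache_maint() services every request
immediately and returns the real result of do_cache_maint() instead of an
unconditional 0; the allow_deferred argument no longer changes behaviour.
A hypothetical caller (names assumed, not taken from this patch) now looks
like:

    /* Hypothetical caller sketch: write back and invalidate the whole
     * handle synchronously; the error from do_cache_maint() propagates. */
    int err = __nvmap_cache_maint(client, handle, 0, handle->size,
                                  NVMAP_CACHE_OP_WB_INV,
                                  CACHE_MAINT_IMMEDIATE);
    if (err)
            pr_err("nvmap: cache maintenance failed: %d\n", err);
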
diff --git a/drivers/video/tegra/nvmap/nvmap_priv.h b/drivers/video/tegra/nvmap/nvmap_priv.h
index dc8b5eb4e38dc6ebece8812730c58046dab7ab15..1da8310ee8cbf7605cc34e2d9c3a2553a345c4f0 100644
--- a/drivers/video/tegra/nvmap/nvmap_priv.h
+++ b/drivers/video/tegra/nvmap/nvmap_priv.h
@@ -87,16 +87,6 @@ extern struct platform_device *nvmap_pdev;
 #define CACHE_MAINT_IMMEDIATE          0
 #define CACHE_MAINT_ALLOW_DEFERRED     1
 
-struct nvmap_deferred_ops {
-       struct list_head ops_list;
-       spinlock_t deferred_ops_lock;
-       bool enable_deferred_cache_maintenance;
-       u64 deferred_maint_inner_requested;
-       u64 deferred_maint_inner_flushed;
-       u64 deferred_maint_outer_requested;
-       u64 deferred_maint_outer_flushed;
-};
-
 /* handles allocated using shared system memory (either IOVMM- or high-order
  * page allocations */
 struct nvmap_pgalloc {
@@ -109,7 +99,6 @@ struct nvmap_handle {
        struct rb_node node;    /* entry on global handle tree */
        atomic_t ref;           /* reference count (i.e., # of duplications) */
        atomic_t pin;           /* pin count */
-       atomic_t disable_deferred_cache;
        unsigned long flags;
        size_t size;            /* padded (as-allocated) size */
        size_t orig_size;       /* original (as-requested) size */
@@ -241,7 +230,6 @@ struct nvmap_device {
        struct nvmap_share iovmm_master;
        struct list_head clients;
        spinlock_t      clients_lock;
-       struct nvmap_deferred_ops deferred_ops;
 };
 
 static inline void nvmap_ref_lock(struct nvmap_client *priv)
@@ -313,12 +301,6 @@ void nvmap_carveout_commit_subtract(struct nvmap_client *client,
 
 struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev);
 
-void nvmap_cache_maint_ops_flush(struct nvmap_device *dev,
-               struct nvmap_handle *h);
-
-struct nvmap_deferred_ops *nvmap_get_deferred_ops_from_dev(
-               struct nvmap_device *dev);
-
 int nvmap_find_cache_maint_op(struct nvmap_device *dev,
                struct nvmap_handle *h);