video: tegra: nvmap: single page allocation policy corrected
author Kirill Artamonov <kartamonov@nvidia.com>
Tue, 24 May 2011 10:12:43 +0000 (13:12 +0300)
committer Dan Willemsen <dwillemsen@nvidia.com>
Sat, 14 Sep 2013 08:19:05 +0000 (01:19 -0700)
When the user does not use the default heap policy and explicitly selects
a GART or carveout allocation, the automatic single-page-to-sysmem rule
does not apply. Because of this broken rule, many single-page allocations
take extra space in the carveout and create unnecessary page mappings in
the GART and SMMU.

The fix adds the sysmem bit to the heap mask when the allocation is a
single page and GART or carveout is present in the heap mask (sketched
below).

bug 730124
bug 731923
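
A minimal standalone sketch of the new policy, using the flag and variable
names from the diff below (the diff itself is authoritative):

	/* Sketch: route single-page allocations to system memory when the
	 * requested heaps include IOVMM (GART/SMMU) or the generic carveout. */
	if (nr_page == 1) {
		if (heap_mask & NVMAP_HEAP_IOVMM)
			heap_mask |= NVMAP_HEAP_SYSMEM;
		else if (heap_mask & NVMAP_HEAP_CARVEOUT_GENERIC)
			heap_mask |= NVMAP_HEAP_SYSMEM; /* subject to the
							  free-memory check
							  described below */
	}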

The change also sanity-checks the available system memory before adding
the sysmem bit for carveout allocations, to avoid invoking the system OOM
killer (sketched below).

bug 777839
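
The check estimates free memory as free pages plus page-cache pages minus
swap-cache pages; the 50000000-byte threshold is roughly 48 MB, or about
12200 pages at 4 KB. A sketch of the guard, with the names used in the
diff below:

	/* Sketch: only divert a single-page carveout request to sysmem
	 * when the estimated free memory stays above the threshold. */
	unsigned long free_bytes = (global_page_state(NR_FREE_PAGES) +
				    global_page_state(NR_FILE_PAGES) -
				    total_swapcache_pages) << PAGE_SHIFT;

	if (free_bytes > NVMAP_SMALL_POLICY_SYSMEM_THRESHOLD)
		heap_mask |= NVMAP_HEAP_SYSMEM;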

Original-Change-Id: I13a62653825f6c80581adcd2682fb2608d3a284e
Reviewed-on: http://git-master/r/31383
Reviewed-by: Kirill Artamonov <kartamonov@nvidia.com>
Tested-by: Kirill Artamonov <kartamonov@nvidia.com>
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
Rebase-Id: R278606210f20aacc885fa9eb06b3a2a3d8677b55

drivers/video/tegra/nvmap/nvmap_handle.c

index 3b1c2ee06b1092b7fe69217695fd780ff248a3ed..de47aa7a5e58c0050bc79e7b1e56a28de39edac3 100644
@@ -35,6 +35,9 @@
 #include <mach/iovmm.h>
 #include <mach/nvmap.h>
 
+#include <linux/vmstat.h>
+#include <linux/swap.h>
+
 #include "nvmap.h"
 #include "nvmap_mru.h"
 #include "nvmap_common.h"
@@ -319,6 +322,10 @@ static const unsigned int heap_policy_large[] = {
        0,
 };
 
+/* Do not override single page policy if there is not much space to
+avoid invoking system oom killer. */
+#define NVMAP_SMALL_POLICY_SYSMEM_THRESHOLD 50000000
+
 int nvmap_alloc_handle_id(struct nvmap_client *client,
                          unsigned long id, unsigned int heap_mask,
                          size_t align, unsigned int flags)
@@ -340,8 +347,29 @@ int nvmap_alloc_handle_id(struct nvmap_client *client,
        h->secure = !!(flags & NVMAP_HANDLE_SECURE);
        h->flags = (flags & NVMAP_HANDLE_CACHE_FLAG);
        h->align = max_t(size_t, align, L1_CACHE_BYTES);
+
 #ifndef CONFIG_NVMAP_CONVERT_CARVEOUT_TO_IOVMM
-       /* This resriction is deprecated as alignments greater than
+#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
+       /* Allow single pages allocations in system memory to save
+        * carveout space and avoid extra iovm mappings */
+       if (nr_page == 1) {
+               if (heap_mask & NVMAP_HEAP_IOVMM)
+                       heap_mask |= NVMAP_HEAP_SYSMEM;
+               else if (heap_mask & NVMAP_HEAP_CARVEOUT_GENERIC) {
+                       /* Calculate size of free physical pages
+                        * managed by kernel */
+                       unsigned long freeMem =
+                               (global_page_state(NR_FREE_PAGES) +
+                               global_page_state(NR_FILE_PAGES) -
+                               total_swapcache_pages) << PAGE_SHIFT;
+
+                       if (freeMem > NVMAP_SMALL_POLICY_SYSMEM_THRESHOLD)
+                               heap_mask |= NVMAP_HEAP_SYSMEM;
+               }
+       }
+#endif
+
+       /* This restriction is deprecated as alignments greater than
           PAGE_SIZE are now correctly handled, but it is retained for
           AP20 compatibility. */
        if (h->align > PAGE_SIZE)