goto out;
if (h->heap_pgalloc)
- paddr = page_to_phys(h->pgalloc.pages[pagenum]);
+ paddr = page_to_phys(nvmap_to_page(h->pgalloc.pages[pagenum]));
else
paddr = h->carveout->base + pagenum * PAGE_SIZE;
return;
if (h->heap_pgalloc)
- paddr = page_to_phys(h->pgalloc.pages[pagenum]);
+ paddr = page_to_phys(nvmap_to_page(h->pgalloc.pages[pagenum]));
else
paddr = h->carveout->base + pagenum * PAGE_SIZE;
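Every access to h->pgalloc.pages[] now goes through nvmap_to_page(). The helper itself is not part of this excerpt; below is a minimal sketch of the assumed implementation, in which nvmap keeps private flag bits in the low bits of each stored page pointer (the mask name NVMAP_PAGE_BITS is hypothetical):

/*
 * Sketch only: struct page pointers are at least word-aligned, so the
 * low bits are free to carry nvmap-private state and must be masked
 * off before the pointer is dereferenced or converted.
 */
static inline struct page *nvmap_to_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~NVMAP_PAGE_BITS);
}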
prot = nvmap_pgprot(h, PG_PROT_KERNEL);
#ifdef NVMAP_LAZY_VFREE
+ struct page **pages;
+ void *vaddr = NULL;
if (h->heap_pgalloc) {
- if (!h->vaddr)
- h->vaddr = vm_map_ram(h->pgalloc.pages,
+ if (!h->vaddr) {
+ pages = nvmap_pages(h->pgalloc.pages,
+ h->size >> PAGE_SHIFT);
+ if (!pages)
+ return NULL;
+ vaddr = vm_map_ram(pages,
h->size >> PAGE_SHIFT, -1, prot);
+ nvmap_altfree(pages,
+ (h->size >> PAGE_SHIFT) * sizeof(*pages));
+ }
+ if (vaddr && atomic_long_cmpxchg(&h->vaddr, 0, (long)vaddr))
+ vm_unmap_ram(vaddr, h->size >> PAGE_SHIFT);
return h->vaddr;
}
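vm_map_ram() expects an array of plain page pointers, so the lazy-vfree path first materializes a decoded copy with nvmap_pages() and releases it right after mapping. A sketch under the same assumptions (nvmap_altalloc() as the allocating counterpart of nvmap_altfree() is an assumption):

/* Sketch: build a temporary array of decoded page pointers. */
static struct page **nvmap_pages(struct page **pages, u32 nr_pages)
{
	struct page **decoded;
	u32 i;

	decoded = nvmap_altalloc(nr_pages * sizeof(*decoded));
	if (!decoded)
		return NULL;
	for (i = 0; i < nr_pages; i++)
		decoded[i] = nvmap_to_page(pages[i]);
	return decoded;
}

Note the publication step above: atomic_long_cmpxchg() installs the fresh mapping in h->vaddr only if no other thread got there first; on a lost race the local mapping is immediately torn down with vm_unmap_ram(), so at most one mapping survives.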
#else
{
struct sg_table *sgt = NULL;
int err, npages;
+ struct page **pages;
if (!virt_addr_valid(h))
return ERR_PTR(-EINVAL);
	if (err)
		goto err;
sg_set_buf(sgt->sgl, phys_to_virt(handle_phys(h)), h->size);
} else {
- err = sg_alloc_table_from_pages(sgt, h->pgalloc.pages,
+ pages = nvmap_pages(h->pgalloc.pages, npages);
+ err = sg_alloc_table_from_pages(sgt, pages,
npages, 0, h->size, GFP_KERNEL);
+ nvmap_altfree(pages, npages * sizeof(*pages));
if (err)
goto err;
}
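nvmap_altfree() is the matching release helper for these transient arrays. A plausible sketch, assuming the usual kmalloc-or-vmalloc split on a size threshold (the threshold name is hypothetical):

/* Sketch: free a buffer whose allocator depends on its size. */
static inline void nvmap_altfree(void *ptr, size_t len)
{
	if (!ptr)
		return;
	if (len > PAGELIST_VMALLOC_MIN)
		vfree(ptr);
	else
		kfree(ptr);
}

This would also explain why the #include <linux/slab.h> and <linux/vmalloc.h> lines can be dropped further down: presumably the file no longer calls kmalloc()/kfree()/vfree() directly once the choice lives behind the helper.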
page = pfn_to_page(pfn);
} else {
offs >>= PAGE_SHIFT;
- page = priv->handle->pgalloc.pages[offs];
+ page = nvmap_to_page(priv->handle->pgalloc.pages[offs]);
}
if (page)
	get_page(page);
}
for (i = 0; i < h->size >> PAGE_SHIFT; i++) {
- int mapcount = page_mapcount(h->pgalloc.pages[i]);
+ struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
+ int mapcount = page_mapcount(page);
if (!mapcount)
*non_pss += PAGE_SIZE;
*total += PAGE_SIZE;
continue;
for (i = 0; i < h->size >> PAGE_SHIFT; i++) {
- int mapcount = page_mapcount(h->pgalloc.pages[i]);
+ struct page *page = nvmap_to_page(h->pgalloc.pages[i]);
+ int mapcount = page_mapcount(page);
if (!mapcount)
*non_pss += PAGE_SIZE;
*total += PAGE_SIZE;
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
#include <linux/dma-buf.h>
#include <linux/moduleparam.h>
#include <linux/nvmap.h>
vm_unmap_ram(h->vaddr, h->size >> PAGE_SHIFT);
#endif
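+	/*
+	 * Decode in place: after this loop h->pgalloc.pages[] holds raw
+	 * struct page pointers, which is what the page-pool and
+	 * page-freeing code below expects.
+	 */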
+ for (i = 0; i < nr_page; i++)
+ h->pgalloc.pages[i] = nvmap_to_page(h->pgalloc.pages[i]);
+
#ifdef CONFIG_NVMAP_PAGE_POOLS
pool = &nvmap_dev->pool;
size_t size;
#ifdef NVMAP_LAZY_VFREE
+ struct page **pages;
if (inner) {
- if (!h->vaddr)
- h->vaddr = vm_map_ram(h->pgalloc.pages,
+ void *vaddr = NULL;
+
+ if (!h->vaddr) {
+ pages = nvmap_pages(h->pgalloc.pages,
+ h->size >> PAGE_SHIFT);
+ if (!pages)
+ goto per_page_cache_maint;
+ vaddr = vm_map_ram(pages,
h->size >> PAGE_SHIFT, -1, prot);
+ nvmap_altfree(pages,
+ (h->size >> PAGE_SHIFT) * sizeof(*pages));
+ }
+ if (vaddr && atomic_long_cmpxchg(&h->vaddr, 0, (long)vaddr))
+ vm_unmap_ram(vaddr, h->size >> PAGE_SHIFT);
if (h->vaddr) {
/* Fast inner cache maintenance using single mapping */
inner_cache_maint(op, h->vaddr + start, end - start);
inner = false;
}
}
+per_page_cache_maint:
#endif
+
while (start < end) {
- page = h->pgalloc.pages[start >> PAGE_SHIFT];
+ page = nvmap_to_page(h->pgalloc.pages[start >> PAGE_SHIFT]);
next = min(((start + PAGE_SIZE) & PAGE_MASK), end);
off = start & ~PAGE_MASK;
size = next - start;
if (!h->heap_pgalloc) {
phys = h->carveout->base + start;
} else {
- page = h->pgalloc.pages[start >> PAGE_SHIFT];
+ page =
+ nvmap_to_page(h->pgalloc.pages[start >> PAGE_SHIFT]);
BUG_ON(!page);
get_page(page);
phys = page_to_phys(page) + (start & ~PAGE_MASK);
nvmap_stats_read(NS_CFLUSH_DONE));
for (i = 0; i < numpages; i++) {
+ struct page *page = nvmap_to_page(pages[i]);
#ifdef CONFIG_ARM64 /* __flush_dcache_page flushes inner and outer on ARM64 */
if (flush_inner)
- __flush_dcache_page(pages[i]);
+ __flush_dcache_page(page);
#else
if (flush_inner)
- __flush_dcache_page(page_mapping(pages[i]), pages[i]);
- base = page_to_phys(pages[i]);
+ __flush_dcache_page(page_mapping(page), page);
+ base = page_to_phys(page);
outer_flush_range(base, base + PAGE_SIZE);
#endif
}
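A closing note on the flush loop: the decode step is needed for correctness, not just tidiness. page_to_phys() (and page_mapping() on ARM) applied to a still-encoded pointer would resolve to the wrong struct page and hence a bogus physical address, so every cache-maintenance path must strip the flag bits via nvmap_to_page() first.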