Use kernel APIs for kernel virtual mapping needs

Replace nvmap's private PTE management (nvmap_alloc_pte()/nvmap_free_pte()
plus open-coded set_pte_at() and TLB flushes) with the standard kernel
APIs: alloc_vm_area() to reserve a scratch kernel VA range,
ioremap_page_range() to install a mapping into it, and
unmap_kernel_range()/free_vm_area() to tear it down.
Change-Id: I6ce18a1f75a0951d220a2b991eb24364494f00a8
Signed-off-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-on: http://git-master/r/382716
Reviewed-by: Alex Waterman <alexw@nvidia.com>
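The same pattern repeats at every converted call site: reserve a page of
kernel virtual address space, point it at the target physical page, use
the mapping, and tear it down. A minimal sketch of that pattern, assuming
a kernel of this era where alloc_vm_area() is still exported; the helper
names scratch_map_page()/scratch_unmap_page() are illustrative and not
part of this patch:

    #include <linux/io.h>
    #include <linux/vmalloc.h>

    /* Map one physical page at a scratch kernel virtual address. */
    static void *scratch_map_page(struct vm_struct **areap,
                                  phys_addr_t paddr, pgprot_t prot)
    {
            struct vm_struct *area = alloc_vm_area(PAGE_SIZE, NULL);
            unsigned long kaddr;

            if (!area)
                    return NULL;
            kaddr = (unsigned long)area->addr;
            if (ioremap_page_range(kaddr, kaddr + PAGE_SIZE, paddr, prot)) {
                    free_vm_area(area);
                    return NULL;
            }
            *areap = area;
            return area->addr;
    }

    /* Undo scratch_map_page(): free_vm_area() removes the mapping and
     * returns the VA range to the vmalloc allocator.
     */
    static void scratch_unmap_page(struct vm_struct *area)
    {
            free_vm_area(area);
    }

Inside loops the patch instead keeps one area alive for the whole
operation and remaps it page by page, calling
unmap_kernel_range(kaddr, PAGE_SIZE) at the end of each iteration so the
same kaddr can be reused for the next page.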
phys_addr_t paddr;
unsigned long kaddr;
pgprot_t prot;
- pte_t **pte;
+ struct vm_struct *area = NULL;
if (!virt_addr_valid(h))
return NULL;
if (pagenum >= h->size >> PAGE_SHIFT)
goto out;
prot = nvmap_pgprot(h, PG_PROT_KERNEL);
- pte = nvmap_alloc_pte(nvmap_dev, (void **)&kaddr);
- if (!pte)
+ area = alloc_vm_area(PAGE_SIZE, NULL);
+ if (!area)
goto out;
+ kaddr = (ulong)area->addr;
if (h->heap_pgalloc)
paddr = page_to_phys(h->pgalloc.pages[pagenum]);
else
paddr = h->carveout->base + pagenum * PAGE_SIZE;
- set_pte_at(&init_mm, kaddr, *pte,
- pfn_pte(__phys_to_pfn(paddr), prot));
- nvmap_flush_tlb_kernel_page(kaddr);
+ if (ioremap_page_range(kaddr, kaddr + PAGE_SIZE, paddr, prot)) {
+ free_vm_area(area);
+ goto out;
+ }
return (void *)kaddr;
out:
nvmap_handle_put(h);
void *addr)
{
phys_addr_t paddr;
- pte_t **pte;
+ struct vm_struct *area = NULL;
if (!h ||
WARN_ON(!virt_addr_valid(h)) ||
outer_flush_range(paddr, paddr + PAGE_SIZE); /* FIXME */
}
- pte = nvmap_vaddr_to_pte(nvmap_dev, (unsigned long)addr);
- nvmap_free_pte(nvmap_dev, pte);
+ area = find_vm_area(addr);
+ if (area)
+ free_vm_area(area);
+ else
+ WARN(1, "Invalid address passed");
nvmap_handle_put(h);
}
{
pgprot_t prot;
unsigned long adj_size;
- unsigned long offs;
struct vm_struct *v;
void *p;
#endif
/* carveout - explicitly map the pfns into a vmalloc area */
-
adj_size = h->carveout->base & ~PAGE_MASK;
adj_size += h->size;
adj_size = PAGE_ALIGN(adj_size);
}
p = v->addr + (h->carveout->base & ~PAGE_MASK);
-
- for (offs = 0; offs < adj_size; offs += PAGE_SIZE) {
- unsigned long addr = (unsigned long) v->addr + offs;
- unsigned int pfn;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- pfn = __phys_to_pfn(h->carveout->base + offs);
- pgd = pgd_offset_k(addr);
- pud = pud_alloc(&init_mm, pgd, addr);
- if (!pud)
- break;
- pmd = pmd_alloc(&init_mm, pud, addr);
- if (!pmd)
- break;
- pte = pte_alloc_kernel(pmd, addr);
- if (!pte)
- break;
- set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
- nvmap_flush_tlb_kernel_page(addr);
- }
-
- if (offs != adj_size) {
- free_vm_area(v);
- nvmap_handle_put(h);
- return NULL;
- }
+ if (ioremap_page_range((ulong)v->addr, (ulong)v->addr + adj_size,
+ h->carveout->base & PAGE_MASK, prot)) {
+ free_vm_area(v);
+ nvmap_handle_put(h);
+ return NULL;
+ }
/* leave the handle ref count incremented by 1, so that
* the handle will not be freed while the kernel mapping exists.
} else {
struct vm_struct *vm;
addr -= (h->carveout->base & ~PAGE_MASK);
- vm = remove_vm_area(addr);
+ vm = find_vm_area(addr);
BUG_ON(!vm);
- kfree(vm);
+ free_vm_area(vm);
}
nvmap_handle_put(h);
}
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/oom.h>
int nvmap_flush_heap_block(struct nvmap_client *client,
struct nvmap_heap_block *block, size_t len, unsigned int prot)
{
- pte_t **pte;
- void *addr;
- uintptr_t kaddr;
+ ulong kaddr;
phys_addr_t phys = block->base;
phys_addr_t end = block->base + len;
+ struct vm_struct *area = NULL;
if (prot == NVMAP_HANDLE_UNCACHEABLE || prot == NVMAP_HANDLE_WRITE_COMBINE)
goto out;
}
#endif
- pte = nvmap_alloc_pte(nvmap_dev, &addr);
- if (IS_ERR(pte))
- return PTR_ERR(pte);
+ area = alloc_vm_area(PAGE_SIZE, NULL);
+ if (!area)
+ return -ENOMEM;
- kaddr = (uintptr_t)addr;
+ kaddr = (ulong)area->addr;
while (phys < end) {
phys_addr_t next = (phys + PAGE_SIZE) & PAGE_MASK;
- unsigned long pfn = __phys_to_pfn(phys);
void *base = (void *)kaddr + (phys & ~PAGE_MASK);
next = min(next, end);
- set_pte_at(&init_mm, kaddr, *pte, pfn_pte(pfn, PG_PROT_KERNEL));
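+ /* map this page at the scratch kaddr; it is unmapped again below */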
+ ioremap_page_range(kaddr, kaddr + PAGE_SIZE,
+ phys, PG_PROT_KERNEL);
nvmap_flush_tlb_kernel_page(kaddr);
FLUSH_DCACHE_AREA(base, next - phys);
phys = next;
+ unmap_kernel_range(kaddr, PAGE_SIZE);
}
if (prot != NVMAP_HANDLE_INNER_CACHEABLE)
outer_flush_range(block->base, block->base + len);
- nvmap_free_pte(nvmap_dev, pte);
+ free_vm_area(area);
out:
wmb();
return 0;
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
phys_addr_t paddr;
#endif
gfp_t gfp = GFP_NVMAP;
- unsigned long kaddr;
- pte_t **pte = NULL;
+ unsigned long kaddr = 0;
+ struct vm_struct *area = NULL;
if (h->userflags & NVMAP_HANDLE_ZEROED_PAGES || zero_memory) {
gfp |= __GFP_ZERO;
prot = nvmap_pgprot(h, PG_PROT_KERNEL);
- pte = nvmap_alloc_pte(nvmap_dev, (void **)&kaddr);
- if (IS_ERR(pte))
+ area = alloc_vm_area(PAGE_SIZE, NULL);
+ if (!area)
return -ENOMEM;
+ kaddr = (ulong)area->addr;
}
pages = altalloc(nr_page * sizeof(*pages));
PAGE_SIZE);
} else {
paddr = page_to_phys(pages[i]);
- set_pte_at(&init_mm, kaddr, *pte,
- pfn_pte(__phys_to_pfn(paddr),
- prot));
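+ /* map the page at the scratch kaddr, zero it, then unmap */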
+ ioremap_page_range(kaddr,
+ kaddr + PAGE_SIZE,
+ paddr, prot);
nvmap_flush_tlb_kernel_page(kaddr);
memset((char *)kaddr, 0, PAGE_SIZE);
+ unmap_kernel_range(kaddr, PAGE_SIZE);
}
}
}
goto fail;
if (h->userflags & NVMAP_HANDLE_ZEROED_PAGES || zero_memory)
- nvmap_free_pte(nvmap_dev, pte);
+ free_vm_area(area);
h->size = size;
h->pgalloc.pages = pages;
h->pgalloc.contig = contiguous;
fail:
if (h->userflags & NVMAP_HANDLE_ZEROED_PAGES || zero_memory)
- nvmap_free_pte(nvmap_dev, pte);
+ free_vm_area(area);
while (i--)
__free_page(pages[i]);
altfree(pages, nr_page * sizeof(*pages));
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/fs.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
static void heap_page_cache_maint(
struct nvmap_handle *h, unsigned long start, unsigned long end,
- unsigned int op, bool inner, bool outer, pte_t **pte,
+ unsigned int op, bool inner, bool outer,
unsigned long kaddr, pgprot_t prot)
{
struct page *page;
if (inner) {
void *vaddr = (void *)kaddr + off;
- BUG_ON(!pte);
BUG_ON(!kaddr);
- set_pte_at(&init_mm, kaddr, *pte,
- pfn_pte(__phys_to_pfn(paddr), prot));
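+ /* temporary per-page mapping for inner cache maintenance */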
+ ioremap_page_range(kaddr, kaddr + PAGE_SIZE,
+ paddr, prot);
nvmap_flush_tlb_kernel_page(kaddr);
inner_cache_maint(op, vaddr, size);
+ unmap_kernel_range(kaddr, PAGE_SIZE);
}
if (outer)
{
if (h->heap_pgalloc) {
heap_page_cache_maint(h, start,
- end, op, false, true, NULL, 0, 0);
+ end, op, false, true, 0, 0);
} else {
phys_addr_t pstart;
static int do_cache_maint(struct cache_maint_op *cache_work)
{
pgprot_t prot;
- pte_t **pte = NULL;
unsigned long kaddr;
phys_addr_t pstart = cache_work->start;
phys_addr_t pend = cache_work->end;
struct nvmap_handle *h = cache_work->h;
struct nvmap_client *client;
unsigned int op = cache_work->op;
+ struct vm_struct *area = NULL;
if (!h || !h->alloc)
return -EFAULT;
goto out;
prot = nvmap_pgprot(h, PG_PROT_KERNEL);
- pte = nvmap_alloc_pte(nvmap_dev, (void **)&kaddr);
- if (IS_ERR(pte)) {
- err = PTR_ERR(pte);
- pte = NULL;
+ area = alloc_vm_area(PAGE_SIZE, NULL);
+ if (!area) {
+ err = -ENOMEM;
goto out;
}
+ kaddr = (ulong)area->addr;
if (h->heap_pgalloc) {
heap_page_cache_maint(h, pstart, pend, op, true,
(h->flags == NVMAP_HANDLE_INNER_CACHEABLE) ?
- false : true,
- pte, kaddr, prot);
+ false : true, kaddr, prot);
goto out;
}
void *base = (void *)kaddr + (loop & ~PAGE_MASK);
next = min(next, pend);
- set_pte_at(&init_mm, kaddr, *pte,
- pfn_pte(__phys_to_pfn(loop), prot));
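+ /* map one page, do the cache op, then unmap so kaddr can be reused */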
+ ioremap_page_range(kaddr, kaddr + PAGE_SIZE,
+ loop, prot);
nvmap_flush_tlb_kernel_page(kaddr);
inner_cache_maint(op, base, next - loop);
loop = next;
+ unmap_kernel_range(kaddr, PAGE_SIZE);
}
if (h->flags != NVMAP_HANDLE_INNER_CACHEABLE)
outer_cache_maint(op, pstart, pend - pstart);
out:
- if (pte)
- nvmap_free_pte(nvmap_dev, pte);
+ if (area)
+ free_vm_area(area);
return err;
}
static int rw_handle_page(struct nvmap_handle *h, int is_read,
unsigned long start, unsigned long rw_addr,
- unsigned long bytes, unsigned long kaddr, pte_t *pte)
+ unsigned long bytes, unsigned long kaddr)
{
pgprot_t prot = nvmap_pgprot(h, PG_PROT_KERNEL);
unsigned long end = start + bytes;
phys = page_to_phys(page) + (start & ~PAGE_MASK);
}
- set_pte_at(&init_mm, kaddr, pte,
- pfn_pte(__phys_to_pfn(phys), prot));
- nvmap_flush_tlb_kernel_page(kaddr);
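+ /* map the page backing this chunk; unmapped after the copy */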
+ ioremap_page_range(kaddr, kaddr + PAGE_SIZE, phys, prot);
src = (void *)kaddr + (phys & ~PAGE_MASK);
phys = PAGE_SIZE - (phys & ~PAGE_MASK);
if (page)
put_page(page);
+ unmap_kernel_range(kaddr, PAGE_SIZE);
}
return err;
unsigned long count)
{
ssize_t copied = 0;
- pte_t **pte;
void *addr;
int ret = 0;
+ struct vm_struct *area;
if (!elem_size)
return -EINVAL;
count = 1;
}
- pte = nvmap_alloc_pte(nvmap_dev, &addr);
- if (IS_ERR(pte))
- return PTR_ERR(pte);
+ area = alloc_vm_area(PAGE_SIZE, NULL);
+ if (!area)
+ return -ENOMEM;
+ addr = area->addr;
while (count--) {
if (h_offs + elem_size > h->size) {
CACHE_MAINT_IMMEDIATE);
ret = rw_handle_page(h, is_read, h_offs, sys_addr,
- elem_size, (unsigned long)addr, *pte);
+ elem_size, (unsigned long)addr);
if (ret)
break;
h_offs += h_stride;
}
- nvmap_free_pte(nvmap_dev, pte);
+ free_vm_area(area);
return ret ?: copied;
}