+/*
+ * Pre-populate the PTEs of every VMA attached to @handle for the byte
+ * range [@offset, @offset + @size).  A @size of 0 means "the whole
+ * handle".  Only handles backed by the page allocator have a pages[]
+ * array, so others are ignored.
+ *
+ * Bails out (with the range possibly partially inserted) if a PTE
+ * cannot be obtained.  Holds handle->lock across the walk and takes
+ * each mm's mmap_sem per page.
+ */
+void nvmap_vm_insert_handle(struct nvmap_handle *handle, u32 offset, u32 size)
+{
+	struct list_head *vmas;
+	struct nvmap_vma_list *vma_list;
+	struct vm_area_struct *vma;
+
+	if (!handle->heap_pgalloc)
+		return;
+
+	/* size == 0 selects the entire handle. */
+	if (!size) {
+		offset = 0;
+		size = handle->size;
+	}
+
+	mutex_lock(&handle->lock);
+	vmas = &handle->vmas;
+	list_for_each_entry(vma_list, vmas, list) {
+		u32 vm_size = size;
+		u32 vma_len;
+		int start;
+		int end;
+		int i;
+
+		vma = vma_list->vma;
+		vma_len = vma->vm_end - vma->vm_start;
+
+		/*
+		 * Skip VMAs the requested range does not reach; without
+		 * this check the clamp below underflows vm_size (u32).
+		 */
+		if (offset >= vma_len)
+			continue;
+		if (offset + size > vma_len)
+			vm_size = vma_len - offset;
+
+		/*
+		 * Compute the page range in per-VMA locals.  The
+		 * original shifted 'offset' in place, corrupting the
+		 * byte offset for every VMA after the first.
+		 */
+		start = offset >> PAGE_SHIFT;
+		end = PAGE_ALIGN(offset + vm_size) >> PAGE_SHIFT;
+
+		for (i = start; i < end; i++) {
+			struct page *page = nvmap_to_page(handle->pgalloc.pages[i]);
+			pte_t *pte;
+			spinlock_t *ptl;
+
+			down_write(&vma->vm_mm->mmap_sem);
+			pte = get_locked_pte(vma->vm_mm,
+					     vma->vm_start + (i << PAGE_SHIFT),
+					     &ptl);
+			if (!pte) {
+				pr_err("nvmap: %s get_locked_pte failed\n", __func__);
+				up_write(&vma->vm_mm->mmap_sem);
+				mutex_unlock(&handle->lock);
+				return;
+			}
+			/*
+			 * page->_map_count gets incremented while mapping here.
+			 * If _count is not incremented, zap code will see that
+			 * page as a bad page and throws lot of warnings.
+			 */
+			atomic_inc(&page->_count);
+			do_set_pte(vma, vma->vm_start + (i << PAGE_SHIFT),
+				   page, pte, true, false);
+			pte_unmap_unlock(pte, ptl);
+			up_write(&vma->vm_mm->mmap_sem);
+		}
+	}
+	mutex_unlock(&handle->lock);
+}
+
+/*
+ * Batch form of nvmap_vm_insert_handle(): for each of the @nr handles,
+ * insert the pages for its (offset, size) pair into the attached VMAs
+ * and then mark that byte range dirty.  A size of 0 stands for the
+ * full handle, matching nvmap_vm_insert_handle()'s convention.
+ */
+void nvmap_vm_insert_handles(struct nvmap_handle **handles, u32 *offsets,
+	u32 *sizes, u32 nr)
+{
+	u32 idx;
+
+	for (idx = 0; idx < nr; idx++) {
+		struct nvmap_handle *h = handles[idx];
+		u32 len = sizes[idx];
+
+		if (!len)
+			len = h->size;
+
+		nvmap_vm_insert_handle(h, offsets[idx], sizes[idx]);
+		nvmap_handle_mkdirty(h, offsets[idx], len);
+	}
+}
+