u32 kind);
static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
enum gmmu_pgsz_gk20a pgsz_idx,
- struct sg_table *sgt,
+ struct sg_table *sgt, u64 buffer_offset,
u64 first_vaddr, u64 last_vaddr,
u8 kind_v, u32 ctag_offset, bool cacheable,
int rw_flag);
static int validate_fixed_buffer(struct vm_gk20a *vm,
struct buffer_attrs *bfr,
- u64 map_offset)
+ u64 map_offset, u64 map_size)
{
struct device *dev = dev_from_vm(vm);
struct vm_reserved_va_node *va_node;
&va_node->va_buffers_list, va_buffers_list) {
s64 begin = max(buffer->addr, map_offset);
s64 end = min(buffer->addr +
- buffer->size, map_offset + bfr->size);
+ buffer->size, map_offset + map_size);
if (end - begin > 0) {
gk20a_warn(dev, "overlapping buffer map requested");
return -EINVAL;
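The check above is a plain interval-intersection test: with the new map_size argument, a fixed-offset request is rejected only if the requested sub-range [map_offset, map_offset + map_size) overlaps an already mapped buffer, instead of always testing against the full dmabuf size. A minimal, self-contained sketch of the same test (function and parameter names here are illustrative, not from the driver):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: the same overlap test validate_fixed_buffer() applies
 * to each buffer on the reserved VA node's buffer list. */
static bool ranges_overlap(uint64_t a_start, uint64_t a_size,
			   uint64_t b_start, uint64_t b_size)
{
	int64_t begin = a_start > b_start ? a_start : b_start;	/* max of starts */
	int64_t end = (a_start + a_size) < (b_start + b_size) ?
		      (a_start + a_size) : (b_start + b_size);	/* min of ends */

	return end - begin > 0;	/* positive length means the ranges intersect */
}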
static u64 __locked_gmmu_map(struct vm_gk20a *vm,
u64 map_offset,
struct sg_table *sgt,
+ u64 buffer_offset,
u64 size,
int pgsz_idx,
u8 kind_v,
err = update_gmmu_ptes_locked(vm, pgsz_idx,
sgt,
+ buffer_offset,
map_offset, map_offset + size - 1,
kind_v,
ctag_offset,
err = update_gmmu_ptes_locked(vm,
pgsz_idx,
0, /* n/a for unmap */
+ 0, /* buffer offset - n/a for unmap */
vaddr,
vaddr + size - 1,
0, 0, false /* n/a for unmap */,
int kind,
struct sg_table **sgt,
bool user_mapped,
- int rw_flag)
+ int rw_flag,
+ u64 buffer_offset,
+ u64 mapping_size)
{
struct gk20a *g = gk20a_from_vm(vm);
struct gk20a_allocator *ctag_allocator = &g->gr.comp_tags;
buf_addr = (u64)sg_phys(bfr.sgt->sgl);
bfr.align = 1 << __ffs(buf_addr);
bfr.pgsz_idx = -1;
+ mapping_size = mapping_size ? mapping_size : bfr.size;
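Note the convention this line establishes: a mapping_size of zero falls back to bfr.size, i.e. the whole dmabuf, so callers that do not request a sub-range keep the previous behaviour.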
/* If FIX_OFFSET is set, pgsz is determined. Otherwise, select
* page size according to memory alignment */
gmmu_page_size = gmmu_page_sizes[bfr.pgsz_idx];
/* Check if we should use a fixed offset for mapping this buffer */
+
if (flags & NVHOST_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
- err = validate_fixed_buffer(vm, &bfr, offset_align);
+ err = validate_fixed_buffer(vm, &bfr,
+ offset_align, mapping_size);
if (err)
goto clean_up;
/* update gmmu ptes */
map_offset = __locked_gmmu_map(vm, map_offset,
bfr.sgt,
- bfr.size,
+ buffer_offset, /* sg offset */
+ mapping_size,
bfr.pgsz_idx,
bfr.kind_v,
bfr.ctag_offset,
flags, rw_flag);
+
if (!map_offset)
goto clean_up;
mapped_buffer->dmabuf = dmabuf;
mapped_buffer->sgt = bfr.sgt;
mapped_buffer->addr = map_offset;
- mapped_buffer->size = bfr.size;
+ mapped_buffer->size = mapping_size;
mapped_buffer->pgsz_idx = bfr.pgsz_idx;
mapped_buffer->ctag_offset = bfr.ctag_offset;
mapped_buffer->ctag_lines = bfr.ctag_lines;
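Storing mapping_size rather than bfr.size in the tracking structure means later consumers of mapped_buffer->size, including the overlap test in validate_fixed_buffer() above, operate on the length that was actually mapped.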
mutex_lock(&vm->update_gmmu_lock);
vaddr = __locked_gmmu_map(vm, 0, /* already mapped? - No */
*sgt, /* sg table */
+ 0, /* sg offset */
size,
0, /* page size index = 0 i.e. SZ_4K */
0, /* kind */
static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
enum gmmu_pgsz_gk20a pgsz_idx,
struct sg_table *sgt,
+ u64 buffer_offset,
u64 first_vaddr, u64 last_vaddr,
u8 kind_v, u32 ctag_offset,
bool cacheable,
u32 ctag_incr;
u32 page_size = gmmu_page_sizes[pgsz_idx];
u64 addr = 0;
+ u64 space_to_skip = buffer_offset;
pde_range_from_vaddr_range(vm, first_vaddr, last_vaddr,
&pde_lo, &pde_hi);
* comptags are active) is 128KB. We have checks elsewhere for that. */
ctag_incr = !!ctag_offset;
- if (sgt)
+ cur_offset = 0;
+ if (sgt) {
cur_chunk = sgt->sgl;
+ /* space_to_skip must be page aligned */
+ BUG_ON(space_to_skip & (page_size - 1));
+
+ while (space_to_skip > 0 && cur_chunk) {
+ u64 new_addr = gk20a_mm_iova_addr(cur_chunk);
+ if (new_addr) {
+ addr = new_addr;
+ addr += cur_offset;
+ }
+ cur_offset += page_size;
+ addr += page_size;
+ while (cur_chunk &&
+ cur_offset >= cur_chunk->length) {
+ cur_offset -= cur_chunk->length;
+ cur_chunk = sg_next(cur_chunk);
+ }
+ space_to_skip -= page_size;
+ }
+ }
else
cur_chunk = NULL;
- cur_offset = 0;
-
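The skip loop added above positions the scatterlist walk at buffer_offset before any PTEs are written: it consumes space_to_skip one GMMU page at a time, advancing cur_offset within the current chunk and stepping cur_chunk forward whenever the offset runs past the chunk's length, while keeping addr in step with the skipped IOVA range, so the per-PDE/PTE loop below starts at the requested sub-range. The chunk/offset bookkeeping in isolation, as a self-contained sketch (the struct and function names are hypothetical, not driver types):

#include <stdint.h>

/* Hypothetical stand-in for a scatterlist entry; only the length and the
 * link to the next chunk matter for the skip arithmetic shown here. */
struct chunk {
	uint64_t length;
	struct chunk *next;
};

/* Mirror of the skip loop's chunk/offset bookkeeping (the real loop also
 * keeps the IOVA in `addr` in step).  On return, *chunkp points at the
 * chunk holding the first byte to map and *offsetp is the byte offset
 * into that chunk. */
static void skip_into_chunks(struct chunk **chunkp, uint64_t *offsetp,
			     uint64_t space_to_skip, uint64_t page_size)
{
	struct chunk *cur = *chunkp;
	uint64_t off = 0;

	while (space_to_skip > 0 && cur) {
		off += page_size;
		while (cur && off >= cur->length) {
			off -= cur->length;
			cur = cur->next;
		}
		space_to_skip -= page_size;
	}

	*chunkp = cur;
	*offsetp = off;
}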
for (pde_i = pde_lo; pde_i <= pde_hi; pde_i++) {
u32 pte_lo, pte_hi;
u32 pte_cur;
gk20a_dbg(gpu_dbg_pte, "pte_lo=%d, pte_hi=%d", pte_lo, pte_hi);
for (pte_cur = pte_lo; pte_cur <= pte_hi; pte_cur++) {
-
if (likely(sgt)) {
u64 new_addr = gk20a_mm_iova_addr(cur_chunk);
if (new_addr) {
addr = new_addr;
addr += cur_offset;
}
-
pte_w[0] = gmmu_pte_valid_true_f() |
gmmu_pte_address_sys_f(addr
>> gmmu_pte_address_shift_v());
pte_w[1] |=
gmmu_pte_read_disable_true_f();
}
-
if (!cacheable)
pte_w[1] |= gmmu_pte_vol_true_f();
pte->ref_cnt++;
-
- gk20a_dbg(gpu_dbg_pte,
- "pte_cur=%d addr=0x%x,%08x kind=%d"
+ gk20a_dbg(gpu_dbg_pte, "pte_cur=%d addr=0x%x,%08x kind=%d"
" ctag=%d vol=%d refs=%d"
" [0x%08x,0x%08x]",
pte_cur, hi32(addr), lo32(addr),
kind_v, ctag, !cacheable,
pte->ref_cnt, pte_w[1], pte_w[0]);
-
ctag += ctag_incr;
cur_offset += page_size;
addr += page_size;
for (i = 0; i < num_pages; i++) {
u64 page_vaddr = __locked_gmmu_map(vm, vaddr,
- vm->zero_page_sgt, pgsz, pgsz_idx, 0, 0,
+ vm->zero_page_sgt, 0, pgsz, pgsz_idx, 0, 0,
NVHOST_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET,
gk20a_mem_flag_none);
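In-kernel users of __locked_gmmu_map() that always map a scatterlist from its start, such as this zero-page path and the kernel-buffer mapping earlier, simply pass 0 for the new sg offset argument.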
gk20a_err(d, "invalid addr to unmap 0x%llx", offset);
return;
}
+
kref_put(&mapped_buffer->ref, gk20a_vm_unmap_locked_kref);
mutex_unlock(&vm->update_gmmu_lock);
}
va_node->sparse = true;
}
-
list_add_tail(&va_node->reserved_va_list, &vm->reserved_va_list);
mutex_unlock(&vm->update_gmmu_lock);
int dmabuf_fd,
u64 *offset_align,
u32 flags, /*NVHOST_AS_MAP_BUFFER_FLAGS_*/
- int kind)
+ int kind,
+ u64 buffer_offset,
+ u64 mapping_size)
{
int err = 0;
struct vm_gk20a *vm = as_share->vm;
ret_va = gk20a_vm_map(vm, dmabuf, *offset_align,
flags, kind, NULL, true,
- gk20a_mem_flag_none);
+ gk20a_mem_flag_none,
+ buffer_offset,
+ mapping_size);
+
*offset_align = ret_va;
if (!ret_va) {
dma_buf_put(dmabuf);
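Taken together, a user-space request can now map just a page-aligned window of a dmabuf into the address space. The caller-side arithmetic below is an illustrative sketch only; the helper name and the fit check are assumptions, not part of the patch, while the offset-alignment requirement comes from the BUG_ON in update_gmmu_ptes_locked() and the zero-means-whole-buffer default from gk20a_vm_map():

#include <stdint.h>

/* Hypothetical helper: sanity-check a requested sub-range before issuing
 * the map request.  Not part of the driver. */
static int check_subrange(uint64_t buf_size, uint64_t page_size,
			  uint64_t buffer_offset, uint64_t mapping_size)
{
	if (buffer_offset & (page_size - 1))
		return -1;			/* offset must be GMMU-page aligned */
	if (!mapping_size)
		mapping_size = buf_size;	/* 0 selects the whole buffer */
	if (buffer_offset + mapping_size > buf_size)
		return -1;			/* assumed: sub-range must fit in the dmabuf */
	return 0;
}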