h = nvmap_get_handle_id(c, id);
if (!h)
return -EPERM;
- spin_lock(&h->lock);
+ mutex_lock(&h->lock);
phys = handle_phys(h);
- spin_unlock(&h->lock);
+ mutex_unlock(&h->lock);
nvmap_handle_put(h);
return phys;
bool secure; /* zap IOVMM area on unpin */
bool heap_pgalloc; /* handle is page allocated (sysmem / iovmm) */
bool alloc; /* handle has memory allocated */
- spinlock_t lock;
+ struct mutex lock;
};
struct nvmap_share {
struct rb_root handle_refs;
atomic_t iovm_commit;
size_t iovm_limit;
- spinlock_t ref_lock;
+ struct mutex ref_lock;
bool super;
atomic_t count;
struct task_struct *task;
/* Acquire the client's ref_lock, which guards its handle-ref bookkeeping
 * (see handle_refs rb-tree above).  This patch converts the lock from a
 * spinlock to a mutex, so the critical section may now sleep.
 * NOTE(review): callers must therefore be in process context — confirm no
 * atomic-context callers remain after this conversion. */
static inline void nvmap_ref_lock(struct nvmap_client *priv)
{
- spin_lock(&priv->ref_lock);
+ mutex_lock(&priv->ref_lock);
}
/* Release the client's ref_lock taken by nvmap_ref_lock().
 * Converted from spin_unlock to mutex_unlock to match the lock type
 * change; must be called by the same task that acquired the mutex. */
static inline void nvmap_ref_unlock(struct nvmap_client *priv)
{
- spin_unlock(&priv->ref_lock);
+ mutex_unlock(&priv->ref_lock);
}
struct device *nvmap_client_to_device(struct nvmap_client *client);
task_unlock(current->group_leader);
client->task = task;
- spin_lock_init(&client->ref_lock);
+ mutex_init(&client->ref_lock);
atomic_set(&client->count, 1);
return client;
smp_rmb();
pins = atomic_read(&ref->pin);
- spin_lock(&ref->handle->lock);
if (ref->handle->owner == client)
ref->handle->owner = NULL;
- spin_unlock(&ref->handle->lock);
while (pins--)
nvmap_unpin_handles(client, &ref->handle, 1);
atomic_sub(h->size, &client->iovm_commit);
if (h->alloc && !h->heap_pgalloc) {
- spin_lock(&h->lock);
+ mutex_lock(&h->lock);
nvmap_carveout_commit_subtract(client,
nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
h->size);
- spin_unlock(&h->lock);
+ mutex_unlock(&h->lock);
}
nvmap_ref_unlock(client);
while (pins--)
nvmap_unpin_handles(client, &ref->handle, 1);
- spin_lock(&h->lock);
if (h->owner == client)
h->owner = NULL;
- spin_unlock(&h->lock);
kfree(ref);
BUG_ON(!h->owner);
h->size = h->orig_size = size;
h->flags = NVMAP_HANDLE_WRITE_COMBINE;
- spin_lock_init(&h->lock);
+ mutex_init(&h->lock);
nvmap_handle_add(client->dev, h);
}
if (!h->heap_pgalloc) {
- spin_lock(&h->lock);
+ mutex_lock(&h->lock);
nvmap_carveout_commit_add(client,
nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
h->size);
- spin_unlock(&h->lock);
+ mutex_unlock(&h->lock);
}
atomic_set(&ref->dupes, 1);
size_t src_align = block->align;
unsigned int src_prot = block->mem_prot;
int error = 0;
+ struct nvmap_share *share;
if (!handle) {
pr_err("INVALID HANDLE!\n");
return NULL;
}
- spin_lock(&handle->lock);
+ mutex_lock(&handle->lock);
- if (!handle->owner) {
- spin_unlock(&handle->lock);
- return NULL;
- }
+ share = nvmap_get_share_from_dev(handle->dev);
/* TODO: It is possible to use only handle lock and no share
* pin_lock, but then we'll need to lock every handle during
* each pinning operation. Need to estimate performance impact
* if we decide to simplify locking this way. */
- mutex_lock(&handle->owner->share->pin_lock);
+ mutex_lock(&share->pin_lock);
/* abort if block is pinned */
if (atomic_read(&handle->pin))
BUG_ON(error);
fail:
- mutex_unlock(&handle->owner->share->pin_lock);
- spin_unlock(&handle->lock);
+ mutex_unlock(&share->pin_lock);
+ mutex_unlock(&handle->lock);
return heap_block_new;
}
void nvmap_usecount_inc(struct nvmap_handle *h)
{
if (h->alloc && !h->heap_pgalloc) {
- spin_lock(&h->lock);
+ mutex_lock(&h->lock);
h->usecount++;
- spin_unlock(&h->lock);
+ mutex_unlock(&h->lock);
} else {
h->usecount++;
}
op.result = h->orig_size;
break;
case NVMAP_HANDLE_PARAM_ALIGNMENT:
- spin_lock(&h->lock);
+ mutex_lock(&h->lock);
if (!h->alloc)
op.result = 0;
else if (h->heap_pgalloc)
op.result = (h->carveout->base & -h->carveout->base);
else
op.result = SZ_4M;
- spin_unlock(&h->lock);
+ mutex_unlock(&h->lock);
break;
case NVMAP_HANDLE_PARAM_BASE:
if (WARN_ON(!h->alloc || !atomic_add_return(0, &h->pin)))
op.result = -1ul;
else if (!h->heap_pgalloc) {
- spin_lock(&h->lock);
+ mutex_lock(&h->lock);
op.result = h->carveout->base;
- spin_unlock(&h->lock);
+ mutex_unlock(&h->lock);
}
else if (h->pgalloc.contig)
op.result = page_to_phys(h->pgalloc.pages[0]);
if (!h->alloc)
op.result = 0;
else if (!h->heap_pgalloc) {
- spin_lock(&h->lock);
+ mutex_lock(&h->lock);
op.result = nvmap_carveout_usage(client, h->carveout);
- spin_unlock(&h->lock);
+ mutex_unlock(&h->lock);
}
else if (h->pgalloc.contig)
op.result = NVMAP_HEAP_SYSMEM;