/*
* Tegra Graphics Host Client Module
*
- * Copyright (c) 2010-2014, NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2010-2015, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
priv->timeout = 0;
priv->vm = nvhost_vm_allocate(pdev);
+ if (!priv->vm)
+ goto fail_alloc_vm;
mutex_unlock(&channel_lock);
return 0;
+fail_alloc_vm:
fail_power_on:
fail_add_client:
kfree(priv);
struct host1x_actmon;
struct nvhost_vm;
struct nvhost_vm_buffer;
+struct nvhost_vm_static_buffer;
struct nvhost_cdma_ops {
void (*start)(struct nvhost_cdma *);
struct nvhost_vm_buffer *buffer);
void (*unpin_buffer)(struct nvhost_vm *vm,
struct nvhost_vm_buffer *buffer);
+ int (*pin_static_buffer)(struct nvhost_vm *vm,
+ struct nvhost_vm_static_buffer *sbuffer);
};
struct nvhost_pushbuffer_ops {
*
* Tegra Graphics Host Driver Entrypoint
*
- * Copyright (c) 2010-2014, NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2010-2015, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
nvhost = host;
host->dev = dev;
+ INIT_LIST_HEAD(&host->static_mappings_list);
+ INIT_LIST_HEAD(&host->vm_list);
+ mutex_init(&host->vm_mutex);
mutex_init(&pdata->lock);
/* Copy host1x parameters. The private_data gets replaced
*
* Tegra Graphics Host Driver Entrypoint
*
- * Copyright (c) 2010-2014, NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2010-2015, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
struct nvhost_channel **chlist; /* channel list */
struct mutex chlist_mutex; /* mutex for channel list */
unsigned long allocated_channels;
- unsigned long next_free_ch;
+
+ /* nvhost vm specific structures */
+ struct list_head static_mappings_list;
+ struct list_head vm_list;
+ struct mutex vm_mutex;
};
extern struct nvhost_master *nvhost;
unsigned int num_buffers;
};
+/*
+ * Record a permanent static mapping and pin it into every live vm.
+ *
+ * The sbuffer descriptor is intentionally never freed: it stays on
+ * host->static_mappings_list forever so that vms created later can
+ * also pin it (see nvhost_vm_allocate). The caller guarantees the
+ * backing memory outlives all vms.
+ */
+int nvhost_vm_map_static(struct platform_device *pdev,
+			 void *vaddr, dma_addr_t paddr,
+			 size_t size)
+{
+	struct nvhost_vm_static_buffer *sbuffer;
+	struct nvhost_master *host = nvhost_get_host(pdev);
+	struct nvhost_vm *vm;
+
+	/* if static mappings are not supported, exit */
+	if (!vm_op().pin_static_buffer)
+		return 0;
+
+	sbuffer = kzalloc(sizeof(*sbuffer), GFP_KERNEL);
+	if (!sbuffer)
+		return -ENOMEM;
+
+	sbuffer->paddr = paddr;
+	sbuffer->vaddr = vaddr;
+	sbuffer->size = size;
+	INIT_LIST_HEAD(&sbuffer->list);
+
+	/* take global vm mutex */
+	mutex_lock(&host->vm_mutex);
+
+	/* add this buffer into list of static mappings */
+	list_add_tail(&sbuffer->list, &host->static_mappings_list);
+
+	/* add the static mapping to all existing vms */
+	list_for_each_entry(vm, &host->vm_list, vm_list) {
+		int err = vm_op().pin_static_buffer(vm, sbuffer);
+		/* this is irreversible; just warn of failed mapping */
+		WARN_ON(err);
+	}
+
+	/* release the vm mutex */
+	mutex_unlock(&host->vm_mutex);
+
+	/* per-vm pin failures are only warned about, not propagated */
+	return 0;
+}
+
+
static struct nvhost_vm_buffer *nvhost_vm_find_buffer(struct rb_root *root,
struct dma_buf *dmabuf)
{
static void nvhost_vm_deinit(struct kref *kref)
{
struct nvhost_vm *vm = container_of(kref, struct nvhost_vm, kref);
+ struct nvhost_master *host = nvhost_get_host(vm->pdev);
struct nvhost_vm_buffer *buffer;
struct rb_node *node;
+ /* remove this vm from the vms list */
+ mutex_lock(&host->vm_mutex);
+ list_del(&vm->vm_list);
+ mutex_unlock(&host->vm_mutex);
+
mutex_lock(&vm->mutex);
/* go through all remaining buffers (if any) and free them here */
struct nvhost_vm *nvhost_vm_allocate(struct platform_device *pdev)
{
+ struct nvhost_vm_static_buffer *sbuffer;
+ struct nvhost_master *host = nvhost_get_host(pdev);
struct nvhost_vm *vm;
/* get room to keep vm */
goto err_init_vm;
}
+ /* take global vm mutex */
+ mutex_lock(&host->vm_mutex);
+
+ /* add this vm into list of vms */
+ list_add_tail(&vm->vm_list, &host->vm_list);
+
+ /* map all statically mapped buffers to this vm */
+ if (vm_op().pin_static_buffer) {
+ list_for_each_entry(sbuffer,
+ &host->static_mappings_list,
+ list) {
+ int err = vm_op().pin_static_buffer(vm, sbuffer);
+ if (err)
+ goto err_pin_static_buffers;
+ }
+ }
+
+ /* release the vm mutex */
+ mutex_unlock(&host->vm_mutex);
+
return vm;
+err_pin_static_buffers:
+ mutex_unlock(&host->vm_mutex);
+ vm_op().deinit(vm);
err_init_vm:
kfree(vm);
err_alloc_vm:
/* used by hardware layer */
void *private_data;
+
+ /* to track all vms in the system */
+ struct list_head vm_list;
};
struct nvhost_vm_buffer {
void *private_data;
};
+/*
+ * Descriptor for a buffer that is mapped at a fixed device address
+ * into every vm (existing and future). Created by nvhost_vm_map_static()
+ * and never freed — the mapping is permanent.
+ */
+struct nvhost_vm_static_buffer {
+	/* NOTE(review): presumably filled in by the hardware-layer
+	 * pin_static_buffer() implementation — producer not visible here;
+	 * confirm before relying on it */
+	struct sg_table *sgt;
+
+	void *vaddr;		/* kernel virtual address of the buffer */
+	dma_addr_t paddr;	/* fixed device/physical address to map at */
+	size_t size;		/* buffer size in bytes */
+
+	/* list of all statically mapped buffers */
+	struct list_head list;
+};
+
+/**
+ * nvhost_vm_map_static - map allocated area to iova
+ * @pdev: pointer to host1x or host1x client device
+ * @vaddr: kernel virtual address
+ * @paddr: desired physical address for this buffer
+ * @size: size of the buffer (in bytes)
+ *
+ * This call maps given area to all existing (and future) address spaces.
+ * The mapping is permanent and cannot be removed. User of this API is
+ * responsible to ensure that the backing memory is not released at any
+ * point.
+ *
+ * Return 0 on success, a negative error code otherwise. The buffer is
+ * mapped at the caller-supplied device address @paddr; no address is
+ * returned to the caller.
+ *
+ */
+int nvhost_vm_map_static(struct platform_device *pdev,
+ void *vaddr, dma_addr_t paddr,
+ size_t size);
+
/**
* nvhost_vm_pin_buffers - Pin mapped buffers to the hardware
* @vm: Pointer to nvhost_vm structure