/*
* drivers/media/video/tegra/nvavp/nvavp_dev.c
*
- * Copyright (c) 2011-2013, NVIDIA CORPORATION. All rights reserved.
+ * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
#include <linux/uaccess.h>
#include <linux/clk.h>
+#include <linux/compat.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/pm_runtime.h>
#include <linux/clk/tegra.h>
#include <linux/tegra-powergate.h>
+#include <linux/irqchip/tegra.h>
+#include <linux/sched.h>
+#include <linux/memblock.h>
+#include <linux/anon_inodes.h>
+#include <linux/tegra_pm_domains.h>
-#include <mach/legacy_irq.h>
-#include <mach/pm_domains.h>
-#include <linux/nvmap.h>
+#include <linux/pm_qos.h>
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/tegra-timer.h>
#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
#include "../avp/headavp.h"
#endif
#include "nvavp_os.h"
-/* HACK: this has to come from DT */
-#include "../../../../../arch/arm/mach-tegra/iomap.h"
-
#define TEGRA_NVAVP_NAME "nvavp"
#define NVAVP_PUSHBUFFER_SIZE 4096
#define NVAVP_PUSHBUFFER_MIN_UPDATE_SPACE (sizeof(u32) * 3)
-#define TEGRA_NVAVP_RESET_VECTOR_ADDR \
- (IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x200)
+static void __iomem *nvavp_reg_base;
-#define FLOW_CTRL_HALT_COP_EVENTS IO_ADDRESS(TEGRA_FLOW_CTRL_BASE + 0x4)
+#define TEGRA_NVAVP_RESET_VECTOR_ADDR (nvavp_reg_base + 0xe200)
+
+#define FLOW_CTRL_HALT_COP_EVENTS (nvavp_reg_base + 0x6000 + 0x4)
#define FLOW_MODE_STOP (0x2 << 29)
#define FLOW_MODE_NONE 0x0
-#define NVAVP_OS_INBOX IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x10)
-#define NVAVP_OS_OUTBOX IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x20)
+#define NVAVP_OS_INBOX (nvavp_reg_base + 0x10)
+#define NVAVP_OS_OUTBOX (nvavp_reg_base + 0x20)
#define NVAVP_INBOX_VALID (1 << 29)
struct mutex open_lock;
int refcount;
int video_initialized;
+ int video_refcnt;
#if defined(CONFIG_TEGRA_NVAVP_AUDIO)
int audio_initialized;
+ int audio_refcnt;
struct work_struct app_notify_work;
+ void (*audio_notify)(void);
#endif
struct work_struct clock_disable_work;
/* ucode information */
struct nvavp_ucode_info ucode_info;
+ /* client to change min cpu freq rate*/
+ struct pm_qos_request min_cpu_freq_req;
+
+ /* client to change number of min online cpus*/
+ struct pm_qos_request min_online_cpus_req;
+
struct nvavp_channel channel_info[NVAVP_NUM_CHANNELS];
bool pending;
bool stay_on;
#if defined(CONFIG_TEGRA_NVAVP_AUDIO)
struct miscdevice audio_misc_dev;
#endif
+ struct task_struct *init_task;
};
struct nvavp_clientctx {
- struct nvmap_client *nvmap;
struct nvavp_pushbuffer_submit_hdr submit_hdr;
struct nvavp_reloc relocs[NVAVP_MAX_RELOCATION_COUNT];
- struct nvmap_handle_ref *gather_mem;
int num_relocs;
struct nvavp_info *nvavp;
int channel_id;
u32 clk_reqs;
+ spinlock_t iova_lock;
+ struct rb_root iova_handles;
};
+static struct nvavp_info *nvavp_info_ctx;
+
+/*
+ * Take a runtime-PM reference on the nvavp host device.
+ * Caller holds nvavp->open_lock.  Ordinary callers drop the lock around
+ * the blocking pm_runtime_get_sync(); the thread currently running
+ * nvavp_init()/nvavp_uninit() (tracked in nvavp->init_task) instead takes
+ * a non-resuming reference so it cannot block on its own resume path.
+ * Always returns 0.
+ */
+static int nvavp_runtime_get(struct nvavp_info *nvavp)
+{
+ if (nvavp->init_task != current) {
+ mutex_unlock(&nvavp->open_lock);
+ pm_runtime_get_sync(&nvavp->nvhost_dev->dev);
+ mutex_lock(&nvavp->open_lock);
+ }
+ else
+ pm_runtime_get_noresume(&nvavp->nvhost_dev->dev);
+
+ return 0;
+}
+
+/*
+ * Drop a runtime-PM reference taken by nvavp_runtime_get(); the device
+ * is suspended after the autosuspend delay expires.
+ */
+static void nvavp_runtime_put(struct nvavp_info *nvavp)
+{
+ pm_runtime_mark_last_busy(&nvavp->nvhost_dev->dev);
+ pm_runtime_put_autosuspend(&nvavp->nvhost_dev->dev);
+}
+
+/* Allow DMA segments of any size when mapping dma-bufs to this device. */
+static struct device_dma_parameters nvavp_dma_parameters = {
+	.max_segment_size = UINT_MAX,
+};
+
+/* Per-dmabuf IOVA mapping, kept in a client's iova_handles rb tree. */
+struct nvavp_iova_info {
+	struct rb_node node;	/* link in clientctx->iova_handles */
+	atomic_t ref;		/* outstanding map requests for this buffer */
+	dma_addr_t addr;	/* device (IOVA) address of the mapping */
+	struct dma_buf *dmabuf;	/* buffer this mapping refers to */
+	struct dma_buf_attachment *attachment;
+	struct sg_table *sgt;
+};
+
+/*
+ * Unmaps the dmabuf and removes its iova info from the rb tree.
+ * Call with client iova_lock held.
+ */
+static void nvavp_remove_iova_info_locked(
+ struct nvavp_clientctx *clientctx,
+ struct nvavp_iova_info *b)
+{
+ struct nvavp_info *nvavp = clientctx->nvavp;
+
+ dev_dbg(&nvavp->nvhost_dev->dev,
+ "remove iova addr (0x%lx))\n", (unsigned long)b->addr);
+ /* Tear down in reverse order of nvavp_get_iova_addr(): unmap the
+ * attachment, detach, then drop the dma_buf reference held by this
+ * iova_info. */
+ dma_buf_unmap_attachment(b->attachment,
+ b->sgt, DMA_BIDIRECTIONAL);
+ dma_buf_detach(b->dmabuf, b->attachment);
+ dma_buf_put(b->dmabuf);
+ rb_erase(&b->node, &clientctx->iova_handles);
+ kfree(b);
+}
+
+/*
+ * Searches the given addr in rb tree and return valid pointer if present
+ * Call with client iova_lock held.
+ */
+static struct nvavp_iova_info *nvavp_search_iova_info_locked(
+ struct nvavp_clientctx *clientctx, struct dma_buf *dmabuf,
+ struct rb_node **curr_parent)
+{
+ struct rb_node *parent = NULL;
+ struct rb_node **p = &clientctx->iova_handles.rb_node;
+
+ /* The tree is ordered by dmabuf pointer value. */
+ while (*p) {
+ struct nvavp_iova_info *b;
+ parent = *p;
+ b = rb_entry(parent, struct nvavp_iova_info, node);
+ if (b->dmabuf == dmabuf)
+ return b;
+ else if (dmabuf > b->dmabuf)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ /* Not found: report the would-be parent for a subsequent insert. */
+ *curr_parent = parent;
+ return NULL;
+}
+
+/*
+ * Adds a newly-created iova info handle to the rb tree
+ * Call with client iova_lock held.
+ */
+static void nvavp_add_iova_info_locked(struct nvavp_clientctx *clientctx,
+ struct nvavp_iova_info *h, struct rb_node *parent)
+{
+ struct nvavp_iova_info *b;
+ struct nvavp_info *nvavp = clientctx->nvavp;
+ struct rb_node **p = &clientctx->iova_handles.rb_node;
+
+ dev_dbg(&nvavp->nvhost_dev->dev,
+ "add iova addr (0x%lx))\n", (unsigned long)h->addr);
+
+ /* parent comes from a preceding failed search under the same lock;
+ * recompute which child slot of parent the new node belongs in. */
+ if (parent) {
+ b = rb_entry(parent, struct nvavp_iova_info, node);
+ if (h->dmabuf > b->dmabuf)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ rb_link_node(&h->node, parent, p);
+ rb_insert_color(&h->node, &clientctx->iova_handles);
+}
+
+/*
+ * Maps and adds the iova address if not already present in the rb tree;
+ * if present, increments the ref count and returns the existing address.
+ */
+static int nvavp_get_iova_addr(struct nvavp_clientctx *clientctx,
+ struct dma_buf *dmabuf, dma_addr_t *addr)
+{
+ struct nvavp_info *nvavp = clientctx->nvavp;
+ struct nvavp_iova_info *h;
+ struct nvavp_iova_info *b = NULL;
+ struct rb_node *curr_parent = NULL;
+ int ret = 0;
+
+ spin_lock(&clientctx->iova_lock);
+ b = nvavp_search_iova_info_locked(clientctx, dmabuf, &curr_parent);
+ if (b) {
+ /* dmabuf already present in rb tree */
+ atomic_inc(&b->ref);
+ *addr = b->addr;
+ dev_dbg(&nvavp->nvhost_dev->dev,
+ "found iova addr (0x%pa) ref count(%d))\n",
+ &(b->addr), atomic_read(&b->ref));
+ goto out;
+ }
+ spin_unlock(&clientctx->iova_lock);
+
+ /* create new iova_info node */
+ h = kzalloc(sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ h->dmabuf = dmabuf;
+ h->attachment = dma_buf_attach(dmabuf, &nvavp->nvhost_dev->dev);
+ if (IS_ERR(h->attachment)) {
+ dev_err(&nvavp->nvhost_dev->dev, "cannot attach dmabuf\n");
+ ret = PTR_ERR(h->attachment);
+ goto err_put;
+ }
+
+ h->sgt = dma_buf_map_attachment(h->attachment, DMA_BIDIRECTIONAL);
+ if (IS_ERR(h->sgt)) {
+ dev_err(&nvavp->nvhost_dev->dev, "cannot map dmabuf\n");
+ ret = PTR_ERR(h->sgt);
+ goto err_map;
+ }
+
+ h->addr = sg_dma_address(h->sgt->sgl);
+ atomic_set(&h->ref, 1);
+
+ /* Re-check under the lock: another thread may have mapped the same
+ * dmabuf while we were attaching/mapping without the lock held. */
+ spin_lock(&clientctx->iova_lock);
+ b = nvavp_search_iova_info_locked(clientctx, dmabuf, &curr_parent);
+ if (b) {
+ dev_dbg(&nvavp->nvhost_dev->dev,
+ "found iova addr (0x%pa) ref count(%d))\n",
+ &(b->addr), atomic_read(&b->ref));
+ atomic_inc(&b->ref);
+ *addr = b->addr;
+ spin_unlock(&clientctx->iova_lock);
+ goto err_exist;
+ }
+ nvavp_add_iova_info_locked(clientctx, h, curr_parent);
+ *addr = h->addr;
+
+out:
+ spin_unlock(&clientctx->iova_lock);
+ return 0;
+err_exist:
+ dma_buf_unmap_attachment(h->attachment, h->sgt, DMA_BIDIRECTIONAL);
+err_map:
+ dma_buf_detach(dmabuf, h->attachment);
+err_put:
+ /* On error/duplicate paths this consumes the dma_buf reference the
+ * caller (nvavp_map_iova) took with dma_buf_get(); the caller does
+ * not put it again on failure. */
+ dma_buf_put(dmabuf);
+ kfree(h);
+ return ret;
+}
+
+/*
+ * Release the given iova address if it is last client otherwise dec ref count.
+ */
+static void nvavp_release_iova_addr(struct nvavp_clientctx *clientctx,
+ struct dma_buf *dmabuf, dma_addr_t addr)
+{
+ struct nvavp_info *nvavp = clientctx->nvavp;
+ struct nvavp_iova_info *b = NULL;
+ struct rb_node *curr_parent;
+
+ spin_lock(&clientctx->iova_lock);
+ b = nvavp_search_iova_info_locked(clientctx, dmabuf, &curr_parent);
+ if (!b) {
+ /* Unmap of a never-mapped buffer: log and ignore. */
+ dev_err(&nvavp->nvhost_dev->dev,
+ "error iova addr (0x%pa) is not found\n", &addr);
+ goto out;
+ }
+ /* if it is last reference, release iova info */
+ if (atomic_sub_return(1, &b->ref) == 0)
+ nvavp_remove_iova_info_locked(clientctx, b);
+out:
+ spin_unlock(&clientctx->iova_lock);
+}
+
+/*
+ * Release all the iova addresses in rb tree
+ */
+static void nvavp_remove_iova_mapping(struct nvavp_clientctx *clientctx)
+{
+ struct rb_node *p = NULL;
+ struct nvavp_iova_info *b;
+
+ /* Client teardown: tear down every mapping regardless of its ref
+ * count, emptying the tree from the leftmost node. */
+ spin_lock(&clientctx->iova_lock);
+ while ((p = rb_first(&clientctx->iova_handles))) {
+ b = rb_entry(p, struct nvavp_iova_info, node);
+ nvavp_remove_iova_info_locked(clientctx, b);
+ }
+ spin_unlock(&clientctx->iova_lock);
+}
#if defined(CONFIG_TEGRA_NVAVP_AUDIO)
static int nvavp_get_audio_init_status(struct nvavp_info *nvavp)
static void nvavp_clks_enable(struct nvavp_info *nvavp)
{
- if (nvavp->clk_enabled++ == 0) {
- pm_runtime_get_sync(&nvavp->nvhost_dev->dev);
+ if (nvavp->clk_enabled == 0) {
+ nvavp_runtime_get(nvavp);
+ nvavp->clk_enabled++;
nvhost_module_busy_ext(nvavp->nvhost_dev);
clk_prepare_enable(nvavp->bsev_clk);
clk_prepare_enable(nvavp->vde_clk);
__func__, nvavp->sclk_rate);
dev_dbg(&nvavp->nvhost_dev->dev, "%s: setting emc_clk to %lu\n",
__func__, nvavp->emc_clk_rate);
+ } else {
+ nvavp->clk_enabled++;
}
}
clk_set_rate(nvavp->sclk, 0);
nvavp_powergate_vde(nvavp);
nvhost_module_idle_ext(nvavp->nvhost_dev);
- pm_runtime_put(&nvavp->nvhost_dev->dev);
+ nvavp_runtime_put(nvavp);
dev_dbg(&nvavp->nvhost_dev->dev, "%s: resetting emc_clk "
"and sclk\n", __func__);
}
nvavp = container_of(work, struct nvavp_info,
app_notify_work);
-
- kobject_uevent(&nvavp->nvhost_dev->dev.kobj, KOBJ_CHANGE);
+ if (nvavp->audio_notify)
+ nvavp->audio_notify();
+ else
+ kobject_uevent(&nvavp->nvhost_dev->dev.kobj, KOBJ_CHANGE);
}
#endif
if (inbox & NVE276_OS_INTERRUPT_AUDIO_IDLE) {
if (audio_enabled) {
audio_enabled = false;
- pm_runtime_put(&nvavp->nvhost_dev->dev);
+ nvavp_runtime_put(nvavp);
}
pr_debug("nvavp_service NVE276_OS_INTERRUPT_AUDIO_IDLE\n");
}
#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
unsigned long stub_code_phys = virt_to_phys(_tegra_avp_boot_stub);
dma_addr_t stub_data_phys;
+#endif
+#if defined(CONFIG_TEGRA_NVAVP_AUDIO)
+ if (!(nvavp_check_idle(nvavp, NVAVP_AUDIO_CHANNEL)))
+ return 0;
+#endif
+
+#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
_tegra_avp_boot_stub_data.map_phys_addr = avp->kernel_phys;
_tegra_avp_boot_stub_data.jump_addr = reset_addr;
wmb();
static int nvavp_pushbuffer_init(struct nvavp_info *nvavp)
{
int ret, channel_id;
+ u32 val;
for (channel_id = 0; channel_id < NVAVP_NUM_CHANNELS; channel_id++) {
ret = nvavp_pushbuffer_alloc(nvavp, channel_id);
nvavp_set_channel_control_area(nvavp, channel_id);
if (IS_VIDEO_CHANNEL_ID(channel_id)) {
nvavp->syncpt_id = NVSYNCPT_AVP_0;
- nvavp->syncpt_value = nvhost_syncpt_read_ext(
- nvavp->nvhost_dev, nvavp->syncpt_id);
+ if (!nvhost_syncpt_read_ext_check(nvavp->nvhost_dev,
+ nvavp->syncpt_id, &val))
+ nvavp->syncpt_value = val;
}
}
u32 index, value = -1;
int ret = 0;
+ mutex_lock(&nvavp->open_lock);
+ nvavp_runtime_get(nvavp);
+ mutex_unlock(&nvavp->open_lock);
channel_info = nvavp_get_channel_info(nvavp, channel_id);
control = channel_info->os_control;
if (IS_AUDIO_CHANNEL_ID(channel_id)) {
pr_debug("Wake up Audio Channel\n");
if (!audio_enabled) {
- pm_runtime_get_sync(&nvavp->nvhost_dev->dev);
+ mutex_lock(&nvavp->open_lock);
+ nvavp_runtime_get(nvavp);
+ mutex_unlock(&nvavp->open_lock);
audio_enabled = true;
}
ret = nvavp_outbox_write(0xA0000002);
err_exit:
mutex_unlock(&channel_info->pushbuffer_lock);
+ nvavp_runtime_put(nvavp);
return 0;
}
}
}
- dev_info(&nvavp->nvhost_dev->dev,
+ dev_dbg(&nvavp->nvhost_dev->dev,
"read ucode firmware from '%s' (%d bytes)\n",
fw_ucode_file, nvavp_ucode_fw->size);
ptr = (void *)nvavp_ucode_fw->data;
if (strncmp((const char *)ptr, "NVAVPAPP", 8)) {
- dev_info(&nvavp->nvhost_dev->dev,
+ dev_dbg(&nvavp->nvhost_dev->dev,
"ucode hdr string mismatch\n");
ret = -EINVAL;
goto err_req_ucode;
goto err_req_fw;
}
- dev_info(&nvavp->nvhost_dev->dev,
+ dev_dbg(&nvavp->nvhost_dev->dev,
"read firmware from '%s' (%d bytes)\n",
fw_os_file, nvavp_os_fw->size);
ptr = (void *)nvavp_os_fw->data;
if (strncmp((const char *)ptr, "NVAVP-OS", 8)) {
- dev_info(&nvavp->nvhost_dev->dev,
+ dev_dbg(&nvavp->nvhost_dev->dev,
"os hdr string mismatch\n");
ret = -EINVAL;
goto err_os_bin;
memcpy(os_info->os_bin, ptr, os_info->size);
memset(os_info->data + os_info->size, 0, SZ_1M - os_info->size);
- dev_info(&nvavp->nvhost_dev->dev,
+ dev_dbg(&nvavp->nvhost_dev->dev,
"entry=%08x control=%08x debug=%08x size=%d\n",
os_info->entry_offset, os_info->control_offset,
os_info->debug_offset, os_info->size);
memcpy(os_info->data, os_info->os_bin, os_info->size);
os_info->reset_addr = os_info->phys + os_info->entry_offset;
- dev_info(&nvavp->nvhost_dev->dev,
+ dev_dbg(&nvavp->nvhost_dev->dev,
"AVP os at vaddr=%p paddr=%llx reset_addr=%llx\n",
os_info->data, (u64)(os_info->phys), (u64)os_info->reset_addr);
return 0;
pr_debug("video_initialized == audio_initialized (%d)\n",
nvavp->video_initialized);
#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU) /* Tegra2 with AVP MMU */
- /* paddr is any address returned from nvmap_pin */
+ /* paddr is phys address */
/* vaddr is AVP_KERNEL_VIRT_BASE */
- dev_info(&nvavp->nvhost_dev->dev,
+ dev_dbg(&nvavp->nvhost_dev->dev,
"using AVP MMU to relocate AVP os\n");
sprintf(fw_os_file, "nvavp_os.bin");
nvavp->os_info.reset_addr = AVP_KERNEL_VIRT_BASE;
#elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU) /* Tegra3 with SMMU */
/* paddr is any address behind SMMU */
/* vaddr is TEGRA_SMMU_BASE */
- dev_info(&nvavp->nvhost_dev->dev,
+ dev_dbg(&nvavp->nvhost_dev->dev,
"using SMMU at %lx to load AVP kernel\n",
(unsigned long)nvavp->os_info.phys);
BUG_ON(nvavp->os_info.phys != 0xeff00000
(unsigned long)nvavp->os_info.phys);
nvavp->os_info.reset_addr = nvavp->os_info.phys;
#else /* nvmem= carveout */
- /* paddr is found in nvmem= carveout */
- /* vaddr is same as paddr */
- /* Find nvmem carveout */
- if (!pfn_valid(__phys_to_pfn(0x8e000000))) {
- nvavp->os_info.phys = 0x8e000000;
- } else if (!pfn_valid(__phys_to_pfn(0xf7e00000))) {
- nvavp->os_info.phys = 0xf7e00000;
- } else if (!pfn_valid(__phys_to_pfn(0x9e000000))) {
- nvavp->os_info.phys = 0x9e000000;
- } else if (!pfn_valid(__phys_to_pfn(0xbe000000))) {
- nvavp->os_info.phys = 0xbe000000;
- } else {
- dev_err(&nvavp->nvhost_dev->dev,
- "cannot find nvmem= carveout to load AVP os\n");
- dev_err(&nvavp->nvhost_dev->dev,
- "check kernel command line "
- "to see if nvmem= is defined\n");
- BUG();
- }
- dev_info(&nvavp->nvhost_dev->dev,
+ dev_dbg(&nvavp->nvhost_dev->dev,
"using nvmem= carveout at %llx to load AVP os\n",
(u64)nvavp->os_info.phys);
sprintf(fw_os_file, "nvavp_os_%08llx.bin", (u64)nvavp->os_info.phys);
static int nvavp_init(struct nvavp_info *nvavp, int channel_id)
{
int ret = 0;
+ int video_initialized = 0, audio_initialized = 0;
+
+ nvavp->init_task = current;
ret = nvavp_os_init(nvavp);
if (ret) {
"unable to load os firmware and allocate buffers\n");
}
- if (IS_VIDEO_CHANNEL_ID(channel_id) &&
- (!nvavp_get_video_init_status(nvavp)) ) {
+ video_initialized = nvavp_get_video_init_status(nvavp);
+#if defined(CONFIG_TEGRA_NVAVP_AUDIO)
+ audio_initialized = nvavp_get_audio_init_status(nvavp);
+#endif
+
+ if (IS_VIDEO_CHANNEL_ID(channel_id) && (!video_initialized)) {
pr_debug("nvavp_init : channel_ID (%d)\n", channel_id);
ret = nvavp_load_ucode(nvavp);
if (ret) {
nvavp_set_video_init_status(nvavp, 1);
}
#if defined(CONFIG_TEGRA_NVAVP_AUDIO)
- if (IS_AUDIO_CHANNEL_ID(channel_id) &&
- (!nvavp_get_audio_init_status(nvavp))) {
+ if (IS_AUDIO_CHANNEL_ID(channel_id) && (!audio_initialized)) {
pr_debug("nvavp_init : channel_ID (%d)\n", channel_id);
nvavp_reset_avp(nvavp, nvavp->os_info.reset_addr);
nvavp_set_audio_init_status(nvavp, 1);
#endif
err_exit:
+ nvavp->init_task = NULL;
return ret;
}
-#define TIMER_PTV 0
#define TIMER_EN (1 << 31)
#define TIMER_PERIODIC (1 << 30)
#define TIMER_PCR 0x4
if (!video_initialized && !audio_initialized)
return;
+ nvavp->init_task = current;
+
if (video_initialized) {
pr_debug("nvavp_uninit nvavp->video_initialized\n");
cancel_work_sync(&nvavp->clock_disable_work);
#endif
/* Video and Audio both becomes uninitialized */
- if (video_initialized == audio_initialized) {
- pr_debug("nvavp_uninit both channels unitialized\n");
+ if (!video_initialized && !audio_initialized) {
+ pr_debug("nvavp_uninit both channels uninitialized\n");
clk_disable_unprepare(nvavp->sclk);
clk_disable_unprepare(nvavp->emc_clk);
* WAR: turn off TMR2 for fix LP1 wake up by TMR2.
* turn off the periodic interrupt and the timer temporarily
*/
- reg = readl(IO_ADDRESS(TEGRA_TMR2_BASE + TIMER_PTV));
+ reg = timer_readl(TIMER2_OFFSET + TIMER_PTV);
reg &= ~(TIMER_EN | TIMER_PERIODIC);
- writel(reg, IO_ADDRESS(TEGRA_TMR2_BASE + TIMER_PTV));
+ timer_writel(reg, TIMER2_OFFSET + TIMER_PTV);
/* write a 1 to the intr_clr field to clear the interrupt */
reg = TIMER_PCR_INTR;
- writel(reg, IO_ADDRESS(TEGRA_TMR2_BASE + TIMER_PCR));
+ timer_writel(reg, TIMER2_OFFSET + TIMER_PCR);
+ nvavp->init_task = NULL;
+}
+
+/*
+ * Apply a PM QoS minimum CPU-frequency request on behalf of the client.
+ * config.rate > 0 raises the floor to that rate; rate == 0 restores the
+ * default (no constraint).  'arg' is unused here; kept for signature
+ * symmetry with the other set-clock handlers.  Always returns 0.
+ */
+static int nvcpu_set_clock(struct nvavp_info *nvavp,
+ struct nvavp_clock_args config,
+ unsigned long arg)
+{
+ dev_dbg(&nvavp->nvhost_dev->dev, "%s: update cpu freq to clk_rate=%u\n",
+ __func__, config.rate);
+
+ if (config.rate > 0)
+ pm_qos_update_request(&nvavp->min_cpu_freq_req, config.rate);
+ else
+ pm_qos_update_request(&nvavp->min_cpu_freq_req,
+ PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE);
+
+ return 0;
+}
+
+/*
+ * NVAVP_IOCTL_MAP_IOVA handler: map the dmabuf fd from user space into
+ * the device's IOVA space (or reuse an existing mapping) and return the
+ * address to the caller.  The 64-bit dma_addr_t is truncated to __u32
+ * for the uapi struct — assumes AVP-visible IOVAs fit in 32 bits; TODO
+ * confirm.
+ */
+static int nvavp_map_iova(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct nvavp_clientctx *clientctx = filp->private_data;
+ struct nvavp_info *nvavp = clientctx->nvavp;
+ struct nvavp_map_args map_arg;
+ struct dma_buf *dmabuf;
+ dma_addr_t addr = 0;
+ int ret = 0;
+
+ if (copy_from_user(&map_arg, (void __user *)arg,
+ sizeof(struct nvavp_map_args))) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "failed to copy memory handle\n");
+ return -EFAULT;
+ }
+ if (!map_arg.fd) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "invalid memory handle %08x\n", map_arg.fd);
+ return -EINVAL;
+ }
+
+ /* On success this reference is kept (owned by the iova_info); on
+ * failure nvavp_get_iova_addr() drops it.  NOTE(review): when the
+ * buffer was already mapped, this extra reference appears to be
+ * retained without an owner — verify against the unmap path. */
+ dmabuf = dma_buf_get(map_arg.fd);
+ if (IS_ERR(dmabuf)) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "invalid buffer handle %08x\n", map_arg.fd);
+ return PTR_ERR(dmabuf);
+ }
+
+ ret = nvavp_get_iova_addr(clientctx, dmabuf, &addr);
+ if (ret)
+ goto out;
+
+ map_arg.addr = (__u32)addr;
+
+ if (copy_to_user((void __user *)arg, &map_arg,
+ sizeof(struct nvavp_map_args))) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "failed to copy phys addr\n");
+ /* NOTE(review): the iova ref taken above is not released on
+ * this failure path — confirm whether intended. */
+ ret = -EFAULT;
+ }
+
+out:
+ return ret;
+}
+
+/*
+ * NVAVP_IOCTL_UNMAP_IOVA handler: drop one reference on the mapping for
+ * the given dmabuf fd; the mapping is torn down when the last reference
+ * goes away.  The temporary dma_buf_get()/dma_buf_put() pair only keeps
+ * the buffer alive while we look it up.
+ */
+static int nvavp_unmap_iova(struct file *filp, unsigned long arg)
+{
+ struct nvavp_clientctx *clientctx = filp->private_data;
+ struct nvavp_info *nvavp = clientctx->nvavp;
+ struct nvavp_map_args map_arg;
+ struct dma_buf *dmabuf;
+
+ if (copy_from_user(&map_arg, (void __user *)arg,
+ sizeof(struct nvavp_map_args))) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "failed to copy memory handle\n");
+ return -EFAULT;
+ }
+
+ dmabuf = dma_buf_get(map_arg.fd);
+ if (IS_ERR(dmabuf)) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "invalid buffer handle %08x\n", map_arg.fd);
+ return PTR_ERR(dmabuf);
+ }
+
+ nvavp_release_iova_addr(clientctx, dmabuf, (dma_addr_t)map_arg.addr);
+ dma_buf_put(dmabuf);
+
+ return 0;
}
static int nvavp_set_clock_ioctl(struct file *filp, unsigned int cmd,
nvavp->sclk_rate = config.rate;
else if (config.id == NVAVP_MODULE_ID_EMC)
nvavp->emc_clk_rate = config.rate;
+ else if (config.id == NVAVP_MODULE_ID_CPU)
+ return nvcpu_set_clock(nvavp, config, arg);
c = nvavp_clk_get(nvavp, config.id);
if (IS_ERR_OR_NULL(c))
return -EFAULT;
}
-static int nvavp_set_nvmapfd_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- struct nvavp_clientctx *clientctx = filp->private_data;
- struct nvavp_set_nvmap_fd_args buf;
- struct nvmap_client *new_client;
- int fd;
-
- if (_IOC_DIR(cmd) & _IOC_WRITE) {
- if (copy_from_user(&buf, (void __user *)arg, _IOC_SIZE(cmd)))
- return -EFAULT;
- }
-
- fd = buf.fd;
- new_client = nvmap_client_get_file(fd);
- if (IS_ERR(new_client))
- return PTR_ERR(new_client);
-
- clientctx->nvmap = new_client;
- return 0;
-}
-
static int nvavp_pushbuffer_submit_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct nvavp_pushbuffer_submit_hdr hdr;
u32 *cmdbuf_data;
struct dma_buf *cmdbuf_dmabuf;
- struct dma_buf_attachment *dmabuf_attach;
- struct sg_table *sgt;
+ struct dma_buf_attachment *cmdbuf_attach;
+ struct sg_table *cmdbuf_sgt;
int ret = 0, i;
phys_addr_t phys_addr;
unsigned long virt_addr;
if (!hdr.cmdbuf.mem)
return 0;
+ if (hdr.num_relocs > NVAVP_MAX_RELOCATION_COUNT) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "invalid num_relocs %d\n", hdr.num_relocs);
+ return -EINVAL;
+ }
+
if (copy_from_user(clientctx->relocs, (void __user *)hdr.relocs,
sizeof(struct nvavp_reloc) * hdr.num_relocs)) {
return -EFAULT;
}
- cmdbuf_dmabuf = nvmap_dmabuf_export(clientctx->nvmap, hdr.cmdbuf.mem);
- if (!cmdbuf_dmabuf) {
+ cmdbuf_dmabuf = dma_buf_get(hdr.cmdbuf.mem);
+ if (IS_ERR(cmdbuf_dmabuf)) {
dev_err(&nvavp->nvhost_dev->dev,
"invalid cmd buffer handle %08x\n", hdr.cmdbuf.mem);
- return -EPERM;
+ return PTR_ERR(cmdbuf_dmabuf);
+ }
+
+ if ((hdr.cmdbuf.offset & 3)
+ || (hdr.cmdbuf.offset >= cmdbuf_dmabuf->size)) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "invalid cmdbuf offset %d\n", hdr.cmdbuf.offset);
+ ret = -EINVAL;
+ goto err_dmabuf_attach;
}
- dmabuf_attach = dma_buf_attach(cmdbuf_dmabuf, &nvavp->nvhost_dev->dev);
- if (!dmabuf_attach) {
+ cmdbuf_attach = dma_buf_attach(cmdbuf_dmabuf, &nvavp->nvhost_dev->dev);
+ if (IS_ERR(cmdbuf_attach)) {
dev_err(&nvavp->nvhost_dev->dev, "cannot attach cmdbuf_dmabuf\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(cmdbuf_attach);
goto err_dmabuf_attach;
}
- sgt = dma_buf_map_attachment(dmabuf_attach, DMA_BIDIRECTIONAL);
- if (!sgt) {
+ cmdbuf_sgt = dma_buf_map_attachment(cmdbuf_attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(cmdbuf_sgt)) {
dev_err(&nvavp->nvhost_dev->dev, "cannot map cmdbuf_dmabuf\n");
+ ret = PTR_ERR(cmdbuf_sgt);
goto err_dmabuf_map;
}
- phys_addr = sg_dma_address(sgt->sgl);
+ phys_addr = sg_dma_address(cmdbuf_sgt->sgl);
virt_addr = (unsigned long)dma_buf_vmap(cmdbuf_dmabuf);
if (!virt_addr) {
}
cmdbuf_data = (u32 *)(virt_addr + hdr.cmdbuf.offset);
-
for (i = 0; i < hdr.num_relocs; i++) {
+ struct dma_buf *target_dmabuf;
+ struct dma_buf_attachment *target_attach;
+ struct sg_table *target_sgt;
u32 *reloc_addr, target_phys_addr;
if (clientctx->relocs[i].cmdbuf_mem != hdr.cmdbuf.mem) {
goto err_reloc_info;
}
+ if ((clientctx->relocs[i].cmdbuf_offset & 3)
+ || (clientctx->relocs[i].cmdbuf_offset >=
+ cmdbuf_dmabuf->size)
+ || (clientctx->relocs[i].cmdbuf_offset >=
+ (cmdbuf_dmabuf->size - hdr.cmdbuf.offset))) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "invalid reloc offset in cmdbuf %d\n",
+ clientctx->relocs[i].cmdbuf_offset);
+ ret = -EINVAL;
+ goto err_reloc_info;
+ }
+
reloc_addr = cmdbuf_data +
(clientctx->relocs[i].cmdbuf_offset >> 2);
- target_phys_addr = nvmap_handle_address_user_id(
- clientctx->nvmap, clientctx->relocs[i].target);
+ target_dmabuf = dma_buf_get(clientctx->relocs[i].target);
+ if (IS_ERR(target_dmabuf)) {
+ ret = PTR_ERR(target_dmabuf);
+ goto target_dmabuf_fail;
+ }
+
+ if ((clientctx->relocs[i].target_offset & 3)
+ || (clientctx->relocs[i].target_offset >=
+ target_dmabuf->size)) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "invalid target offset in reloc %d\n",
+ clientctx->relocs[i].target_offset);
+ ret = -EINVAL;
+ goto target_attach_fail;
+ }
+
+ target_attach = dma_buf_attach(target_dmabuf,
+ &nvavp->nvhost_dev->dev);
+ if (IS_ERR(target_attach)) {
+ ret = PTR_ERR(target_attach);
+ goto target_attach_fail;
+ }
+ target_sgt = dma_buf_map_attachment(target_attach,
+ DMA_BIDIRECTIONAL);
+ if (IS_ERR(target_sgt)) {
+ ret = PTR_ERR(target_sgt);
+ goto target_map_fail;
+ }
+
+ target_phys_addr = sg_dma_address(target_sgt->sgl);
+ if (!target_phys_addr)
+ target_phys_addr = sg_phys(target_sgt->sgl);
target_phys_addr += clientctx->relocs[i].target_offset;
writel(target_phys_addr, reloc_addr);
+ dma_buf_unmap_attachment(target_attach, target_sgt,
+ DMA_BIDIRECTIONAL);
+target_map_fail:
+ dma_buf_detach(target_dmabuf, target_attach);
+target_attach_fail:
+ dma_buf_put(target_dmabuf);
+target_dmabuf_fail:
+ if (ret != 0)
+ goto err_reloc_info;
}
if (hdr.syncpt) {
}
err_reloc_info:
-err_dmabuf_vmap:
dma_buf_vunmap(cmdbuf_dmabuf, (void *)virt_addr);
+err_dmabuf_vmap:
+ dma_buf_unmap_attachment(cmdbuf_attach, cmdbuf_sgt, DMA_BIDIRECTIONAL);
err_dmabuf_map:
- dma_buf_unmap_attachment(dmabuf_attach, sgt, DMA_BIDIRECTIONAL);
+ dma_buf_detach(cmdbuf_dmabuf, cmdbuf_attach);
err_dmabuf_attach:
dma_buf_put(cmdbuf_dmabuf);
return ret;
}
+#ifdef CONFIG_COMPAT
+/*
+ * 32-bit compat wrapper for the pushbuffer-submit ioctl: widens the v32
+ * header into a native nvavp_pushbuffer_submit_hdr placed in the compat
+ * user-space area, calls the native handler, then copies the syncpt
+ * result back to the 32-bit caller.
+ */
+static int nvavp_pushbuffer_submit_compat_ioctl(struct file *filp,
+							unsigned int cmd,
+							unsigned long arg)
+{
+	struct nvavp_clientctx *clientctx = filp->private_data;
+	struct nvavp_info *nvavp = clientctx->nvavp;
+	/* Zero-initialize: if the ioctl direction lacks _IOC_WRITE we would
+	 * otherwise read uninitialized hdr_v32.cmdbuf.mem below (UB). */
+	struct nvavp_pushbuffer_submit_hdr_v32 hdr_v32 = {0};
+	struct nvavp_pushbuffer_submit_hdr __user *user_hdr;
+	int ret = 0;
+
+	if (_IOC_DIR(cmd) & _IOC_WRITE) {
+		if (copy_from_user(&hdr_v32, (void __user *)arg,
+			sizeof(struct nvavp_pushbuffer_submit_hdr_v32)))
+			return -EFAULT;
+	}
+
+	/* Nothing to submit. */
+	if (!hdr_v32.cmdbuf.mem)
+		return 0;
+
+	user_hdr = compat_alloc_user_space(sizeof(*user_hdr));
+	if (!access_ok(VERIFY_WRITE, user_hdr, sizeof(*user_hdr)))
+		return -EFAULT;
+
+	/* Widen each 32-bit field (pointers via unsigned long). */
+	if (__put_user(hdr_v32.cmdbuf.mem, &user_hdr->cmdbuf.mem)
+	    || __put_user(hdr_v32.cmdbuf.offset, &user_hdr->cmdbuf.offset)
+	    || __put_user(hdr_v32.cmdbuf.words, &user_hdr->cmdbuf.words)
+	    || __put_user((void __user *)(unsigned long)hdr_v32.relocs,
+			  &user_hdr->relocs)
+	    || __put_user(hdr_v32.num_relocs, &user_hdr->num_relocs)
+	    || __put_user((void __user *)(unsigned long)hdr_v32.syncpt,
+			  &user_hdr->syncpt)
+	    || __put_user(hdr_v32.flags, &user_hdr->flags))
+		return -EFAULT;
+
+	ret = nvavp_pushbuffer_submit_ioctl(filp, cmd, (unsigned long)user_hdr);
+	if (ret)
+		return ret;
+
+	/* Propagate the syncpt information written by the native handler. */
+	if (__get_user(hdr_v32.syncpt, &user_hdr->syncpt))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &hdr_v32,
+			 sizeof(struct nvavp_pushbuffer_submit_hdr_v32))) {
+		ret = -EFAULT;
+	}
+
+	return ret;
+}
+#endif
+
+#if defined(CONFIG_TEGRA_NVAVP_AUDIO)
+/*
+ * In-kernel API for audio clients: submit an already-resolved command
+ * buffer (device address + word count) to the AVP audio channel.  No
+ * relocation or syncpt handling; returns the pushbuffer-update result.
+ */
+int nvavp_pushbuffer_submit_audio(nvavp_clientctx_t client, int cmd_buf_phys,
+ int cmd_buf_words)
+{
+ struct nvavp_clientctx *clientctx = client;
+ struct nvavp_info *nvavp = clientctx->nvavp;
+
+ return nvavp_pushbuffer_update(nvavp,
+ cmd_buf_phys,
+ cmd_buf_words, NULL,
+ NVAVP_UCODE_EXT,
+ NVAVP_AUDIO_CHANNEL);
+}
+EXPORT_SYMBOL_GPL(nvavp_pushbuffer_submit_audio);
+
+/*
+ * Register a callback invoked from app_notify_work in place of the
+ * default KOBJ_CHANGE uevent.  NOTE(review): the assignment is not
+ * serialized against the workqueue reader — confirm callers register
+ * before notifications can fire.
+ */
+void nvavp_register_audio_cb(nvavp_clientctx_t client, void (*cb)(void))
+{
+ struct nvavp_clientctx *clientctx = client;
+ struct nvavp_info *nvavp = clientctx->nvavp;
+
+ nvavp->audio_notify = cb;
+}
+EXPORT_SYMBOL_GPL(nvavp_register_audio_cb);
+#endif
+
static int nvavp_wake_avp_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
nvavp_clks_disable(nvavp);
}
mutex_unlock(&nvavp->open_lock);
+ if (!nvavp->stay_on)
+ schedule_work(&nvavp->clock_disable_work);
}
return 0;
}
#if defined(CONFIG_TEGRA_NVAVP_AUDIO)
-static int nvavp_enable_audio_clocks(struct file *filp, unsigned int cmd,
- unsigned long arg)
+int nvavp_enable_audio_clocks(nvavp_clientctx_t client, u32 clk_id)
{
- struct nvavp_clientctx *clientctx = filp->private_data;
+ struct nvavp_clientctx *clientctx = client;
struct nvavp_info *nvavp = clientctx->nvavp;
- struct nvavp_clock_args config;
-
- if (copy_from_user(&config, (void __user *)arg, sizeof(struct nvavp_clock_args)))
- return -EFAULT;
- dev_dbg(&nvavp->nvhost_dev->dev, "%s: clk_id=%d\n",
- __func__, config.id);
+ dev_dbg(&nvavp->nvhost_dev->dev, "%s: clk_id = %d\n",
+ __func__, clk_id);
- if (config.id == NVAVP_MODULE_ID_VCP)
+ mutex_lock(&nvavp->open_lock);
+ if (clk_id == NVAVP_MODULE_ID_VCP)
clk_prepare_enable(nvavp->vcp_clk);
- else if (config.id == NVAVP_MODULE_ID_BSEA)
+ else if (clk_id == NVAVP_MODULE_ID_BSEA)
clk_prepare_enable(nvavp->bsea_clk);
-
+ mutex_unlock(&nvavp->open_lock);
return 0;
}
+EXPORT_SYMBOL_GPL(nvavp_enable_audio_clocks);
-static int nvavp_disable_audio_clocks(struct file *filp, unsigned int cmd,
- unsigned long arg)
+int nvavp_disable_audio_clocks(nvavp_clientctx_t client, u32 clk_id)
{
- struct nvavp_clientctx *clientctx = filp->private_data;
+ struct nvavp_clientctx *clientctx = client;
struct nvavp_info *nvavp = clientctx->nvavp;
- struct nvavp_clock_args config;
- if (copy_from_user(&config, (void __user *)arg, sizeof(struct nvavp_clock_args)))
- return -EFAULT;
+ dev_dbg(&nvavp->nvhost_dev->dev, "%s: clk_id = %d\n",
+ __func__, clk_id);
- dev_dbg(&nvavp->nvhost_dev->dev, "%s: clk_id=%d\n",
- __func__, config.id);
-
- if (config.id == NVAVP_MODULE_ID_VCP)
+ mutex_lock(&nvavp->open_lock);
+ if (clk_id == NVAVP_MODULE_ID_VCP)
clk_disable_unprepare(nvavp->vcp_clk);
- else if (config.id == NVAVP_MODULE_ID_BSEA)
+ else if (clk_id == NVAVP_MODULE_ID_BSEA)
clk_disable_unprepare(nvavp->bsea_clk);
-
+ mutex_unlock(&nvavp->open_lock);
return 0;
}
+EXPORT_SYMBOL_GPL(nvavp_disable_audio_clocks);
#else
-static int nvavp_enable_audio_clocks(struct file *filp, unsigned int cmd,
- unsigned long arg)
+/* Stub when audio support (CONFIG_TEGRA_NVAVP_AUDIO) is compiled out. */
+int nvavp_enable_audio_clocks(nvavp_clientctx_t client, u32 clk_id)
{
return 0;
}
+EXPORT_SYMBOL_GPL(nvavp_enable_audio_clocks);
-static int nvavp_disable_audio_clocks(struct file *filp, unsigned int cmd,
- unsigned long arg)
+/* Stub when audio support (CONFIG_TEGRA_NVAVP_AUDIO) is compiled out.
+ * Signature must match the CONFIG_TEGRA_NVAVP_AUDIO variant above:
+ * "u32 clk_id", not the typo "u32_clk_id" (undeclared type). */
+int nvavp_disable_audio_clocks(nvavp_clientctx_t client, u32 clk_id)
{
return 0;
}
+EXPORT_SYMBOL_GPL(nvavp_disable_audio_clocks);
#endif
-static int tegra_nvavp_open(struct inode *inode, struct file *filp, int channel_id)
+/*
+ * NVAVP_IOCTL_SET_MIN_ONLINE_CPUS handler: apply a PM QoS request for a
+ * minimum number of online CPUs.  A value > 0 sets the floor; 0 (or
+ * negative) removes the constraint.  Always returns 0 on a successful
+ * copy from user space.
+ */
+static int nvavp_set_min_online_cpus_ioctl(struct file *filp, unsigned int cmd,
+					unsigned long arg)
+{
+	struct nvavp_clientctx *clientctx = filp->private_data;
+	struct nvavp_info *nvavp = clientctx->nvavp;
+	struct nvavp_num_cpus_args config;
+
+	if (copy_from_user(&config, (void __user *)arg,
+			   sizeof(struct nvavp_num_cpus_args)))
+		return -EFAULT;
+
+	dev_dbg(&nvavp->nvhost_dev->dev, "%s: min_online_cpus=%d\n",
+		__func__, config.min_online_cpus);
+
+	if (config.min_online_cpus > 0)
+		pm_qos_update_request(&nvavp->min_online_cpus_req,
+				      config.min_online_cpus);
+	else
+		/* Reset to the generic pm_qos default: this request
+		 * constrains online-CPU count, not CPU frequency, so the
+		 * CPU-freq default constant (copy-paste from
+		 * nvcpu_set_clock) was the wrong reset value. */
+		pm_qos_update_request(&nvavp->min_online_cpus_req,
+				      PM_QOS_DEFAULT_VALUE);
+
+	return 0;
+}
+
+/*
+ * Common open path for the video/audio device nodes and the in-kernel
+ * audio client: allocate a client context, run channel init, and bump
+ * the per-channel refcounts on success.  Caller holds nvavp->open_lock.
+ * NOTE(review): on nvavp_init() failure *client is still set and the
+ * context is not freed here — TODO confirm callers free it on error.
+ */
+static int tegra_nvavp_open(struct nvavp_info *nvavp,
+ struct nvavp_clientctx **client, int channel_id)
{
- struct miscdevice *miscdev = filp->private_data;
- struct nvavp_info *nvavp = dev_get_drvdata(miscdev->parent);
- int ret = 0;
struct nvavp_clientctx *clientctx;
+ int ret = 0;
dev_dbg(&nvavp->nvhost_dev->dev, "%s: ++\n", __func__);
- nonseekable_open(inode, filp);
-
clientctx = kzalloc(sizeof(*clientctx), GFP_KERNEL);
if (!clientctx)
return -ENOMEM;
- mutex_lock(&nvavp->open_lock);
-
pr_debug("tegra_nvavp_open channel_id (%d)\n", channel_id);
clientctx->channel_id = channel_id;
ret = nvavp_init(nvavp, channel_id);
- if (!ret)
+ if (!ret) {
nvavp->refcount++;
+ if (IS_VIDEO_CHANNEL_ID(channel_id))
+ nvavp->video_refcnt++;
+ if (IS_AUDIO_CHANNEL_ID(channel_id))
+ nvavp->audio_refcnt++;
+ }
clientctx->nvavp = nvavp;
-
- filp->private_data = clientctx;
-
- mutex_unlock(&nvavp->open_lock);
+ clientctx->iova_handles = RB_ROOT;
+ *client = clientctx;
return ret;
}
static int tegra_nvavp_video_open(struct inode *inode, struct file *filp)
{
+ struct miscdevice *miscdev = filp->private_data;
+ struct nvavp_info *nvavp = dev_get_drvdata(miscdev->parent);
+ struct nvavp_clientctx *clientctx;
+ int ret = 0;
+
pr_debug("tegra_nvavp_video_open NVAVP_VIDEO_CHANNEL\n");
- return tegra_nvavp_open(inode, filp, NVAVP_VIDEO_CHANNEL);
+
+ nonseekable_open(inode, filp);
+
+ mutex_lock(&nvavp->open_lock);
+ ret = tegra_nvavp_open(nvavp, &clientctx, NVAVP_VIDEO_CHANNEL);
+ filp->private_data = clientctx;
+ mutex_unlock(&nvavp->open_lock);
+
+ return ret;
}
#if defined(CONFIG_TEGRA_NVAVP_AUDIO)
static int tegra_nvavp_audio_open(struct inode *inode, struct file *filp)
{
+ struct miscdevice *miscdev = filp->private_data;
+ struct nvavp_info *nvavp = dev_get_drvdata(miscdev->parent);
+ struct nvavp_clientctx *clientctx;
+ int ret = 0;
+
pr_debug("tegra_nvavp_audio_open NVAVP_AUDIO_CHANNEL\n");
- return tegra_nvavp_open(inode, filp, NVAVP_AUDIO_CHANNEL);
+
+ nonseekable_open(inode, filp);
+
+ mutex_lock(&nvavp->open_lock);
+ ret = tegra_nvavp_open(nvavp, &clientctx, NVAVP_AUDIO_CHANNEL);
+ filp->private_data = clientctx;
+ mutex_unlock(&nvavp->open_lock);
+
+ return ret;
}
+
+int tegra_nvavp_audio_client_open(nvavp_clientctx_t *clientctx)
+{
+ struct nvavp_info *nvavp = nvavp_info_ctx;
+ int ret = 0;
+
+ mutex_lock(&nvavp->open_lock);
+ ret = tegra_nvavp_open(nvavp, (struct nvavp_clientctx **)clientctx,
+ NVAVP_AUDIO_CHANNEL);
+ mutex_unlock(&nvavp->open_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tegra_nvavp_audio_client_open);
#endif
-static int tegra_nvavp_release(struct inode *inode, struct file *filp)
+static int tegra_nvavp_release(struct nvavp_clientctx *clientctx,
+ int channel_id)
{
- struct nvavp_clientctx *clientctx = filp->private_data;
struct nvavp_info *nvavp = clientctx->nvavp;
int ret = 0;
dev_dbg(&nvavp->nvhost_dev->dev, "%s: ++\n", __func__);
- filp->private_data = NULL;
-
- mutex_lock(&nvavp->open_lock);
-
if (!nvavp->refcount) {
dev_err(&nvavp->nvhost_dev->dev,
"releasing while in invalid state\n");
if (nvavp->refcount > 0)
nvavp->refcount--;
- if (!nvavp->refcount)
+ if (!nvavp->refcount) {
+ mutex_unlock(&nvavp->open_lock);
nvavp_uninit(nvavp);
+ mutex_lock(&nvavp->open_lock);
+ }
+
+ if (IS_VIDEO_CHANNEL_ID(channel_id))
+ nvavp->video_refcnt--;
+ if (IS_AUDIO_CHANNEL_ID(channel_id))
+ nvavp->audio_refcnt--;
out:
- nvmap_client_put(clientctx->nvmap);
- mutex_unlock(&nvavp->open_lock);
+ nvavp_remove_iova_mapping(clientctx);
kfree(clientctx);
return ret;
}
+static int tegra_nvavp_video_release(struct inode *inode, struct file *filp)
+{
+ struct nvavp_clientctx *clientctx = filp->private_data;
+ struct nvavp_info *nvavp = clientctx->nvavp;
+ int ret = 0;
+
+ mutex_lock(&nvavp->open_lock);
+ filp->private_data = NULL;
+ ret = tegra_nvavp_release(clientctx, NVAVP_VIDEO_CHANNEL);
+ mutex_unlock(&nvavp->open_lock);
+
+ return ret;
+}
+
+#if defined(CONFIG_TEGRA_NVAVP_AUDIO)
+static int tegra_nvavp_audio_release(struct inode *inode,
+ struct file *filp)
+{
+ struct nvavp_clientctx *clientctx = filp->private_data;
+ struct nvavp_info *nvavp = clientctx->nvavp;
+ int ret = 0;
+
+ mutex_lock(&nvavp->open_lock);
+ filp->private_data = NULL;
+ ret = tegra_nvavp_release(clientctx, NVAVP_AUDIO_CHANNEL);
+ mutex_unlock(&nvavp->open_lock);
+
+ return ret;
+}
+
+int tegra_nvavp_audio_client_release(nvavp_clientctx_t client)
+{
+ struct nvavp_clientctx *clientctx = client;
+ struct nvavp_info *nvavp = clientctx->nvavp;
+ int ret = 0;
+
+ mutex_lock(&nvavp->open_lock);
+ ret = tegra_nvavp_release(clientctx, NVAVP_AUDIO_CHANNEL);
+ mutex_unlock(&nvavp->open_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tegra_nvavp_audio_client_release);
+#endif
+
+
+static int
+nvavp_channel_open(struct file *filp, struct nvavp_channel_open_args *arg)
+{
+ int fd, err = 0;
+ struct file *file;
+ char *name;
+ struct nvavp_clientctx *clientctx = filp->private_data;
+ struct nvavp_info *nvavp = clientctx->nvavp;
+
+ err = get_unused_fd_flags(O_RDWR);
+ if (err < 0)
+ return err;
+
+ fd = err;
+
+ name = kasprintf(GFP_KERNEL, "nvavp-channel-fd%d", fd);
+ if (!name) {
+ err = -ENOMEM;
+ put_unused_fd(fd);
+ return err;
+ }
+
+ file = anon_inode_getfile(name, filp->f_op, &(nvavp->video_misc_dev),
+ O_RDWR);
+ kfree(name);
+ if (IS_ERR(file)) {
+ err = PTR_ERR(file);
+ put_unused_fd(fd);
+ return err;
+ }
+
+	nonseekable_open(file->f_inode, filp);
+	mutex_lock(&nvavp->open_lock);
+	err = tegra_nvavp_open(nvavp,
+		(struct nvavp_clientctx **)&file->private_data,
+		clientctx->channel_id);
+	mutex_unlock(&nvavp->open_lock);
+	if (err) {
+		/* fd not yet installed: it is still "unused" and the file
+		 * reference is still ours to drop.
+		 */
+		put_unused_fd(fd);
+		fput(file);
+		return err;
+	}
+
+	/* Publish the fd only after the channel is fully initialized: once
+	 * fd_install() runs, userspace owns the file reference and
+	 * put_unused_fd()/fput() would no longer be valid error handling.
+	 */
+	fd_install(fd, file);
+
+	arg->channel_fd = fd;
+	return err;
+}
+
static long tegra_nvavp_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
+ struct nvavp_clientctx *clientctx = filp->private_data;
+ struct nvavp_clock_args config;
int ret = 0;
+ u8 buf[NVAVP_IOCTL_CHANNEL_MAX_ARG_SIZE];
if (_IOC_TYPE(cmd) != NVAVP_IOCTL_MAGIC ||
_IOC_NR(cmd) < NVAVP_IOCTL_MIN_NR ||
switch (cmd) {
case NVAVP_IOCTL_SET_NVMAP_FD:
- ret = nvavp_set_nvmapfd_ioctl(filp, cmd, arg);
break;
case NVAVP_IOCTL_GET_SYNCPOINT_ID:
ret = nvavp_get_syncpointid_ioctl(filp, cmd, arg);
ret = nvavp_force_clock_stay_on_ioctl(filp, cmd, arg);
break;
case NVAVP_IOCTL_ENABLE_AUDIO_CLOCKS:
- ret = nvavp_enable_audio_clocks(filp, cmd, arg);
+ if (copy_from_user(&config, (void __user *)arg,
+ sizeof(struct nvavp_clock_args))) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = nvavp_enable_audio_clocks(clientctx, config.id);
break;
case NVAVP_IOCTL_DISABLE_AUDIO_CLOCKS:
- ret = nvavp_disable_audio_clocks(filp, cmd, arg);
+ if (copy_from_user(&config, (void __user *)arg,
+ sizeof(struct nvavp_clock_args))) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = nvavp_disable_audio_clocks(clientctx, config.id);
+ break;
+ case NVAVP_IOCTL_SET_MIN_ONLINE_CPUS:
+ ret = nvavp_set_min_online_cpus_ioctl(filp, cmd, arg);
+ break;
+ case NVAVP_IOCTL_MAP_IOVA:
+ ret = nvavp_map_iova(filp, cmd, arg);
+ break;
+ case NVAVP_IOCTL_UNMAP_IOVA:
+ ret = nvavp_unmap_iova(filp, arg);
+ break;
+ case NVAVP_IOCTL_CHANNEL_OPEN:
+ ret = nvavp_channel_open(filp, (void *)buf);
+		/* copy_to_user() returns the number of bytes NOT copied,
+		 * not an errno -- translate failure to -EFAULT instead of
+		 * returning a positive count to userspace.
+		 */
+		if (ret == 0 &&
+		    copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
+			ret = -EFAULT;
break;
default:
ret = -EINVAL;
return ret;
}
+#ifdef CONFIG_COMPAT
+static long tegra_nvavp_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+
+ if (_IOC_TYPE(cmd) != NVAVP_IOCTL_MAGIC ||
+ _IOC_NR(cmd) < NVAVP_IOCTL_MIN_NR ||
+ _IOC_NR(cmd) > NVAVP_IOCTL_MAX_NR)
+ return -EFAULT;
+
+ switch (cmd) {
+ case NVAVP_IOCTL_PUSH_BUFFER_SUBMIT32:
+ ret = nvavp_pushbuffer_submit_compat_ioctl(filp, cmd, arg);
+ break;
+ default:
+ ret = tegra_nvavp_ioctl(filp, cmd, arg);
+ break;
+ }
+ return ret;
+}
+#endif
+
static const struct file_operations tegra_video_nvavp_fops = {
.owner = THIS_MODULE,
.open = tegra_nvavp_video_open,
- .release = tegra_nvavp_release,
+ .release = tegra_nvavp_video_release,
.unlocked_ioctl = tegra_nvavp_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = tegra_nvavp_compat_ioctl,
+#endif
};
#if defined(CONFIG_TEGRA_NVAVP_AUDIO)
static const struct file_operations tegra_audio_nvavp_fops = {
.owner = THIS_MODULE,
.open = tegra_nvavp_audio_open,
- .release = tegra_nvavp_release,
+ .release = tegra_nvavp_audio_release,
.unlocked_ioctl = tegra_nvavp_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = tegra_nvavp_compat_ioctl,
+#endif
};
#endif
DEVICE_ATTR(boost_sclk, S_IRUGO | S_IWUSR, boost_sclk_show, boost_sclk_store);
+enum nvavp_heap {
+ NVAVP_USE_SMMU = (1 << 0),
+ NVAVP_USE_CARVEOUT = (1 << 1)
+};
+
+static int nvavp_reserve_os_mem(struct nvavp_info *nvavp, dma_addr_t phys)
+{
+ int ret = -ENOMEM;
+ if (!pfn_valid(__phys_to_pfn(phys))) {
+ if (memblock_reserve(phys, SZ_1M)) {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "failed to reserve mem block %lx\n",
+ (unsigned long)phys);
+ } else
+ ret = 0;
+ }
+ return ret;
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id tegra_nvavp_of_match[] = {
+ { .compatible = "nvidia,tegra30-nvavp", NULL },
+ { .compatible = "nvidia,tegra114-nvavp", NULL },
+ { .compatible = "nvidia,tegra124-nvavp", NULL },
+ { },
+};
+#endif
+
static int tegra_nvavp_probe(struct platform_device *ndev)
{
struct nvavp_info *nvavp;
int irq;
- unsigned int heap_mask;
+ enum nvavp_heap heap_mask;
int ret = 0, channel_id;
+ struct device_node *np;
+
+ np = ndev->dev.of_node;
+ if (np) {
+ irq = platform_get_irq(ndev, 0);
+ nvavp_reg_base = of_iomap(np, 0);
+ } else {
+ irq = platform_get_irq_byname(ndev, "mbox_from_nvavp_pending");
+ }
- irq = platform_get_irq_byname(ndev, "mbox_from_nvavp_pending");
if (irq < 0) {
dev_err(&ndev->dev, "invalid nvhost data\n");
return -EINVAL;
}
+ if (!nvavp_reg_base) {
+ dev_err(&ndev->dev, "unable to map, memory mapped IO\n");
+ return -EINVAL;
+ }
+
+ /* Set the max segment size supported. */
+ ndev->dev.dma_parms = &nvavp_dma_parameters;
+
nvavp = kzalloc(sizeof(struct nvavp_info), GFP_KERNEL);
if (!nvavp) {
dev_err(&ndev->dev, "cannot allocate avp_info\n");
memset(nvavp, 0, sizeof(*nvavp));
#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU) /* Tegra2 with AVP MMU */
- heap_mask = NVMAP_HEAP_CARVEOUT_GENERIC;
+ heap_mask = NVAVP_USE_CARVEOUT;
#elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU) /* Tegra3 with SMMU */
- heap_mask = NVMAP_HEAP_IOVMM;
+ heap_mask = NVAVP_USE_SMMU;
#else /* nvmem= carveout */
- heap_mask = NVMAP_HEAP_CARVEOUT_GENERIC;
+ heap_mask = NVAVP_USE_CARVEOUT;
#endif
switch (heap_mask) {
- case NVMAP_HEAP_IOVMM:
+ case NVAVP_USE_SMMU:
nvavp->os_info.phys = 0x8ff00000;
nvavp->os_info.data = dma_alloc_at_coherent(
"allocated IOVA at %lx for AVP os\n",
(unsigned long)nvavp->os_info.phys);
break;
- case NVMAP_HEAP_CARVEOUT_GENERIC:
- nvavp->os_info.data = dma_alloc_coherent(
- &ndev->dev,
- SZ_1M,
- &nvavp->os_info.phys,
- GFP_KERNEL);
+ case NVAVP_USE_CARVEOUT:
+ if (!nvavp_reserve_os_mem(nvavp, 0x8e000000))
+ nvavp->os_info.phys = 0x8e000000;
+ else if (!nvavp_reserve_os_mem(nvavp, 0xf7e00000))
+ nvavp->os_info.phys = 0xf7e00000;
+ else if (!nvavp_reserve_os_mem(nvavp, 0x9e000000))
+ nvavp->os_info.phys = 0x9e000000;
+ else if (!nvavp_reserve_os_mem(nvavp, 0xbe000000))
+ nvavp->os_info.phys = 0xbe000000;
+ else {
+ dev_err(&nvavp->nvhost_dev->dev,
+ "cannot find nvmem= carveout to load AVP os\n");
+ dev_err(&nvavp->nvhost_dev->dev,
+ "check kernel command line "
+ "to see if nvmem= is defined\n");
+ BUG();
- if (!nvavp->os_info.data) {
- dev_err(&ndev->dev, "cannot allocate dma memory\n");
- ret = -ENOMEM;
}
dev_info(&ndev->dev,
platform_set_drvdata(ndev, nvavp);
tegra_pd_add_device(&ndev->dev);
+ pm_runtime_use_autosuspend(&ndev->dev);
+ pm_runtime_set_autosuspend_delay(&ndev->dev, 2000);
pm_runtime_enable(&ndev->dev);
ret = device_create_file(&ndev->dev, &dev_attr_boost_sclk);
"%s: device_create_file failed\n", __func__);
goto err_req_irq_pend;
}
+ nvavp_info_ctx = nvavp;
+
+ /* Add PM QoS request but leave it as default value */
+ pm_qos_add_request(&nvavp->min_cpu_freq_req,
+ PM_QOS_CPU_FREQ_MIN,
+ PM_QOS_DEFAULT_VALUE);
+ pm_qos_add_request(&nvavp->min_online_cpus_req,
+ PM_QOS_MIN_ONLINE_CPUS,
+ PM_QOS_DEFAULT_VALUE);
return 0;
clk_put(nvavp->emc_clk);
clk_put(nvavp->sclk);
+	/* &nvavp->..._req is the address of an embedded member and can never
+	 * be NULL or an ERR pointer, so IS_ERR_OR_NULL() was always true;
+	 * pm_qos_request_active() is the correct check before removal.
+	 */
+	if (pm_qos_request_active(&nvavp->min_cpu_freq_req)) {
+		pm_qos_update_request(&nvavp->min_cpu_freq_req,
+			PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE);
+		pm_qos_remove_request(&nvavp->min_cpu_freq_req);
+	}
+	if (pm_qos_request_active(&nvavp->min_online_cpus_req)) {
+		pm_qos_update_request(&nvavp->min_online_cpus_req,
+			PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE);
+		pm_qos_remove_request(&nvavp->min_online_cpus_req);
+	}
+
kfree(nvavp);
return 0;
}
#ifdef CONFIG_PM
-static int tegra_nvavp_suspend(struct device *dev)
+static int tegra_nvavp_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct nvavp_info *nvavp = platform_get_drvdata(pdev);
int ret = 0;
- mutex_lock(&nvavp->open_lock);
-
if (nvavp->refcount) {
if (!nvavp->clk_enabled) {
#if defined(CONFIG_TEGRA_NVAVP_AUDIO)
}
}
- /* Partition vde has to be left on before suspend for the
- * device to wakeup on resume
- */
- nvavp_unpowergate_vde(nvavp);
-
- mutex_unlock(&nvavp->open_lock);
return ret;
}
-static int tegra_nvavp_resume(struct device *dev)
+static int tegra_nvavp_runtime_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct nvavp_info *nvavp = platform_get_drvdata(pdev);
mutex_lock(&nvavp->open_lock);
- if (nvavp->refcount) {
+ if (nvavp->video_refcnt)
nvavp_init(nvavp, NVAVP_VIDEO_CHANNEL);
#if defined(CONFIG_TEGRA_NVAVP_AUDIO)
+ if (nvavp->audio_refcnt)
nvavp_init(nvavp, NVAVP_AUDIO_CHANNEL);
#endif
+
+ mutex_unlock(&nvavp->open_lock);
+
+ return 0;
+}
+
+static int tegra_nvavp_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct nvavp_info *nvavp = platform_get_drvdata(pdev);
+
+ /* To balance the unpowergate in suspend routine */
+ nvavp_powergate_vde(nvavp);
+
+ tegra_nvavp_runtime_resume(dev);
+
+ return 0;
+}
+
+static int tegra_nvavp_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct nvavp_info *nvavp = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ mutex_lock(&nvavp->open_lock);
+
+ ret = tegra_nvavp_runtime_suspend(dev);
+ if (ret) {
+ mutex_unlock(&nvavp->open_lock);
+ return ret;
}
+
+ /* WAR: Leave partition vde on before suspend so that access
+	 * to BSEV registers immediately after LP0 exit won't fail.
+ */
+ nvavp_unpowergate_vde(nvavp);
+
mutex_unlock(&nvavp->open_lock);
return 0;
}
static const struct dev_pm_ops nvavp_pm_ops = {
+ .runtime_suspend = tegra_nvavp_runtime_suspend,
+ .runtime_resume = tegra_nvavp_runtime_resume,
.suspend = tegra_nvavp_suspend,
.resume = tegra_nvavp_resume,
};
.name = TEGRA_NVAVP_NAME,
.owner = THIS_MODULE,
.pm = NVAVP_PM_OPS,
+ .of_match_table = of_match_ptr(tegra_nvavp_of_match),
},
.probe = tegra_nvavp_probe,
.remove = tegra_nvavp_remove,