2 * drivers/media/video/tegra/nvavp/nvavp_dev.c
4 * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
11 #define CREATE_TRACE_POINTS
12 #include <trace/events/nvavp.h>
14 #include <linux/uaccess.h>
15 #include <linux/clk.h>
16 #include <linux/compat.h>
17 #include <linux/completion.h>
18 #include <linux/delay.h>
19 #include <linux/dma-buf.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/err.h>
22 #include <linux/firmware.h>
24 #include <linux/interrupt.h>
26 #include <linux/ioctl.h>
27 #include <linux/irq.h>
28 #include <linux/kref.h>
29 #include <linux/list.h>
30 #include <linux/miscdevice.h>
31 #include <linux/module.h>
32 #include <linux/mutex.h>
33 #include <linux/nvhost.h>
34 #include <linux/platform_device.h>
35 #include <linux/rbtree.h>
36 #include <linux/seq_file.h>
37 #include <linux/slab.h>
38 #include <linux/string.h>
39 #include <linux/tegra_nvavp.h>
40 #include <linux/types.h>
41 #include <linux/vmalloc.h>
42 #include <linux/workqueue.h>
43 #include <linux/pm_runtime.h>
44 #include <linux/clk/tegra.h>
45 #include <linux/tegra-powergate.h>
46 #include <linux/irqchip/tegra.h>
47 #include <linux/sched.h>
48 #include <linux/memblock.h>
49 #include <linux/anon_inodes.h>
50 #include <linux/tegra_pm_domains.h>
53 #include <linux/pm_qos.h>
56 #include <linux/of_device.h>
57 #include <linux/of_platform.h>
58 #include <linux/of_address.h>
59 #include <linux/tegra-timer.h>
61 #ifdef CONFIG_TRUSTED_LITTLE_KERNEL
62 #include <linux/ote_protocol.h>
65 #if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
66 #include "../avp/headavp.h"
/* Driver-wide constants: pushbuffer sizing, AVP MMIO offsets, timeouts. */
70 #define TEGRA_NVAVP_NAME "nvavp"
72 #define NVAVP_PUSHBUFFER_SIZE 4096
74 #define NVAVP_PUSHBUFFER_MIN_UPDATE_SPACE (sizeof(u32) * 3)
/* MMIO base for the AVP register window; offsets below are relative to it. */
76 static void __iomem *nvavp_reg_base;
78 #define TEGRA_NVAVP_RESET_VECTOR_ADDR (nvavp_reg_base + 0xe200)
/* Flow controller register used to halt/release the COP (AVP) core. */
80 #define FLOW_CTRL_HALT_COP_EVENTS (nvavp_reg_base + 0x6000 + 0x4)
81 #define FLOW_MODE_STOP (0x2 << 29)
82 #define FLOW_MODE_NONE 0x0
/* Shared-mailbox registers: INBOX is AVP->CPU, OUTBOX is CPU->AVP. */
84 #define NVAVP_OS_INBOX (nvavp_reg_base + 0x10)
85 #define NVAVP_OS_OUTBOX (nvavp_reg_base + 0x20)
87 #define NVAVP_INBOX_VALID (1 << 29)
89 /* AVP behavior params */
90 #define NVAVP_OS_IDLE_TIMEOUT 100 /* milli-seconds */
91 #define NVAVP_OUTBOX_WRITE_TIMEOUT 1000 /* milli-seconds */
93 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
94 /* Two control channels: Audio and Video channels */
95 #define NVAVP_NUM_CHANNELS 2
/* Channel ID 1 is the audio control channel (CONFIG_TEGRA_NVAVP_AUDIO only). */
#define NVAVP_AUDIO_CHANNEL 1

/*
 * Nonzero iff @channel_id selects the audio control channel.
 * The argument is parenthesized so that expression arguments
 * (e.g. "flags | 2") expand with the intended precedence; the
 * unparenthesized form mis-evaluates such callers.
 */
#define IS_AUDIO_CHANNEL_ID(channel_id) \
	(((channel_id) == NVAVP_AUDIO_CHANNEL) ? 1 : 0)
101 #define NVAVP_NUM_CHANNELS 1
/* Channel ID 0 represents the Video channel control area */
#define NVAVP_VIDEO_CHANNEL 0
/* Channel ID 1 represents the Audio channel control area */

/*
 * Nonzero iff @channel_id selects the video control channel.
 * The argument is parenthesized for macro-expansion safety so
 * expression arguments evaluate with the intended precedence.
 */
#define IS_VIDEO_CHANNEL_ID(channel_id) \
	(((channel_id) == NVAVP_VIDEO_CHANNEL) ? 1 : 0)
/* System-clock boost rate used while work is pending (see clks_disable). */
110 #define SCLK_BOOST_RATE 40000000
112 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
/*
 * DT compatibles for the VDE power domain, consumed by
 * tegra_pd_get_powergate_id() in the powergate helpers below.
 * NOTE(review): listing appears truncated here — the empty sentinel
 * entry and closing brace of this table are not visible; verify
 * against the full source.
 */
113 static struct of_device_id tegra_vde_pd[] = {
114 { .compatible = "nvidia,tegra132-vde-pd", },
115 { .compatible = "nvidia,tegra124-vde-pd", },
/* True while sclk is boosted instead of dropped to 0 on idle. */
119 static bool boost_sclk;
120 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
/* True while the audio channel holds a runtime-PM reference. */
121 static bool audio_enabled;
/*
 * Per-channel state: one pushbuffer plus the shared control area the
 * AVP-side OS polls for get/put pointers.
 * NOTE(review): several field lines are elided in this listing.
 */
124 struct nvavp_channel {
125 struct mutex pushbuffer_lock;
126 dma_addr_t pushbuf_phys;
130 struct nv_e276_control *os_control;
/*
 * NOTE(review): the opening line of the main driver context struct
 * (struct nvavp_info) is not visible in this listing; the fields from
 * here down to init_task belong to it.
 */
135 struct clk *bsev_clk;
138 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
139 struct clk *bsea_clk;
/* Requested sclk/emc rates, applied when clocks are (re)enabled. */
146 unsigned long sclk_rate;
147 unsigned long emc_clk_rate;
149 int mbox_from_avp_pend_irq;
/* Serializes open/init/uninit and the pending/clk_enabled state. */
151 struct mutex open_lock;
153 int video_initialized;
155 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
156 int audio_initialized;
158 struct work_struct app_notify_work;
159 void (*audio_notify)(void);
161 struct work_struct clock_disable_work;
164 struct nvavp_os_info os_info;
166 /* ucode information */
167 struct nvavp_ucode_info ucode_info;
169 /* client to change min cpu freq rate*/
170 struct pm_qos_request min_cpu_freq_req;
172 /* client to change number of min online cpus*/
173 struct pm_qos_request min_online_cpus_req;
175 struct nvavp_channel channel_info[NVAVP_NUM_CHANNELS];
182 struct platform_device *nvhost_dev;
183 struct miscdevice video_misc_dev;
184 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
185 struct miscdevice audio_misc_dev;
/* Task performing init/uninit; lets runtime-PM helpers avoid deadlock. */
187 struct task_struct *init_task;
/* Per-open-file client context: submit header, relocs, iova rb-tree. */
190 struct nvavp_clientctx {
191 struct nvavp_pushbuffer_submit_hdr submit_hdr;
192 struct nvavp_reloc relocs[NVAVP_MAX_RELOCATION_COUNT];
194 struct nvavp_info *nvavp;
/* iova_lock protects iova_handles (rb-tree of mapped dma-bufs). */
197 spinlock_t iova_lock;
198 struct rb_root iova_handles;
/* Singleton driver context. */
200 static struct nvavp_info *nvavp_info_ctx;
/*
 * Take a runtime-PM reference on the nvhost device.
 * Caller holds open_lock: it is dropped around the synchronous resume
 * to avoid blocking other open_lock users, then re-acquired. The
 * init task instead takes a no-resume reference (it is already in the
 * middle of bringing the device up).
 */
202 static int nvavp_runtime_get(struct nvavp_info *nvavp)
204 if (nvavp->init_task != current) {
205 mutex_unlock(&nvavp->open_lock);
206 pm_runtime_get_sync(&nvavp->nvhost_dev->dev);
207 mutex_lock(&nvavp->open_lock);
210 pm_runtime_get_noresume(&nvavp->nvhost_dev->dev);
/* Drop a runtime-PM reference; device autosuspends after its delay. */
215 static void nvavp_runtime_put(struct nvavp_info *nvavp)
217 pm_runtime_mark_last_busy(&nvavp->nvhost_dev->dev);
218 pm_runtime_put_autosuspend(&nvavp->nvhost_dev->dev);
/* No DMA segment-size limit for this device. */
221 static struct device_dma_parameters nvavp_dma_parameters = {
222 .max_segment_size = UINT_MAX,
/* rb-tree node tracking one dma-buf's DMA mapping (keyed by dmabuf ptr). */
225 struct nvavp_iova_info {
229 struct dma_buf *dmabuf;
230 struct dma_buf_attachment *attachment;
231 struct sg_table *sgt;
235  * Unmap's dmabuf and removes the iova info from rb tree
236  * Call with client iova_lock held.
238 static void nvavp_remove_iova_info_locked(
239 struct nvavp_clientctx *clientctx,
240 struct nvavp_iova_info *b)
242 struct nvavp_info *nvavp = clientctx->nvavp;
244 dev_dbg(&nvavp->nvhost_dev->dev,
245 "remove iova addr (0x%lx))\n", (unsigned long)b->addr);
/* Tear down in reverse order of creation: unmap, detach, drop buf ref. */
246 dma_buf_unmap_attachment(b->attachment,
247 b->sgt, DMA_BIDIRECTIONAL);
248 dma_buf_detach(b->dmabuf, b->attachment);
249 dma_buf_put(b->dmabuf);
250 rb_erase(&b->node, &clientctx->iova_handles);
255  * Searches the given addr in rb tree and return valid pointer if present
256  * Call with client iova_lock held.
/*
 * Standard rb-tree lookup keyed by dma-buf pointer value; on miss,
 * *curr_parent is left pointing at the would-be parent so the caller
 * can insert without re-walking the tree.
 */
258 static struct nvavp_iova_info *nvavp_search_iova_info_locked(
259 struct nvavp_clientctx *clientctx, struct dma_buf *dmabuf,
260 struct rb_node **curr_parent)
262 struct rb_node *parent = NULL;
263 struct rb_node **p = &clientctx->iova_handles.rb_node;
266 struct nvavp_iova_info *b;
268 b = rb_entry(parent, struct nvavp_iova_info, node);
269 if (b->dmabuf == dmabuf)
271 else if (dmabuf > b->dmabuf)
272 p = &parent->rb_right;
274 p = &parent->rb_left;
276 *curr_parent = parent;
281  * Adds a newly-created iova info handle to the rb tree
282  * Call with client iova_lock held.
/*
 * @parent is the insertion parent found by a prior
 * nvavp_search_iova_info_locked() miss; only the final left/right
 * choice is recomputed here before linking and rebalancing.
 */
284 static void nvavp_add_iova_info_locked(struct nvavp_clientctx *clientctx,
285 struct nvavp_iova_info *h, struct rb_node *parent)
287 struct nvavp_iova_info *b;
288 struct nvavp_info *nvavp = clientctx->nvavp;
289 struct rb_node **p = &clientctx->iova_handles.rb_node;
291 dev_dbg(&nvavp->nvhost_dev->dev,
292 "add iova addr (0x%lx))\n", (unsigned long)h->addr);
295 b = rb_entry(parent, struct nvavp_iova_info, node);
296 if (h->dmabuf > b->dmabuf)
297 p = &parent->rb_right;
299 p = &parent->rb_left;
301 rb_link_node(&h->node, parent, p);
302 rb_insert_color(&h->node, &clientctx->iova_handles);
306  * Maps and adds the iova address if already not present in rb tree
307  * if present, update ref count and return iova return iova address
/*
 * Lookup is done twice because the attach/map path must run without
 * the spinlock held (it can sleep): first fast-path check, then a
 * re-check after mapping in case another thread raced us.
 */
309 static int nvavp_get_iova_addr(struct nvavp_clientctx *clientctx,
310 struct dma_buf *dmabuf, dma_addr_t *addr)
312 struct nvavp_info *nvavp = clientctx->nvavp;
313 struct nvavp_iova_info *h;
314 struct nvavp_iova_info *b = NULL;
315 struct rb_node *curr_parent = NULL;
318 spin_lock(&clientctx->iova_lock);
319 b = nvavp_search_iova_info_locked(clientctx, dmabuf, &curr_parent);
321 /* dmabuf already present in rb tree */
324 dev_dbg(&nvavp->nvhost_dev->dev,
325 "found iova addr (0x%pa) ref count(%d))\n",
326 &(b->addr), atomic_read(&b->ref));
329 spin_unlock(&clientctx->iova_lock);
331 /* create new iova_info node */
332 h = kzalloc(sizeof(*h), GFP_KERNEL);
/* Attach + map may sleep; performed outside iova_lock. */
337 h->attachment = dma_buf_attach(dmabuf, &nvavp->nvhost_dev->dev);
338 if (IS_ERR(h->attachment)) {
339 dev_err(&nvavp->nvhost_dev->dev, "cannot attach dmabuf\n");
340 ret = PTR_ERR(h->attachment);
344 h->sgt = dma_buf_map_attachment(h->attachment, DMA_BIDIRECTIONAL);
345 if (IS_ERR(h->sgt)) {
346 dev_err(&nvavp->nvhost_dev->dev, "cannot map dmabuf\n");
347 ret = PTR_ERR(h->sgt);
351 h->addr = sg_dma_address(h->sgt->sgl);
352 atomic_set(&h->ref, 1);
/* Re-check under the lock: another thread may have inserted meanwhile. */
354 spin_lock(&clientctx->iova_lock);
355 b = nvavp_search_iova_info_locked(clientctx, dmabuf, &curr_parent);
357 dev_dbg(&nvavp->nvhost_dev->dev,
358 "found iova addr (0x%pa) ref count(%d))\n",
359 &(b->addr), atomic_read(&b->ref));
362 spin_unlock(&clientctx->iova_lock);
365 nvavp_add_iova_info_locked(clientctx, h, curr_parent);
369 spin_unlock(&clientctx->iova_lock);
/* Error-unwind path: undo map then attach (labels elided in listing). */
372 dma_buf_unmap_attachment(h->attachment, h->sgt, DMA_BIDIRECTIONAL);
374 dma_buf_detach(dmabuf, h->attachment);
382  * Release the given iova address if it is last client otherwise dec ref count.
384 static void nvavp_release_iova_addr(struct nvavp_clientctx *clientctx,
385 struct dma_buf *dmabuf, dma_addr_t addr)
387 struct nvavp_info *nvavp = clientctx->nvavp;
388 struct nvavp_iova_info *b = NULL;
389 struct rb_node *curr_parent;
391 spin_lock(&clientctx->iova_lock);
392 b = nvavp_search_iova_info_locked(clientctx, dmabuf, &curr_parent);
394 dev_err(&nvavp->nvhost_dev->dev,
395 "error iova addr (0x%pa) is not found\n", &addr);
398 /* if it is last reference, release iova info */
399 if (atomic_sub_return(1, &b->ref) == 0)
400 nvavp_remove_iova_info_locked(clientctx, b);
402 spin_unlock(&clientctx->iova_lock);
406  * Release all the iova addresses in rb tree
/* Called on client teardown: unconditionally unmaps every tracked dma-buf. */
408 static void nvavp_remove_iova_mapping(struct nvavp_clientctx *clientctx)
410 struct rb_node *p = NULL;
411 struct nvavp_iova_info *b;
413 spin_lock(&clientctx->iova_lock);
414 while ((p = rb_first(&clientctx->iova_handles))) {
415 b = rb_entry(p, struct nvavp_iova_info, node);
416 nvavp_remove_iova_info_locked(clientctx, b);
418 spin_unlock(&clientctx->iova_lock);
421 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
/* Trivial accessors for the per-engine init flags in struct nvavp_info. */
422 static int nvavp_get_audio_init_status(struct nvavp_info *nvavp)
424 return nvavp->audio_initialized;
427 static void nvavp_set_audio_init_status(struct nvavp_info *nvavp, int status)
429 nvavp->audio_initialized = status;
433 static void nvavp_set_video_init_status(struct nvavp_info *nvavp, int status)
435 nvavp->video_initialized = status;
438 static int nvavp_get_video_init_status(struct nvavp_info *nvavp)
440 return nvavp->video_initialized;
/* Returns the channel state for @channel_id; no bounds check on the index. */
443 static struct nvavp_channel *nvavp_get_channel_info(struct nvavp_info *nvavp, int channel_id)
445 return &nvavp->channel_info[channel_id];
/*
 * Post @val to the CPU->AVP mailbox. A nonzero OUTBOX means the AVP has
 * not consumed the previous message yet, so poll (sleeping 1-2 ms per
 * iteration) until it clears or NVAVP_OUTBOX_WRITE_TIMEOUT ms elapse.
 */
448 static int nvavp_outbox_write(unsigned int val)
450 unsigned int wait_ms = 0;
452 while (readl(NVAVP_OS_OUTBOX)) {
453 usleep_range(1000, 2000);
454 if (++wait_ms > NVAVP_OUTBOX_WRITE_TIMEOUT) {
455 pr_err("No update from AVP in %d ms\n", wait_ms);
459 writel(val, NVAVP_OS_OUTBOX);
/*
 * Initialize the shared nv_e276_control area for one channel: locate it
 * inside the loaded AVP OS image, zero the get/put ring pointers,
 * configure clock-gating/idle-notify policy, and program the DMA window
 * to the channel's pushbuffer.
 */
463 static void nvavp_set_channel_control_area(struct nvavp_info *nvavp, int channel_id)
465 struct nv_e276_control *control;
466 struct nvavp_os_info *os = &nvavp->os_info;
469 struct nvavp_channel *channel_info;
/* Control areas are laid out back-to-back after os->control_offset. */
471 ptr = os->data + os->control_offset + (sizeof(struct nv_e276_control) * channel_id);
473 channel_info = nvavp_get_channel_info(nvavp, channel_id);
474 channel_info->os_control = (struct nv_e276_control *)ptr;
476 control = channel_info->os_control;
478 /* init get and put pointers */
479 writel(0x0, &control->put);
480 writel(0x0, &control->get);
482 pr_debug("nvavp_set_channel_control_area for channel_id (%d):\
483 control->put (0x%08lx) control->get (0x%08lx)\n",
484 channel_id, (uintptr_t) &control->put,
485 (uintptr_t) &control->get);
487 /* Clock gating disabled for video and enabled for audio */
488 if (IS_VIDEO_CHANNEL_ID(channel_id))
489 writel(0x1, &control->idle_clk_enable);
491 writel(0x0, &control->idle_clk_enable);
493 /* Disable iram clock gating */
494 writel(0x0, &control->iram_clk_gating);
496 /* enable avp idle timeout interrupt */
497 writel(0x1, &control->idle_notify_enable);
498 writel(NVAVP_OS_IDLE_TIMEOUT, &control->idle_notify_delay);
500 #if defined(CONFIG_ARCH_TEGRA_11x_SOC) || defined(CONFIG_ARCH_TEGRA_14x_SOC)
501 /* enable sync pt trap enable for avp */
502 if (IS_VIDEO_CHANNEL_ID(channel_id))
503 writel(0x1, &control->sync_pt_incr_trap_enable)
506 /* init dma start and end pointers */
507 writel(channel_info->pushbuf_phys, &control->dma_start);
508 writel((channel_info->pushbuf_phys + NVAVP_PUSHBUFFER_SIZE),
/* Fence leaves room for the final wrap-update commands at buffer end. */
511 writel(0x00, &channel_info->pushbuf_index);
512 temp = NVAVP_PUSHBUFFER_SIZE - NVAVP_PUSHBUFFER_MIN_UPDATE_SPACE;
513 writel(temp, &channel_info->pushbuf_fence);
/* Map an NVAVP_MODULE_ID_* to the corresponding clk handle (NULL-ish otherwise). */
516 static struct clk *nvavp_clk_get(struct nvavp_info *nvavp, int id)
521 if (id == NVAVP_MODULE_ID_AVP)
523 if (id == NVAVP_MODULE_ID_VDE)
524 return nvavp->vde_clk;
525 if (id == NVAVP_MODULE_ID_EMC)
526 return nvavp->emc_clk;
/*
 * Power-gate the VDE partition. With generic PM domains the partition
 * id is resolved from the tegra_vde_pd DT table; otherwise the fixed
 * TEGRA_POWERGATE_VDEC id is used.
 */
531 static int nvavp_powergate_vde(struct nvavp_info *nvavp)
536 dev_dbg(&nvavp->nvhost_dev->dev, "%s++\n", __func__);
539 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
540 partition_id = tegra_pd_get_powergate_id(tegra_vde_pd);
541 if (partition_id < 0)
544 partition_id = TEGRA_POWERGATE_VDEC;
546 ret = tegra_powergate_partition(partition_id);
548 dev_err(&nvavp->nvhost_dev->dev,
549 "%s: powergate failed\n",
/* Mirror image of nvavp_powergate_vde(): power the VDE partition back up. */
555 static int nvavp_unpowergate_vde(struct nvavp_info *nvavp)
560 dev_dbg(&nvavp->nvhost_dev->dev, "%s++\n", __func__);
562 /* UnPowergate VDE */
563 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
564 partition_id = tegra_pd_get_powergate_id(tegra_vde_pd);
565 if (partition_id < 0)
568 partition_id = TEGRA_POWERGATE_VDEC;
570 ret = tegra_unpowergate_partition(partition_id);
572 dev_err(&nvavp->nvhost_dev->dev,
573 "%s: unpowergate failed\n",
/*
 * Reference-counted clock enable. Only the 0->1 transition does real
 * work: runtime-PM get, nvhost busy, enable BSEV/VDE clocks, unpowergate
 * VDE, and apply the requested emc/sclk rates.
 * Caller holds open_lock (nvavp_runtime_get drops/retakes it).
 */
579 static void nvavp_clks_enable(struct nvavp_info *nvavp)
581 if (nvavp->clk_enabled == 0) {
582 nvavp_runtime_get(nvavp);
583 nvavp->clk_enabled++;
584 nvhost_module_busy_ext(nvavp->nvhost_dev);
585 clk_prepare_enable(nvavp->bsev_clk);
586 clk_prepare_enable(nvavp->vde_clk);
587 nvavp_unpowergate_vde(nvavp);
588 clk_set_rate(nvavp->emc_clk, nvavp->emc_clk_rate);
589 clk_set_rate(nvavp->sclk, nvavp->sclk_rate);
590 dev_dbg(&nvavp->nvhost_dev->dev, "%s: setting sclk to %lu\n",
591 __func__, nvavp->sclk_rate);
592 dev_dbg(&nvavp->nvhost_dev->dev, "%s: setting emc_clk to %lu\n",
593 __func__, nvavp->emc_clk_rate);
595 nvavp->clk_enabled++;
/*
 * Reference-counted disable: on the last put (unless stay_on), gate the
 * clocks, drop emc to 0, leave sclk boosted or drop it, power-gate VDE,
 * and release the nvhost/runtime-PM references taken in clks_enable.
 */
599 static void nvavp_clks_disable(struct nvavp_info *nvavp)
601 if ((--nvavp->clk_enabled == 0) && !nvavp->stay_on) {
602 clk_disable_unprepare(nvavp->bsev_clk);
603 clk_disable_unprepare(nvavp->vde_clk);
604 clk_set_rate(nvavp->emc_clk, 0);
606 clk_set_rate(nvavp->sclk, SCLK_BOOST_RATE);
608 clk_set_rate(nvavp->sclk, 0);
609 nvavp_powergate_vde(nvavp);
610 nvhost_module_idle_ext(nvavp->nvhost_dev);
611 nvavp_runtime_put(nvavp);
612 dev_dbg(&nvavp->nvhost_dev->dev, "%s: resetting emc_clk "
613 "and sclk\n", __func__);
/* A channel is idle when the AVP has consumed every queued command (get == put). */
617 static u32 nvavp_check_idle(struct nvavp_info *nvavp, int channel_id)
619 struct nvavp_channel *channel_info = nvavp_get_channel_info(nvavp, channel_id);
620 struct nv_e276_control *control = channel_info->os_control;
622 return (control->put == control->get) ? 1 : 0;
625 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
/*
 * Workqueue handler for the AVP "app notify" interrupt: invoke the
 * registered audio callback (if any) and emit a uevent.
 */
626 static void app_notify_handler(struct work_struct *work)
628 struct nvavp_info *nvavp;
630 nvavp = container_of(work, struct nvavp_info,
632 if (nvavp->audio_notify)
633 nvavp->audio_notify();
635 kobject_uevent(&nvavp->nvhost_dev->dev.kobj, KOBJ_CHANGE);
/*
 * Workqueue handler for the video-idle interrupt: under pushbuffer_lock
 * and open_lock, re-verify the channel is still idle (a submit may have
 * raced in) before clearing pending and dropping the clocks.
 */
639 static void clock_disable_handler(struct work_struct *work)
641 struct nvavp_info *nvavp;
642 struct nvavp_channel *channel_info;
644 nvavp = container_of(work, struct nvavp_info,
647 channel_info = nvavp_get_channel_info(nvavp, NVAVP_VIDEO_CHANNEL);
648 mutex_lock(&channel_info->pushbuffer_lock);
649 mutex_lock(&nvavp->open_lock);
651 trace_nvavp_clock_disable_handler(channel_info->os_control->put,
652 channel_info->os_control->get,
655 if (nvavp_check_idle(nvavp, NVAVP_VIDEO_CHANNEL) && nvavp->pending) {
656 nvavp->pending = false;
657 nvavp_clks_disable(nvavp);
659 mutex_unlock(&nvavp->open_lock);
660 mutex_unlock(&channel_info->pushbuffer_lock);
/*
 * Handle one AVP->CPU mailbox message: decode the NVE276_OS_INTERRUPT_*
 * bits in the inbox word, dispatch to the matching handler(s), then ack
 * by writing the VALID bit back. Runs from the mailbox ISR.
 */
663 static int nvavp_service(struct nvavp_info *nvavp)
665 struct nvavp_os_info *os = &nvavp->os_info;
669 inbox = readl(NVAVP_OS_INBOX);
670 if (!(inbox & NVAVP_INBOX_VALID))
/* Video idle: defer clock teardown to process context via workqueue. */
673 if ((inbox & NVE276_OS_INTERRUPT_VIDEO_IDLE) && (!nvavp->stay_on))
674 schedule_work(&nvavp->clock_disable_work);
676 if (inbox & NVE276_OS_INTERRUPT_SYNCPT_INCR_TRAP) {
678 if (nvavp->syncpt_id == NVE276_OS_SYNCPT_INCR_TRAP_GET_SYNCPT(inbox))
679 nvhost_syncpt_cpu_incr_ext(
680 nvavp->nvhost_dev, nvavp->syncpt_id);
683 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
684 if (inbox & NVE276_OS_INTERRUPT_AUDIO_IDLE) {
686 audio_enabled = false;
687 nvavp_runtime_put(nvavp);
689 pr_debug("nvavp_service NVE276_OS_INTERRUPT_AUDIO_IDLE\n");
692 if (inbox & NVE276_OS_INTERRUPT_DEBUG_STRING) {
693 /* Should only occur with debug AVP OS builds */
694 debug_print = os->data;
695 debug_print += os->debug_offset;
696 dev_info(&nvavp->nvhost_dev->dev, "%s\n", debug_print);
698 if (inbox & (NVE276_OS_INTERRUPT_SEMAPHORE_AWAKEN |
699 NVE276_OS_INTERRUPT_EXECUTE_AWAKEN)) {
700 dev_info(&nvavp->nvhost_dev->dev,
701 "AVP awaken event (0x%x)\n", inbox);
703 if (inbox & NVE276_OS_INTERRUPT_AVP_FATAL_ERROR) {
704 dev_err(&nvavp->nvhost_dev->dev,
705 "fatal AVP error (0x%08X)\n", inbox);
707 if (inbox & NVE276_OS_INTERRUPT_AVP_BREAKPOINT)
708 dev_err(&nvavp->nvhost_dev->dev, "AVP breakpoint hit\n");
709 if (inbox & NVE276_OS_INTERRUPT_TIMEOUT)
710 dev_err(&nvavp->nvhost_dev->dev, "AVP timeout\n");
/* Acknowledge the message so the AVP can post the next one. */
711 writel(inbox & NVAVP_INBOX_VALID, NVAVP_OS_INBOX);
713 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
714 if (inbox & NVE276_OS_INTERRUPT_APP_NOTIFY) {
715 pr_debug("nvavp_service NVE276_OS_INTERRUPT_APP_NOTIFY\n");
716 schedule_work(&nvavp->app_notify_work);
/* Threaded/hard IRQ entry point for the AVP mailbox interrupt. */
723 static irqreturn_t nvavp_mbox_pending_isr(int irq, void *data)
725 struct nvavp_info *nvavp = data;
727 nvavp_service(nvavp);
/* Stop the AVP core (flow-controller halt + COP reset) and clear both mailboxes. */
732 static void nvavp_halt_avp(struct nvavp_info *nvavp)
734 /* ensure the AVP is halted */
735 writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);
736 tegra_periph_reset_assert(nvavp->cop_clk);
738 writel(0, NVAVP_OS_OUTBOX);
739 writel(0, NVAVP_OS_INBOX);
/*
 * Cold-boot the AVP at @reset_addr: halt the core, program the reset
 * vector, bring up sclk/emc at max rate, pulse the COP reset, then
 * release the flow-controller halt. On Tegra2 (AVP MMU) the entry goes
 * via a boot stub whose data block is DMA-mapped first.
 */
742 static int nvavp_reset_avp(struct nvavp_info *nvavp, unsigned long reset_addr)
744 #if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
745 unsigned long stub_code_phys = virt_to_phys(_tegra_avp_boot_stub);
746 dma_addr_t stub_data_phys;
/* Refuse to reset while the audio channel still has queued work. */
749 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
750 if (!(nvavp_check_idle(nvavp, NVAVP_AUDIO_CHANNEL)))
754 #if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
755 _tegra_avp_boot_stub_data.map_phys_addr = avp->kernel_phys;
756 _tegra_avp_boot_stub_data.jump_addr = reset_addr;
758 stub_data_phys = dma_map_single(NULL, &_tegra_avp_boot_stub_data,
759 sizeof(_tegra_avp_boot_stub_data),
762 reset_addr = (unsigned long)stub_data_phys;
764 writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);
766 writel(reset_addr, TEGRA_NVAVP_RESET_VECTOR_ADDR);
768 clk_prepare_enable(nvavp->sclk);
769 clk_prepare_enable(nvavp->emc_clk);
771 /* If sclk_rate and emc_clk is not set by user space,
772  * max clock in dvfs table will be used to get best performance.
774 nvavp->sclk_rate = ULONG_MAX;
775 nvavp->emc_clk_rate = ULONG_MAX;
/* Pulse the COP reset so the AVP restarts from the new vector. */
777 tegra_periph_reset_assert(nvavp->cop_clk);
779 tegra_periph_reset_deassert(nvavp->cop_clk);
781 writel(FLOW_MODE_NONE, FLOW_CTRL_HALT_COP_EVENTS);
783 #if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
784 dma_unmap_single(NULL, stub_data_phys,
785 sizeof(_tegra_avp_boot_stub_data),
/*
 * Stop the video decoder: drop any pending clock reference, then hold
 * BSEV and VDE in reset.
 */
791 static void nvavp_halt_vde(struct nvavp_info *nvavp)
793 if (nvavp->clk_enabled && !nvavp->pending)
796 if (nvavp->pending) {
797 nvavp_clks_disable(nvavp);
798 nvavp->pending = false;
801 tegra_periph_reset_assert(nvavp->bsev_clk);
802 tegra_periph_reset_assert(nvavp->vde_clk);
/*
 * Re-initialize the decoder: with clocks temporarily enabled, pulse the
 * BSEV and VDE resets and restore the default (max) VDE clock rate.
 */
805 static int nvavp_reset_vde(struct nvavp_info *nvavp)
807 if (nvavp->clk_enabled)
810 nvavp_clks_enable(nvavp);
812 tegra_periph_reset_assert(nvavp->bsev_clk);
814 tegra_periph_reset_deassert(nvavp->bsev_clk);
816 tegra_periph_reset_assert(nvavp->vde_clk);
818 tegra_periph_reset_deassert(nvavp->vde_clk);
821  * VDE clock is set to max freq by default.
822  * VDE clock can be set to different freq if needed
825 clk_set_rate(nvavp->vde_clk, ULONG_MAX);
827 nvavp_clks_disable(nvavp);
/* Allocate one channel's 4 KiB pushbuffer from zeroed DMA-coherent memory. */
832 static int nvavp_pushbuffer_alloc(struct nvavp_info *nvavp, int channel_id)
836 struct nvavp_channel *channel_info = nvavp_get_channel_info(
839 channel_info->pushbuf_data = dma_zalloc_coherent(&nvavp->nvhost_dev->dev,
840 NVAVP_PUSHBUFFER_SIZE,
841 &channel_info->pushbuf_phys,
844 if (!channel_info->pushbuf_data) {
845 dev_err(&nvavp->nvhost_dev->dev,
846 "cannot alloc pushbuffer memory\n");
/* Free every channel's pushbuffer that was successfully allocated. */
853 static void nvavp_pushbuffer_free(struct nvavp_info *nvavp)
857 for (channel_id = 0; channel_id < NVAVP_NUM_CHANNELS; channel_id++) {
858 if (nvavp->channel_info[channel_id].pushbuf_data) {
859 dma_free_coherent(&nvavp->nvhost_dev->dev,
860 NVAVP_PUSHBUFFER_SIZE,
861 nvavp->channel_info[channel_id].pushbuf_data,
862 nvavp->channel_info[channel_id].pushbuf_phys);
/*
 * Allocate pushbuffers and set up the control area for every channel;
 * for the video channel also latch the current AVP syncpoint value.
 */
868 static int nvavp_pushbuffer_init(struct nvavp_info *nvavp)
873 for (channel_id = 0; channel_id < NVAVP_NUM_CHANNELS; channel_id++) {
874 ret = nvavp_pushbuffer_alloc(nvavp, channel_id);
876 dev_err(&nvavp->nvhost_dev->dev,
877 "unable to alloc pushbuffer\n");
880 nvavp_set_channel_control_area(nvavp, channel_id);
881 if (IS_VIDEO_CHANNEL_ID(channel_id)) {
882 nvavp->syncpt_id = NVSYNCPT_AVP_0;
883 if (!nvhost_syncpt_read_ext_check(nvavp->nvhost_dev,
884 nvavp->syncpt_id, &val))
885 nvavp->syncpt_value = val;
/* Counterpart of nvavp_pushbuffer_init(). */
892 static void nvavp_pushbuffer_deinit(struct nvavp_info *nvavp)
894 nvavp_pushbuffer_free(nvavp);
/*
 * Queue one command submission on @channel_id's pushbuffer ring:
 * optionally a SET_MICROCODE sequence, then a GATHER pointing at
 * @phys_addr/@gather_count, then a syncpoint increment. Advances the
 * shared put pointer and rings the AVP doorbell via the outbox.
 * Fills @syncpt (if non-NULL per original logic) with the fence to wait on.
 */
897 static int nvavp_pushbuffer_update(struct nvavp_info *nvavp, u32 phys_addr,
898 u32 gather_count, struct nvavp_syncpt *syncpt,
899 u32 ext_ucode_flag, int channel_id)
901 struct nvavp_channel  *channel_info;
902 struct nv_e276_control *control;
903 u32 gather_cmd, setucode_cmd, sync = 0;
905 u32 index, value = -1;
908 mutex_lock(&nvavp->open_lock);
909 nvavp_runtime_get(nvavp);
910 mutex_unlock(&nvavp->open_lock);
911 channel_info = nvavp_get_channel_info(nvavp, channel_id);
913 control = channel_info->os_control;
914 pr_debug("nvavp_pushbuffer_update for channel_id (%d):\
915 control->put (0x%lx) control->get (0x%lx)\n",
916 channel_id, (uintptr_t) &control->put,
917 (uintptr_t) &control->get);
919 mutex_lock(&channel_info->pushbuffer_lock);
921 /* check for pushbuffer wrapping */
922 if (channel_info->pushbuf_index >= channel_info->pushbuf_fence)
923 channel_info->pushbuf_index = 0;
/* Without external ucode, emit SET_MICROCODE_A..C (offset/phys/size). */
925 if (!ext_ucode_flag) {
927 NVE26E_CH_OPCODE_INCR(NVE276_SET_MICROCODE_A, 3);
929 index = wordcount + channel_info->pushbuf_index;
930 writel(setucode_cmd, (channel_info->pushbuf_data + index));
931 wordcount += sizeof(u32);
933 index = wordcount + channel_info->pushbuf_index;
934 writel(0, (channel_info->pushbuf_data + index));
935 wordcount += sizeof(u32);
937 index = wordcount + channel_info->pushbuf_index;
938 writel(nvavp->ucode_info.phys,
939 (channel_info->pushbuf_data + index));
940 wordcount += sizeof(u32);
942 index = wordcount + channel_info->pushbuf_index;
943 writel(nvavp->ucode_info.size,
944 (channel_info->pushbuf_data + index));
945 wordcount += sizeof(u32);
948 gather_cmd = NVE26E_CH_OPCODE_GATHER(0, 0, 0, gather_count);
/* Reserve the next syncpoint value for this submission's fence. */
951 value = ++nvavp->syncpt_value;
952 /* XXX: NvSchedValueWrappingComparison */
953 sync = NVE26E_CH_OPCODE_IMM(NVE26E_HOST1X_INCR_SYNCPT,
954 (NVE26E_HOST1X_INCR_SYNCPT_COND_OP_DONE << 8) |
955 (nvavp->syncpt_id & 0xFF));
958 /* write commands out */
959 index = wordcount + channel_info->pushbuf_index;
960 writel(gather_cmd, (channel_info->pushbuf_data + index));
961 wordcount += sizeof(u32);
963 index = wordcount + channel_info->pushbuf_index;
964 writel(phys_addr, (channel_info->pushbuf_data + index));
965 wordcount += sizeof(u32);
968 index = wordcount + channel_info->pushbuf_index;
969 writel(sync, (channel_info->pushbuf_data + index));
970 wordcount += sizeof(u32);
973 /* enable clocks to VDE/BSEV */
974 mutex_lock(&nvavp->open_lock);
975 if (!nvavp->pending && IS_VIDEO_CHANNEL_ID(channel_id)) {
976 nvavp_clks_enable(nvavp);
977 nvavp->pending = true;
979 mutex_unlock(&nvavp->open_lock);
981 /* update put pointer */
982 channel_info->pushbuf_index = (channel_info->pushbuf_index + wordcount)&
983 (NVAVP_PUSHBUFFER_SIZE - 1);
985 writel(channel_info->pushbuf_index, &control->put);
/* Doorbell: distinct outbox codes wake the video vs. audio channel. */
990 if (IS_VIDEO_CHANNEL_ID(channel_id)) {
991 pr_debug("Wake up Video Channel\n");
992 ret = nvavp_outbox_write(0xA0000001);
997 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
998 if (IS_AUDIO_CHANNEL_ID(channel_id)) {
999 pr_debug("Wake up Audio Channel\n");
1000 if (!audio_enabled) {
1001 mutex_lock(&nvavp->open_lock);
1002 nvavp_runtime_get(nvavp);
1003 mutex_unlock(&nvavp->open_lock);
1004 audio_enabled = true;
1006 ret = nvavp_outbox_write(0xA0000002);
1012 /* Fill out fence struct */
1014 syncpt->id = nvavp->syncpt_id;
1015 syncpt->value = value;
1018 trace_nvavp_pushbuffer_update(channel_id, control->put, control->get,
1019 phys_addr, gather_count,
1020 sizeof(struct nvavp_syncpt), syncpt);
1023 mutex_unlock(&channel_info->pushbuffer_lock);
1024 nvavp_runtime_put(nvavp);
/* Free the DMA-coherent ucode copy and the cached firmware image. */
1029 static void nvavp_unload_ucode(struct nvavp_info *nvavp)
1031 dma_free_coherent(&nvavp->nvhost_dev->dev, nvavp->ucode_info.size,
1032 nvavp->ucode_info.data, nvavp->ucode_info.phys);
1033 kfree(nvavp->ucode_info.ucode_bin);
/*
 * Load the video ucode: fetch the firmware (with an _alt fallback
 * name), validate the "NVAVPAPP" header, cache the payload in
 * ucode_bin, then copy it into a DMA-coherent buffer for the AVP.
 * Cached ucode_bin makes reloads after suspend cheap.
 */
1036 static int nvavp_load_ucode(struct nvavp_info *nvavp)
1038 struct nvavp_ucode_info *ucode_info = &nvavp->ucode_info;
1039 const struct firmware *nvavp_ucode_fw;
1040 char fw_ucode_file[32];
1044 if (!ucode_info->ucode_bin) {
1045 sprintf(fw_ucode_file, "nvavp_vid_ucode.bin");
1047 ret = request_firmware(&nvavp_ucode_fw, fw_ucode_file,
1048 nvavp->video_misc_dev.this_device);
1050 /* Try alternative version */
1051 sprintf(fw_ucode_file, "nvavp_vid_ucode_alt.bin");
1053 ret = request_firmware(&nvavp_ucode_fw,
1055 nvavp->video_misc_dev.this_device);
1058 dev_err(&nvavp->nvhost_dev->dev,
1059 "cannot read ucode firmware '%s'\n",
1065 dev_info(&nvavp->nvhost_dev->dev,
1066 "read ucode firmware from '%s' (%zu bytes)\n",
1067 fw_ucode_file, nvavp_ucode_fw->size);
1069 ptr = (void *)nvavp_ucode_fw->data;
/* 8-byte magic header precedes the actual ucode payload. */
1071 if (strncmp((const char *)ptr, "NVAVPAPP", 8)) {
1072 dev_dbg(&nvavp->nvhost_dev->dev,
1073 "ucode hdr string mismatch\n");
1078 ucode_info->size = nvavp_ucode_fw->size - 8;
1080 ucode_info->ucode_bin = kzalloc(ucode_info->size,
1082 if (!ucode_info->ucode_bin) {
1083 dev_err(&nvavp->nvhost_dev->dev,
1084 "cannot allocate ucode bin\n");
1086 goto err_ubin_alloc;
1089 ucode_info->data = dma_alloc_coherent(&nvavp->nvhost_dev->dev,
1093 if (!ucode_info->data) {
1094 dev_err(&nvavp->nvhost_dev->dev,
1095 "cannot alloc memory for ucode\n");
1097 goto err_ucode_alloc;
1099 memcpy(ucode_info->ucode_bin, ptr, ucode_info->size);
1100 release_firmware(nvavp_ucode_fw);
1103 memcpy(ucode_info->data, ucode_info->ucode_bin, ucode_info->size);
/* Error-unwind labels (elided in listing): free cache, release firmware. */
1107 kfree(nvavp->ucode_info.ucode_bin);
1109 release_firmware(nvavp_ucode_fw);
/* Free the 1 MiB AVP OS region and the cached OS image. */
1114 static void nvavp_unload_os(struct nvavp_info *nvavp)
1116 dma_free_coherent(&nvavp->nvhost_dev->dev, SZ_1M,
1117 nvavp->os_info.data, nvavp->os_info.phys);
1118 kfree(nvavp->os_info.os_bin);
/*
 * Load the AVP OS image @fw_os_file: validate the "NVAVP-OS" header,
 * parse entry/control/debug offsets and size from the header words,
 * cache the image in os_bin, then copy it into the OS region and
 * compute the reset (entry) address.
 */
1121 static int nvavp_load_os(struct nvavp_info *nvavp, char *fw_os_file)
1123 struct nvavp_os_info *os_info = &nvavp->os_info;
1124 const struct firmware *nvavp_os_fw;
1129 if (!os_info->os_bin) {
1130 ret = request_firmware(&nvavp_os_fw, fw_os_file,
1131 nvavp->video_misc_dev.this_device);
1133 dev_err(&nvavp->nvhost_dev->dev,
1134 "cannot read os firmware '%s'\n", fw_os_file);
1138 dev_info(&nvavp->nvhost_dev->dev,
1139 "read firmware from '%s' (%zu bytes)\n",
1140 fw_os_file, nvavp_os_fw->size);
1142 ptr = (void *)nvavp_os_fw->data;
1144 if (strncmp((const char *)ptr, "NVAVP-OS", 8)) {
1145 dev_dbg(&nvavp->nvhost_dev->dev,
1146 "os hdr string mismatch\n");
/* Header layout after magic: entry, control, debug offsets, then size. */
1152 os_info->entry_offset = *((u32 *)ptr);
1154 os_info->control_offset = *((u32 *)ptr);
1156 os_info->debug_offset = *((u32 *)ptr);
1159 size = *((u32 *)ptr); ptr += sizeof(u32);
1161 os_info->size = size;
1162 os_info->os_bin = kzalloc(os_info->size,
1164 if (!os_info->os_bin) {
1165 dev_err(&nvavp->nvhost_dev->dev,
1166 "cannot allocate os bin\n");
1171 memcpy(os_info->os_bin, ptr, os_info->size);
/* Zero the remainder of the 1 MiB region beyond the image. */
1172 memset(os_info->data + os_info->size, 0, SZ_1M - os_info->size);
1174 dev_dbg(&nvavp->nvhost_dev->dev,
1175 "entry=%08x control=%08x debug=%08x size=%d\n",
1176 os_info->entry_offset, os_info->control_offset,
1177 os_info->debug_offset, os_info->size);
1178 release_firmware(nvavp_os_fw);
1181 memcpy(os_info->data, os_info->os_bin, os_info->size);
1182 os_info->reset_addr = os_info->phys + os_info->entry_offset;
1184 dev_dbg(&nvavp->nvhost_dev->dev,
1185 "AVP os at vaddr=%p paddr=%llx reset_addr=%llx\n",
1186 os_info->data, (u64)(os_info->phys), (u64)os_info->reset_addr);
1190 release_firmware(nvavp_os_fw);
/*
 * One-time OS bring-up (skipped if either engine already initialized):
 * choose the firmware filename and reset address according to the
 * memory scheme (AVP MMU / SMMU / carveout), load the OS image, set up
 * pushbuffers, and enable the AVP mailbox interrupt.
 */
1196 static int nvavp_os_init(struct nvavp_info *nvavp)
1198 char fw_os_file[32];
1200 int video_initialized, audio_initialized = 0;
1202 video_initialized = nvavp_get_video_init_status(nvavp);
1204 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
1205 audio_initialized = nvavp_get_audio_init_status(nvavp);
1207 pr_debug("video_initialized(%d) audio_initialized(%d)\n",
1208 video_initialized, audio_initialized);
1210 /* Video and Audio both are initialized */
1211 if (video_initialized || audio_initialized)
1214 /* Video or Audio both are uninitialized */
1215 pr_debug("video_initialized == audio_initialized (%d)\n",
1216 nvavp->video_initialized);
1217 #if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU) /* Tegra2 with AVP MMU */
1218 /* paddr is phys address */
1219 /* vaddr is AVP_KERNEL_VIRT_BASE */
1220 dev_dbg(&nvavp->nvhost_dev->dev,
1221 "using AVP MMU to relocate AVP os\n");
1222 sprintf(fw_os_file, "nvavp_os.bin");
1223 nvavp->os_info.reset_addr = AVP_KERNEL_VIRT_BASE;
1224 #elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU) /* Tegra3 with SMMU */
1225 /* paddr is any address behind SMMU */
1226 /* vaddr is TEGRA_SMMU_BASE */
1227 dev_dbg(&nvavp->nvhost_dev->dev,
1228 "using SMMU at %lx to load AVP kernel\n",
1229 (unsigned long)nvavp->os_info.phys);
1230 BUG_ON(nvavp->os_info.phys != 0xeff00000
1231 && nvavp->os_info.phys != 0x0ff00000
1232 && nvavp->os_info.phys != 0x8ff00000);
1233 sprintf(fw_os_file, "nvavp_os_%08lx.bin",
1234 (unsigned long)nvavp->os_info.phys);
1235 nvavp->os_info.reset_addr = nvavp->os_info.phys;
1236 #else /* nvmem= carveout */
1237 dev_dbg(&nvavp->nvhost_dev->dev,
1238 "using nvmem= carveout at %llx to load AVP os\n",
1239 (u64)nvavp->os_info.phys);
/* Firmware name is keyed to the carveout physical address. */
1240 sprintf(fw_os_file, "nvavp_os_%08llx.bin", (u64)nvavp->os_info.phys);
1241 nvavp->os_info.reset_addr = nvavp->os_info.phys;
1242 nvavp->os_info.data = ioremap(nvavp->os_info.phys, SZ_1M);
1244 ret = nvavp_load_os(nvavp, fw_os_file);
1246 dev_err(&nvavp->nvhost_dev->dev,
1247 "unable to load os firmware '%s'\n", fw_os_file);
1251 ret = nvavp_pushbuffer_init(nvavp);
1253 dev_err(&nvavp->nvhost_dev->dev,
1254 "unable to init pushbuffer\n");
1257 tegra_init_legacy_irq_cop();
1258 enable_irq(nvavp->mbox_from_avp_pend_irq);
/*
 * Per-channel init entry point: bring up the AVP OS if needed, then for
 * a not-yet-initialized video channel load the ucode and reset VDE+AVP;
 * for audio, just reset the AVP. init_task marks the current task so
 * the runtime-PM helpers can detect re-entry during bring-up.
 */
1263 static int nvavp_init(struct nvavp_info *nvavp, int channel_id)
1266 int video_initialized = 0;
1267 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
1268 int audio_initialized = 0;
1271 nvavp->init_task = current;
1273 ret = nvavp_os_init(nvavp);
1275 dev_err(&nvavp->nvhost_dev->dev,
1276 "unable to load os firmware and allocate buffers\n");
1279 video_initialized = nvavp_get_video_init_status(nvavp);
1280 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
1281 audio_initialized = nvavp_get_audio_init_status(nvavp);
1284 if (IS_VIDEO_CHANNEL_ID(channel_id) && (!video_initialized)) {
1285 pr_debug("nvavp_init : channel_ID (%d)\n", channel_id);
1286 ret = nvavp_load_ucode(nvavp);
1288 dev_err(&nvavp->nvhost_dev->dev,
1289 "unable to load ucode\n");
1293 nvavp_reset_vde(nvavp);
1294 nvavp_reset_avp(nvavp, nvavp->os_info.reset_addr);
1296 nvavp_set_video_init_status(nvavp, 1);
1298 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
1299 if (IS_AUDIO_CHANNEL_ID(channel_id) && (!audio_initialized)) {
1300 pr_debug("nvavp_init : channel_ID (%d)\n", channel_id);
1301 nvavp_reset_avp(nvavp, nvavp->os_info.reset_addr);
1302 nvavp_set_audio_init_status(nvavp, 1);
1307 nvavp->init_task = NULL;
/* TMR2 register bits used by the LP1-wakeup workaround in nvavp_uninit(). */
1311 #define TIMER_EN (1 << 31)
1312 #define TIMER_PERIODIC (1 << 30)
1313 #define TIMER_PCR 0x4
1314 #define TIMER_PCR_INTR (1 << 30)
1316 /* This should be called with the open_lock held */
/*
 * Tear down whichever channels are initialized; once both video and
 * audio are down, gate clocks, disable the mailbox IRQ, free the
 * pushbuffer and halt the AVP. Also applies a TMR2 workaround so a
 * stale periodic timer cannot wake the system from LP1.
 */
1317 static void nvavp_uninit(struct nvavp_info *nvavp)
1319 int video_initialized, audio_initialized = 0;
1322 video_initialized = nvavp_get_video_init_status(nvavp);
1324 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
1325 audio_initialized = nvavp_get_audio_init_status(nvavp);
1328 pr_debug("nvavp_uninit video_initialized(%d) audio_initialized(%d)\n",
1329 video_initialized, audio_initialized);
1331 /* Video and Audio both are uninitialized */
/* Nothing to do if neither channel ever came up. */
1332 if (!video_initialized && !audio_initialized)
1335 nvavp->init_task = current;
1337 if (video_initialized) {
1338 pr_debug("nvavp_uninit nvavp->video_initialized\n");
1339 nvavp_halt_vde(nvavp);
1340 nvavp_set_video_init_status(nvavp, 0);
1341 video_initialized = 0;
1344 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
1345 if (audio_initialized) {
/* Flush any pending audio app-notify work before marking it down. */
1346 cancel_work_sync(&nvavp->app_notify_work);
1347 nvavp_set_audio_init_status(nvavp, 0);
1348 audio_initialized = 0;
1352 /* Video and Audio both becomes uninitialized */
1353 if (!video_initialized && !audio_initialized) {
1354 pr_debug("nvavp_uninit both channels uninitialized\n");
1356 clk_disable_unprepare(nvavp->sclk);
1357 clk_disable_unprepare(nvavp->emc_clk);
1358 disable_irq(nvavp->mbox_from_avp_pend_irq);
1359 nvavp_pushbuffer_deinit(nvavp);
1360 nvavp_halt_avp(nvavp);
1364 * WAR: turn off TMR2 for fix LP1 wake up by TMR2.
1365 * turn off the periodic interrupt and the timer temporarily
1367 reg = timer_readl(TIMER2_OFFSET + TIMER_PTV);
1368 reg &= ~(TIMER_EN | TIMER_PERIODIC);
1369 timer_writel(reg, TIMER2_OFFSET + TIMER_PTV);
1371 /* write a 1 to the intr_clr field to clear the interrupt */
1372 reg = TIMER_PCR_INTR;
1373 timer_writel(reg, TIMER2_OFFSET + TIMER_PCR);
1374 nvavp->init_task = NULL;
/*
 * Translate an NVAVP_MODULE_ID_CPU clock request into a PM QoS minimum
 * CPU frequency constraint; rate 0 drops the request back to default.
 */
1377 static int nvcpu_set_clock(struct nvavp_info *nvavp,
1378 struct nvavp_clock_args config,
1381 dev_dbg(&nvavp->nvhost_dev->dev, "%s: update cpu freq to clk_rate=%u\n",
1382 __func__, config.rate);
1384 if (config.rate > 0)
1385 pm_qos_update_request(&nvavp->min_cpu_freq_req, config.rate);
1387 pm_qos_update_request(&nvavp->min_cpu_freq_req,
1388 PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE);
/*
 * NVAVP_IOCTL_MAP_IOVA: map a user-supplied dma-buf fd into AVP IOVA
 * space (via nvavp_get_iova_addr) and return the 32-bit address to
 * userspace. NOTE(review): no dma_buf_put() is visible on the success
 * path here — presumably the reference is retained by the iova-handle
 * tree and dropped in nvavp_unmap_iova/nvavp_remove_iova_mapping;
 * confirm against the elided lines.
 */
1393 static int nvavp_map_iova(struct file *filp, unsigned int cmd,
1396 struct nvavp_clientctx *clientctx = filp->private_data;
1397 struct nvavp_info *nvavp = clientctx->nvavp;
1398 struct nvavp_map_args map_arg;
1399 struct dma_buf *dmabuf;
1400 dma_addr_t addr = 0;
1403 if (copy_from_user(&map_arg, (void __user *)arg,
1404 sizeof(struct nvavp_map_args))) {
1405 dev_err(&nvavp->nvhost_dev->dev,
1406 "failed to copy memory handle\n");
1410 dev_err(&nvavp->nvhost_dev->dev,
1411 "invalid memory handle %08x\n", map_arg.fd);
1415 dmabuf = dma_buf_get(map_arg.fd);
1416 if (IS_ERR(dmabuf)) {
1417 dev_err(&nvavp->nvhost_dev->dev,
1418 "invalid buffer handle %08x\n", map_arg.fd);
1419 return PTR_ERR(dmabuf);
1422 ret = nvavp_get_iova_addr(clientctx, dmabuf, &addr);
/* Return the mapped address truncated to the 32-bit field userspace expects. */
1426 map_arg.addr = (__u32)addr;
1428 trace_nvavp_map_iova(clientctx->channel_id, map_arg.fd, map_arg.addr);
1430 if (copy_to_user((void __user *)arg, &map_arg,
1431 sizeof(struct nvavp_map_args))) {
1432 dev_err(&nvavp->nvhost_dev->dev,
1433 "failed to copy phys addr\n");
/*
 * NVAVP_IOCTL_UNMAP_IOVA: release a previously mapped dma-buf IOVA.
 * The dma_buf_get()/dma_buf_put() pair here only pins the buffer for
 * the duration of the release call.
 */
1441 static int nvavp_unmap_iova(struct file *filp, unsigned long arg)
1443 struct nvavp_clientctx *clientctx = filp->private_data;
1444 struct nvavp_info *nvavp = clientctx->nvavp;
1445 struct nvavp_map_args map_arg;
1446 struct dma_buf *dmabuf;
1448 if (copy_from_user(&map_arg, (void __user *)arg,
1449 sizeof(struct nvavp_map_args))) {
1450 dev_err(&nvavp->nvhost_dev->dev,
1451 "failed to copy memory handle\n");
1455 dmabuf = dma_buf_get(map_arg.fd);
1456 if (IS_ERR(dmabuf)) {
1457 dev_err(&nvavp->nvhost_dev->dev,
1458 "invalid buffer handle %08x\n", map_arg.fd);
1459 return PTR_ERR(dmabuf);
1462 trace_nvavp_unmap_iova(clientctx->channel_id, map_arg.fd, map_arg.addr);
1464 nvavp_release_iova_addr(clientctx, dmabuf, (dma_addr_t)map_arg.addr);
1465 dma_buf_put(dmabuf);
/*
 * NVAVP_IOCTL_SET_CLOCK: set the rate of a module clock. AVP/EMC rates
 * are only cached in nvavp (applied elsewhere); CPU requests are routed
 * to PM QoS via nvcpu_set_clock(). Other ids are applied directly and
 * the achieved rate is copied back to userspace.
 */
1470 static int nvavp_set_clock_ioctl(struct file *filp, unsigned int cmd,
1473 struct nvavp_clientctx *clientctx = filp->private_data;
1474 struct nvavp_info *nvavp = clientctx->nvavp;
1476 struct nvavp_clock_args config;
1478 if (copy_from_user(&config, (void __user *)arg, sizeof(struct nvavp_clock_args)))
1481 dev_dbg(&nvavp->nvhost_dev->dev, "%s: clk_id=%d, clk_rate=%u\n",
1482 __func__, config.id, config.rate);
1484 if (config.id == NVAVP_MODULE_ID_AVP)
1485 nvavp->sclk_rate = config.rate;
1486 else if (config.id == NVAVP_MODULE_ID_EMC)
1487 nvavp->emc_clk_rate = config.rate;
1488 else if (config.id == NVAVP_MODULE_ID_CPU)
1489 return nvcpu_set_clock(nvavp, config, arg);
1491 c = nvavp_clk_get(nvavp, config.id);
1492 if (IS_ERR_OR_NULL(c))
/* Enable around set/get so clk_get_rate reports the live rate. */
1495 clk_prepare_enable(c);
1496 clk_set_rate(c, config.rate);
1498 config.rate = clk_get_rate(c);
1499 clk_disable_unprepare(c);
1501 trace_nvavp_set_clock_ioctl(clientctx->channel_id, config.id,
1504 if (copy_to_user((void __user *)arg, &config, sizeof(struct nvavp_clock_args)))
/*
 * NVAVP_IOCTL_GET_CLOCK: report the current rate of a module clock
 * back to userspace.
 */
1510 static int nvavp_get_clock_ioctl(struct file *filp, unsigned int cmd,
1513 struct nvavp_clientctx *clientctx = filp->private_data;
1514 struct nvavp_info *nvavp = clientctx->nvavp;
1516 struct nvavp_clock_args config;
1518 if (copy_from_user(&config, (void __user *)arg, sizeof(struct nvavp_clock_args)))
1521 c = nvavp_clk_get(nvavp, config.id);
1522 if (IS_ERR_OR_NULL(c))
1525 clk_prepare_enable(c);
1526 config.rate = clk_get_rate(c);
1527 clk_disable_unprepare(c);
1529 trace_nvavp_get_clock_ioctl(clientctx->channel_id, config.id,
1532 if (copy_to_user((void __user *)arg, &config, sizeof(struct nvavp_clock_args)))
/*
 * NVAVP_IOCTL_GET_SYNCPOINT_ID: return the driver's syncpoint id to
 * userspace (only when the ioctl direction includes a read).
 */
1538 static int nvavp_get_syncpointid_ioctl(struct file *filp, unsigned int cmd,
1541 struct nvavp_clientctx *clientctx = filp->private_data;
1542 struct nvavp_info *nvavp = clientctx->nvavp;
1543 u32 id = nvavp->syncpt_id;
1545 if (_IOC_DIR(cmd) & _IOC_READ) {
1546 if (copy_to_user((void __user *)arg, &id, sizeof(u32)))
1552 trace_nvavp_get_syncpointid_ioctl(clientctx->channel_id, id);
/*
 * NVAVP_IOCTL_PUSH_BUFFER_SUBMIT: validate and submit a userspace
 * command buffer to the AVP channel.
 *
 * Flow: copy in the submit header and relocation table, pin + map the
 * cmdbuf dma-buf for DMA and vmap it for CPU access, patch each
 * relocation slot with the DMA address of its target buffer, then hand
 * the patched buffer to nvavp_pushbuffer_update(). When the caller
 * asked for a syncpoint, the resulting fence is copied back to
 * user_hdr->syncpt. Cleanup runs through the err_* / target_* labels
 * (unmap, detach, put — several label lines are elided in this view).
 *
 * NOTE(review): offset checks compare with '>' against the dma-buf
 * size; whether offset+words overrunning the end is caught is not
 * visible here — confirm against the elided lines.
 */
1557 static int nvavp_pushbuffer_submit_ioctl(struct file *filp, unsigned int cmd,
1560 struct nvavp_clientctx *clientctx = filp->private_data;
1561 struct nvavp_info *nvavp = clientctx->nvavp;
1562 struct nvavp_pushbuffer_submit_hdr hdr;
1564 struct dma_buf *cmdbuf_dmabuf;
1565 struct dma_buf_attachment *cmdbuf_attach;
1566 struct sg_table *cmdbuf_sgt;
1568 phys_addr_t phys_addr;
1569 unsigned long virt_addr;
1570 struct nvavp_pushbuffer_submit_hdr *user_hdr =
1571 (struct nvavp_pushbuffer_submit_hdr *) arg;
1572 struct nvavp_syncpt syncpt;
1574 syncpt.id = NVSYNCPT_INVALID;
1577 if (_IOC_DIR(cmd) & _IOC_WRITE) {
1578 if (copy_from_user(&hdr, (void __user *)arg,
1579 sizeof(struct nvavp_pushbuffer_submit_hdr)))
1583 if (!hdr.cmdbuf.mem)
/* Bound the reloc count before copying the user-supplied table. */
1586 if (hdr.num_relocs > NVAVP_MAX_RELOCATION_COUNT) {
1587 dev_err(&nvavp->nvhost_dev->dev,
1588 "invalid num_relocs %d\n", hdr.num_relocs);
1592 if (copy_from_user(clientctx->relocs, (void __user *)hdr.relocs,
1593 sizeof(struct nvavp_reloc) * hdr.num_relocs)) {
1597 cmdbuf_dmabuf = dma_buf_get(hdr.cmdbuf.mem);
1598 if (IS_ERR(cmdbuf_dmabuf)) {
1599 dev_err(&nvavp->nvhost_dev->dev,
1600 "invalid cmd buffer handle %08x\n", hdr.cmdbuf.mem);
1601 return PTR_ERR(cmdbuf_dmabuf);
1604 if (hdr.cmdbuf.offset > cmdbuf_dmabuf->size) {
1605 dev_err(&nvavp->nvhost_dev->dev,
1606 "invalid cmdbuf offset %d\n", hdr.cmdbuf.offset);
1608 goto err_dmabuf_attach;
1611 cmdbuf_attach = dma_buf_attach(cmdbuf_dmabuf, &nvavp->nvhost_dev->dev);
1612 if (IS_ERR(cmdbuf_attach)) {
1613 dev_err(&nvavp->nvhost_dev->dev, "cannot attach cmdbuf_dmabuf\n");
1614 ret = PTR_ERR(cmdbuf_attach);
1615 goto err_dmabuf_attach;
1618 cmdbuf_sgt = dma_buf_map_attachment(cmdbuf_attach, DMA_BIDIRECTIONAL);
1619 if (IS_ERR(cmdbuf_sgt)) {
1620 dev_err(&nvavp->nvhost_dev->dev, "cannot map cmdbuf_dmabuf\n");
1621 ret = PTR_ERR(cmdbuf_sgt);
1622 goto err_dmabuf_map;
1625 phys_addr = sg_dma_address(cmdbuf_sgt->sgl);
/* CPU mapping of the cmdbuf so relocation slots can be patched below. */
1627 virt_addr = (unsigned long)dma_buf_vmap(cmdbuf_dmabuf);
1629 dev_err(&nvavp->nvhost_dev->dev, "cannot vmap cmdbuf_dmabuf\n");
1631 goto err_dmabuf_vmap;
1634 cmdbuf_data = (u32 *)(virt_addr + hdr.cmdbuf.offset);
1635 for (i = 0; i < hdr.num_relocs; i++) {
1636 struct dma_buf *target_dmabuf;
1637 struct dma_buf_attachment *target_attach;
1638 struct sg_table *target_sgt;
1639 u32 *reloc_addr, target_phys_addr;
/* Each reloc must reference the same cmdbuf that was submitted. */
1641 if (clientctx->relocs[i].cmdbuf_mem != hdr.cmdbuf.mem) {
1642 dev_err(&nvavp->nvhost_dev->dev,
1643 "reloc info does not match target bufferID\n");
1645 goto err_reloc_info;
1648 if (clientctx->relocs[i].cmdbuf_offset > cmdbuf_dmabuf->size) {
1649 dev_err(&nvavp->nvhost_dev->dev,
1650 "invalid reloc offset in cmdbuf %d\n",
1651 clientctx->relocs[i].cmdbuf_offset);
1653 goto err_reloc_info;
1656 reloc_addr = cmdbuf_data +
1657 (clientctx->relocs[i].cmdbuf_offset >> 2);
1659 target_dmabuf = dma_buf_get(clientctx->relocs[i].target);
1660 if (IS_ERR(target_dmabuf)) {
1661 ret = PTR_ERR(target_dmabuf);
1662 goto target_dmabuf_fail;
1665 if (clientctx->relocs[i].target_offset > target_dmabuf->size) {
1666 dev_err(&nvavp->nvhost_dev->dev,
1667 "invalid target offset in reloc %d\n",
1668 clientctx->relocs[i].target_offset);
1670 goto target_attach_fail;
1673 target_attach = dma_buf_attach(target_dmabuf,
1674 &nvavp->nvhost_dev->dev);
1675 if (IS_ERR(target_attach)) {
1676 ret = PTR_ERR(target_attach);
1677 goto target_attach_fail;
1679 target_sgt = dma_buf_map_attachment(target_attach,
1681 if (IS_ERR(target_sgt)) {
1682 ret = PTR_ERR(target_sgt);
1683 goto target_map_fail;
/* Fall back to the CPU-physical address when no DMA address exists. */
1686 target_phys_addr = sg_dma_address(target_sgt->sgl);
1687 if (!target_phys_addr)
1688 target_phys_addr = sg_phys(target_sgt->sgl);
1689 target_phys_addr += clientctx->relocs[i].target_offset;
1690 writel(target_phys_addr, reloc_addr);
1691 dma_buf_unmap_attachment(target_attach, target_sgt,
1694 dma_buf_detach(target_dmabuf, target_attach);
1696 dma_buf_put(target_dmabuf);
1699 goto err_reloc_info;
1702 trace_nvavp_pushbuffer_submit_ioctl(clientctx->channel_id,
1703 hdr.cmdbuf.mem, hdr.cmdbuf.offset,
1704 hdr.cmdbuf.words, hdr.num_relocs, hdr.flags);
/* Submit with a syncpoint when requested, and return it to userspace. */
1707 ret = nvavp_pushbuffer_update(nvavp,
1708 (phys_addr + hdr.cmdbuf.offset),
1709 hdr.cmdbuf.words, &syncpt,
1710 (hdr.flags & NVAVP_UCODE_EXT),
1711 clientctx->channel_id);
1713 if (copy_to_user((void __user *)user_hdr->syncpt, &syncpt,
1714 sizeof(struct nvavp_syncpt))) {
1716 goto err_reloc_info;
1719 ret = nvavp_pushbuffer_update(nvavp,
1720 (phys_addr + hdr.cmdbuf.offset),
1721 hdr.cmdbuf.words, NULL,
1722 (hdr.flags & NVAVP_UCODE_EXT),
1723 clientctx->channel_id);
1727 dma_buf_vunmap(cmdbuf_dmabuf, (void *)virt_addr);
1729 dma_buf_unmap_attachment(cmdbuf_attach, cmdbuf_sgt, DMA_BIDIRECTIONAL);
1731 dma_buf_detach(cmdbuf_dmabuf, cmdbuf_attach);
1733 dma_buf_put(cmdbuf_dmabuf);
1737 #ifdef CONFIG_COMPAT
/*
 * 32-bit compat shim for PUSH_BUFFER_SUBMIT: widen the v32 header into
 * a native header in compat-allocated user space, call the native
 * ioctl handler, then copy the returned syncpt pointer field back.
 */
1738 static int nvavp_pushbuffer_submit_compat_ioctl(struct file *filp,
1742 struct nvavp_pushbuffer_submit_hdr_v32 hdr_v32;
1743 struct nvavp_pushbuffer_submit_hdr __user *user_hdr;
1746 if (_IOC_DIR(cmd) & _IOC_WRITE) {
1747 if (copy_from_user(&hdr_v32, (void __user *)arg,
1748 sizeof(struct nvavp_pushbuffer_submit_hdr_v32)))
1752 if (!hdr_v32.cmdbuf.mem)
/* Build the 64-bit header on the compat user stack. */
1755 user_hdr = compat_alloc_user_space(sizeof(*user_hdr));
1756 if (!access_ok(VERIFY_WRITE, user_hdr, sizeof(*user_hdr)))
1759 if (__put_user(hdr_v32.cmdbuf.mem, &user_hdr->cmdbuf.mem)
1760 || __put_user(hdr_v32.cmdbuf.offset, &user_hdr->cmdbuf.offset)
1761 || __put_user(hdr_v32.cmdbuf.words, &user_hdr->cmdbuf.words)
1762 || __put_user((void __user *)(unsigned long)hdr_v32.relocs,
1764 || __put_user(hdr_v32.num_relocs, &user_hdr->num_relocs)
1765 || __put_user((void __user *)(unsigned long)hdr_v32.syncpt,
1767 || __put_user(hdr_v32.flags, &user_hdr->flags))
1770 ret = nvavp_pushbuffer_submit_ioctl(filp, cmd, (unsigned long)user_hdr);
1774 if (__get_user(hdr_v32.syncpt, (uintptr_t *)&user_hdr->syncpt))
1777 if (copy_to_user((void __user *)arg, &hdr_v32,
1778 sizeof(struct nvavp_pushbuffer_submit_hdr_v32))) {
1786 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
/*
 * In-kernel submit path for audio clients: push an already-physical
 * command buffer on the audio channel, with no syncpoint.
 */
1787 int nvavp_pushbuffer_submit_audio(nvavp_clientctx_t client, int cmd_buf_phys,
1790 struct nvavp_clientctx *clientctx = client;
1791 struct nvavp_info *nvavp = clientctx->nvavp;
1793 return nvavp_pushbuffer_update(nvavp,
1795 cmd_buf_words, NULL,
1797 NVAVP_AUDIO_CHANNEL);
1799 EXPORT_SYMBOL_GPL(nvavp_pushbuffer_submit_audio);
/*
 * Register the callback invoked on audio notifications from the AVP.
 * NOTE(review): single-slot, not reference-counted — a second caller
 * silently replaces the first.
 */
1801 void nvavp_register_audio_cb(nvavp_clientctx_t client, void (*cb)(void))
1803 struct nvavp_clientctx *clientctx = client;
1804 struct nvavp_info *nvavp = clientctx->nvavp;
1806 nvavp->audio_notify = cb;
1808 EXPORT_SYMBOL_GPL(nvavp_register_audio_cb);
/* NVAVP_IOCTL_WAKE_AVP: poke the AVP via its outbox mailbox register. */
1811 static int nvavp_wake_avp_ioctl(struct file *filp, unsigned int cmd,
1816 return nvavp_outbox_write(0xA0000001);
/*
 * NVAVP_IOCTL_FORCE_CLOCK_STAY_ON: per-client refcounted request to
 * keep NVAVP clocks running. First enable turns clocks on and sets
 * stay_on; last disable clears stay_on, drops clocks, and schedules
 * the deferred clock-disable work. clk_reqs is protected by open_lock.
 */
1819 static int nvavp_force_clock_stay_on_ioctl(struct file *filp, unsigned int cmd,
1822 struct nvavp_clientctx *clientctx = filp->private_data;
1823 struct nvavp_info *nvavp = clientctx->nvavp;
1824 struct nvavp_clock_stay_on_state_args clock;
1826 if (copy_from_user(&clock, (void __user *)arg,
1827 sizeof(struct nvavp_clock_stay_on_state_args)))
1830 dev_dbg(&nvavp->nvhost_dev->dev, "%s: state=%d\n",
1831 __func__, clock.state);
1833 if (clock.state != NVAVP_CLOCK_STAY_ON_DISABLED &&
1834 clock.state != NVAVP_CLOCK_STAY_ON_ENABLED) {
1835 dev_err(&nvavp->nvhost_dev->dev, "%s: invalid argument=%d\n",
1836 __func__, clock.state);
1840 trace_nvavp_force_clock_stay_on_ioctl(clientctx->channel_id,
1841 clock.state, clientctx->clk_reqs);
1844 mutex_lock(&nvavp->open_lock);
1845 if (clientctx->clk_reqs++ == 0) {
1846 nvavp_clks_enable(nvavp);
1847 nvavp->stay_on = true;
1849 mutex_unlock(&nvavp->open_lock);
/* Outside the lock: flush any pending deferred disable. */
1850 cancel_work_sync(&nvavp->clock_disable_work);
1852 mutex_lock(&nvavp->open_lock);
1853 if (--clientctx->clk_reqs == 0) {
1854 nvavp->stay_on = false;
1855 nvavp_clks_disable(nvavp);
1857 mutex_unlock(&nvavp->open_lock);
1858 if (!nvavp->stay_on)
1859 schedule_work(&nvavp->clock_disable_work);
1864 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
/*
 * In-kernel API: enable the VCP or BSEA audio clock under open_lock.
 * Unrecognized ids are silently ignored.
 */
1865 int nvavp_enable_audio_clocks(nvavp_clientctx_t client, u32 clk_id)
1867 struct nvavp_clientctx *clientctx = client;
1868 struct nvavp_info *nvavp = clientctx->nvavp;
1870 dev_dbg(&nvavp->nvhost_dev->dev, "%s: clk_id = %d\n",
1873 trace_nvavp_enable_audio_clocks(clientctx->channel_id, clk_id);
1875 mutex_lock(&nvavp->open_lock);
1876 if (clk_id == NVAVP_MODULE_ID_VCP)
1877 clk_prepare_enable(nvavp->vcp_clk);
1878 else if (clk_id == NVAVP_MODULE_ID_BSEA)
1879 clk_prepare_enable(nvavp->bsea_clk);
1880 mutex_unlock(&nvavp->open_lock);
1883 EXPORT_SYMBOL_GPL(nvavp_enable_audio_clocks);
/*
 * In-kernel API: disable the VCP or BSEA audio clock under open_lock.
 * Mirror of nvavp_enable_audio_clocks(); unrecognized ids are ignored.
 */
1885 int nvavp_disable_audio_clocks(nvavp_clientctx_t client, u32 clk_id)
1887 struct nvavp_clientctx *clientctx = client;
1888 struct nvavp_info *nvavp = clientctx->nvavp;
1890 dev_dbg(&nvavp->nvhost_dev->dev, "%s: clk_id = %d\n",
1893 trace_nvavp_disable_audio_clocks(clientctx->channel_id, clk_id);
1895 mutex_lock(&nvavp->open_lock);
1896 if (clk_id == NVAVP_MODULE_ID_VCP)
1897 clk_disable_unprepare(nvavp->vcp_clk);
1898 else if (clk_id == NVAVP_MODULE_ID_BSEA)
1899 clk_disable_unprepare(nvavp->bsea_clk);
1900 mutex_unlock(&nvavp->open_lock);
1903 EXPORT_SYMBOL_GPL(nvavp_disable_audio_clocks);
/* No-op stubs when CONFIG_TEGRA_NVAVP_AUDIO is disabled. */
1905 int nvavp_enable_audio_clocks(nvavp_clientctx_t client, u32 clk_id)
1909 EXPORT_SYMBOL_GPL(nvavp_enable_audio_clocks);
1911 int nvavp_disable_audio_clocks(nvavp_clientctx_t client, u32 clk_id)
1915 EXPORT_SYMBOL_GPL(nvavp_disable_audio_clocks);
/*
 * NVAVP_IOCTL_SET_MIN_ONLINE_CPUS: request a PM QoS minimum number of
 * online CPUs; 0 restores the default.
 * NOTE(review): the restore path passes PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE
 * to the min_online_cpus request — looks like a copy-paste from the
 * cpu-freq path; verify against the PM QoS class defaults.
 */
1918 static int nvavp_set_min_online_cpus_ioctl(struct file *filp, unsigned int cmd,
1921 struct nvavp_clientctx *clientctx = filp->private_data;
1922 struct nvavp_info *nvavp = clientctx->nvavp;
1923 struct nvavp_num_cpus_args config;
1925 if (copy_from_user(&config, (void __user *)arg,
1926 sizeof(struct nvavp_num_cpus_args)))
1929 dev_dbg(&nvavp->nvhost_dev->dev, "%s: min_online_cpus=%d\n",
1930 __func__, config.min_online_cpus);
1932 trace_nvavp_set_min_online_cpus_ioctl(clientctx->channel_id,
1933 config.min_online_cpus);
1935 if (config.min_online_cpus > 0)
1936 pm_qos_update_request(&nvavp->min_online_cpus_req,
1937 config.min_online_cpus);
1939 pm_qos_update_request(&nvavp->min_online_cpus_req,
1940 PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE);
/*
 * Common open path: allocate a client context, run channel init, and
 * bump the per-channel refcount. Callers hold open_lock.
 */
1945 static int tegra_nvavp_open(struct nvavp_info *nvavp,
1946 struct nvavp_clientctx **client, int channel_id)
1948 struct nvavp_clientctx *clientctx;
1951 dev_dbg(&nvavp->nvhost_dev->dev, "%s: ++\n", __func__);
1953 clientctx = kzalloc(sizeof(*clientctx), GFP_KERNEL);
1957 pr_debug("tegra_nvavp_open channel_id (%d)\n", channel_id);
1959 clientctx->channel_id = channel_id;
1961 ret = nvavp_init(nvavp, channel_id);
1965 if (IS_VIDEO_CHANNEL_ID(channel_id))
1966 nvavp->video_refcnt++;
1967 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
1968 if (IS_AUDIO_CHANNEL_ID(channel_id))
1969 nvavp->audio_refcnt++;
1973 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
1974 trace_tegra_nvavp_open(channel_id, nvavp->refcount,
1975 nvavp->video_refcnt, nvavp->audio_refcnt);
1977 trace_tegra_nvavp_open(channel_id, nvavp->refcount,
1978 nvavp->video_refcnt, 0);
/* Hand the initialized context (with an empty iova tree) to the caller. */
1981 clientctx->nvavp = nvavp;
1982 clientctx->iova_handles = RB_ROOT;
1983 *client = clientctx;
/* file_operations .open for the video misc device. */
1988 static int tegra_nvavp_video_open(struct inode *inode, struct file *filp)
1990 struct miscdevice *miscdev = filp->private_data;
1991 struct nvavp_info *nvavp = dev_get_drvdata(miscdev->parent);
1992 struct nvavp_clientctx *clientctx;
1995 pr_debug("tegra_nvavp_video_open NVAVP_VIDEO_CHANNEL\n");
1997 nonseekable_open(inode, filp);
1999 mutex_lock(&nvavp->open_lock);
2000 ret = tegra_nvavp_open(nvavp, &clientctx, NVAVP_VIDEO_CHANNEL);
2001 filp->private_data = clientctx;
2002 mutex_unlock(&nvavp->open_lock);
2007 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
/* file_operations .open for the audio misc device. */
2008 static int tegra_nvavp_audio_open(struct inode *inode, struct file *filp)
2010 struct miscdevice *miscdev = filp->private_data;
2011 struct nvavp_info *nvavp = dev_get_drvdata(miscdev->parent);
2012 struct nvavp_clientctx *clientctx;
2015 pr_debug("tegra_nvavp_audio_open NVAVP_AUDIO_CHANNEL\n");
2017 nonseekable_open(inode, filp);
2019 mutex_lock(&nvavp->open_lock);
2020 ret = tegra_nvavp_open(nvavp, &clientctx, NVAVP_AUDIO_CHANNEL);
2021 filp->private_data = clientctx;
2022 mutex_unlock(&nvavp->open_lock);
/* In-kernel open of the audio channel (uses the global nvavp_info_ctx). */
2027 int tegra_nvavp_audio_client_open(nvavp_clientctx_t *clientctx)
2029 struct nvavp_info *nvavp = nvavp_info_ctx;
2032 mutex_lock(&nvavp->open_lock);
2033 ret = tegra_nvavp_open(nvavp, (struct nvavp_clientctx **)clientctx,
2034 NVAVP_AUDIO_CHANNEL);
2035 mutex_unlock(&nvavp->open_lock);
2039 EXPORT_SYMBOL_GPL(tegra_nvavp_audio_client_open);
/*
 * Common release path: drop this client's clock requests, decrement the
 * global/per-channel refcounts, uninit the hardware when the last user
 * goes away, and tear down the client's IOVA mappings. Callers hold
 * open_lock.
 */
2042 static int tegra_nvavp_release(struct nvavp_clientctx *clientctx,
2045 struct nvavp_info *nvavp = clientctx->nvavp;
2048 dev_dbg(&nvavp->nvhost_dev->dev, "%s: ++\n", __func__);
2050 if (!nvavp->refcount) {
2051 dev_err(&nvavp->nvhost_dev->dev,
2052 "releasing while in invalid state\n");
2057 /* if this client had any requests, drop our clk ref */
2058 if (clientctx->clk_reqs)
2059 nvavp_clks_disable(nvavp);
2061 if (nvavp->refcount > 0)
2063 if (!nvavp->refcount)
2064 nvavp_uninit(nvavp);
2066 if (IS_VIDEO_CHANNEL_ID(channel_id))
2067 nvavp->video_refcnt--;
2068 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
2069 if (IS_AUDIO_CHANNEL_ID(channel_id))
2070 nvavp->audio_refcnt--;
2073 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
2074 trace_tegra_nvavp_release(channel_id, nvavp->refcount,
2075 nvavp->video_refcnt, nvavp->audio_refcnt);
2077 trace_tegra_nvavp_release(channel_id, nvavp->refcount,
2078 nvavp->video_refcnt, 0);
2082 nvavp_remove_iova_mapping(clientctx);
/* file_operations .release for the video misc device. */
2087 static int tegra_nvavp_video_release(struct inode *inode, struct file *filp)
2089 struct nvavp_clientctx *clientctx = filp->private_data;
2090 struct nvavp_info *nvavp = clientctx->nvavp;
2093 mutex_lock(&nvavp->open_lock);
2094 filp->private_data = NULL;
2095 ret = tegra_nvavp_release(clientctx, NVAVP_VIDEO_CHANNEL);
2096 mutex_unlock(&nvavp->open_lock);
2101 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
/* file_operations .release for the audio misc device. */
2102 static int tegra_nvavp_audio_release(struct inode *inode,
2105 struct nvavp_clientctx *clientctx = filp->private_data;
2106 struct nvavp_info *nvavp = clientctx->nvavp;
2109 mutex_lock(&nvavp->open_lock);
2110 filp->private_data = NULL;
2111 ret = tegra_nvavp_release(clientctx, NVAVP_AUDIO_CHANNEL);
2112 mutex_unlock(&nvavp->open_lock);
/* In-kernel counterpart of tegra_nvavp_audio_client_open(). */
2117 int tegra_nvavp_audio_client_release(nvavp_clientctx_t client)
2119 struct nvavp_clientctx *clientctx = client;
2120 struct nvavp_info *nvavp = clientctx->nvavp;
2123 mutex_lock(&nvavp->open_lock);
2124 ret = tegra_nvavp_release(clientctx, NVAVP_AUDIO_CHANNEL);
2125 mutex_unlock(&nvavp->open_lock);
2129 EXPORT_SYMBOL_GPL(tegra_nvavp_audio_client_release);
/*
 * NVAVP_IOCTL_CHANNEL_OPEN: create a new fd (anon inode reusing the
 * caller's fops) bound to a fresh client context on the same channel,
 * and return the fd to userspace. Error-unwind lines (put_unused_fd,
 * kfree of name, fput) are elided in this view.
 */
2134 nvavp_channel_open(struct file *filp, struct nvavp_channel_open_args *arg)
2139 struct nvavp_clientctx *clientctx = filp->private_data;
2140 struct nvavp_info *nvavp = clientctx->nvavp;
2142 err = get_unused_fd_flags(O_RDWR);
2148 name = kasprintf(GFP_KERNEL, "nvavp-channel-fd%d", fd);
2155 file = anon_inode_getfile(name, filp->f_op, &(nvavp->video_misc_dev),
2159 err = PTR_ERR(file);
2164 fd_install(fd, file);
2166 nonseekable_open(file->f_inode, filp);
2167 mutex_lock(&nvavp->open_lock);
/* The new file gets its own clientctx on the same channel id. */
2168 err = tegra_nvavp_open(nvavp,
2169 (struct nvavp_clientctx **)&file->private_data,
2170 clientctx->channel_id);
2174 mutex_unlock(&nvavp->open_lock);
2177 mutex_unlock(&nvavp->open_lock);
2179 arg->channel_fd = fd;
2183 extern struct device tegra_vpr_dev;
/*
 * Main unlocked_ioctl dispatcher: validates the ioctl magic/number
 * range, then routes each NVAVP_IOCTL_* to its handler. The on-stack
 * buf[] is used as scratch for CHANNEL_OPEN's out-args.
 */
2184 static long tegra_nvavp_ioctl(struct file *filp, unsigned int cmd,
2187 struct nvavp_clientctx *clientctx = filp->private_data;
2188 struct nvavp_clock_args config;
2190 u8 buf[NVAVP_IOCTL_CHANNEL_MAX_ARG_SIZE];
2193 if (_IOC_TYPE(cmd) != NVAVP_IOCTL_MAGIC ||
2194 _IOC_NR(cmd) < NVAVP_IOCTL_MIN_NR ||
2195 _IOC_NR(cmd) > NVAVP_IOCTL_MAX_NR)
/* SET_NVMAP_FD is accepted but has no handler body visible here. */
2199 case NVAVP_IOCTL_SET_NVMAP_FD:
2201 case NVAVP_IOCTL_GET_SYNCPOINT_ID:
2202 ret = nvavp_get_syncpointid_ioctl(filp, cmd, arg);
2204 case NVAVP_IOCTL_PUSH_BUFFER_SUBMIT:
2205 ret = nvavp_pushbuffer_submit_ioctl(filp, cmd, arg);
2207 case NVAVP_IOCTL_SET_CLOCK:
2208 ret = nvavp_set_clock_ioctl(filp, cmd, arg);
2210 case NVAVP_IOCTL_GET_CLOCK:
2211 ret = nvavp_get_clock_ioctl(filp, cmd, arg);
2213 case NVAVP_IOCTL_WAKE_AVP:
2214 ret = nvavp_wake_avp_ioctl(filp, cmd, arg);
2216 case NVAVP_IOCTL_FORCE_CLOCK_STAY_ON:
2217 ret = nvavp_force_clock_stay_on_ioctl(filp, cmd, arg);
2219 case NVAVP_IOCTL_ENABLE_AUDIO_CLOCKS:
2220 if (copy_from_user(&config, (void __user *)arg,
2221 sizeof(struct nvavp_clock_args))) {
2225 ret = nvavp_enable_audio_clocks(clientctx, config.id);
2227 case NVAVP_IOCTL_DISABLE_AUDIO_CLOCKS:
2228 if (copy_from_user(&config, (void __user *)arg,
2229 sizeof(struct nvavp_clock_args))) {
2233 ret = nvavp_disable_audio_clocks(clientctx, config.id);
2235 case NVAVP_IOCTL_SET_MIN_ONLINE_CPUS:
2236 ret = nvavp_set_min_online_cpus_ioctl(filp, cmd, arg);
2238 case NVAVP_IOCTL_MAP_IOVA:
2239 ret = nvavp_map_iova(filp, cmd, arg);
2241 case NVAVP_IOCTL_UNMAP_IOVA:
2242 ret = nvavp_unmap_iova(filp, arg);
2244 case NVAVP_IOCTL_CHANNEL_OPEN:
2245 ret = nvavp_channel_open(filp, (void *)buf);
/* NOTE(review): copy_to_user's nonzero return is assigned to ret
 * directly, not converted to -EFAULT — confirm against elided lines. */
2247 ret = copy_to_user((void __user *)arg, buf,
2250 case NVAVP_IOCTL_VPR_FLOOR_SIZE:
2251 if (copy_from_user(&floor_size, (void __user *)arg,
2252 sizeof(floor_size))) {
2256 ret = dma_set_resizable_heap_floor_size(&tegra_vpr_dev,
2266 #ifdef CONFIG_COMPAT
/*
 * compat_ioctl entry point: only PUSH_BUFFER_SUBMIT needs a 32-bit
 * translation; everything else is layout-compatible and forwarded to
 * the native handler.
 */
2267 static long tegra_nvavp_compat_ioctl(struct file *filp, unsigned int cmd,
2272 if (_IOC_TYPE(cmd) != NVAVP_IOCTL_MAGIC ||
2273 _IOC_NR(cmd) < NVAVP_IOCTL_MIN_NR ||
2274 _IOC_NR(cmd) > NVAVP_IOCTL_MAX_NR)
2278 case NVAVP_IOCTL_PUSH_BUFFER_SUBMIT32:
2279 ret = nvavp_pushbuffer_submit_compat_ioctl(filp, cmd, arg);
2282 ret = tegra_nvavp_ioctl(filp, cmd, arg);
/* fops for /dev/tegra_avpchannel (video channel). */
2289 static const struct file_operations tegra_video_nvavp_fops = {
2290 .owner = THIS_MODULE,
2291 .open = tegra_nvavp_video_open,
2292 .release = tegra_nvavp_video_release,
2293 .unlocked_ioctl = tegra_nvavp_ioctl,
2294 #ifdef CONFIG_COMPAT
2295 .compat_ioctl = tegra_nvavp_compat_ioctl,
2299 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
/* fops for /dev/tegra_audio_avpchannel (audio channel). */
2300 static const struct file_operations tegra_audio_nvavp_fops = {
2301 .owner = THIS_MODULE,
2302 .open = tegra_nvavp_audio_open,
2303 .release = tegra_nvavp_audio_release,
2304 .unlocked_ioctl = tegra_nvavp_ioctl,
2305 #ifdef CONFIG_COMPAT
2306 .compat_ioctl = tegra_nvavp_compat_ioctl,
/* sysfs 'boost_sclk' read: report the current boost flag. */
2311 static ssize_t boost_sclk_show(struct device *dev,
2312 struct device_attribute *attr, char *buf)
2314 return snprintf(buf, PAGE_SIZE, "%d\n", boost_sclk);
/*
 * sysfs 'boost_sclk' write: nonzero pins sclk to SCLK_BOOST_RATE,
 * zero releases the boost (rate 0 = no constraint).
 */
2317 static ssize_t boost_sclk_store(struct device *dev,
2318 struct device_attribute *attr, const char *buf, size_t count)
2320 struct platform_device *ndev = to_platform_device(dev);
2321 struct nvavp_info *nvavp = platform_get_drvdata(ndev);
2322 unsigned long val = 0;
2324 if (kstrtoul(buf, 10, &val) < 0)
2328 clk_set_rate(nvavp->sclk, SCLK_BOOST_RATE);
2330 clk_set_rate(nvavp->sclk, 0);
2337 DEVICE_ATTR(boost_sclk, S_IRUGO | S_IWUSR, boost_sclk_show, boost_sclk_store);
/* Heap selection flags for where the AVP OS image is placed. */
2340 NVAVP_USE_SMMU = (1 << 0),
2341 NVAVP_USE_CARVEOUT = (1 << 1)
/*
 * Reserve 1 MiB of carveout memory at 'phys' for the AVP OS, but only
 * when the page is outside the kernel-managed range (pfn invalid).
 */
2344 static int nvavp_reserve_os_mem(struct nvavp_info *nvavp, dma_addr_t phys)
2347 if (!pfn_valid(__phys_to_pfn(phys))) {
2348 if (memblock_reserve(phys, SZ_1M)) {
2349 dev_err(&nvavp->nvhost_dev->dev,
2350 "failed to reserve mem block %lx\n",
2351 (unsigned long)phys);
/* Device-tree compatible strings handled by this driver. */
2359 static struct of_device_id tegra_nvavp_of_match[] = {
2360 { .compatible = "nvidia,tegra30-nvavp", NULL },
2361 { .compatible = "nvidia,tegra114-nvavp", NULL },
2362 { .compatible = "nvidia,tegra124-nvavp", NULL },
/*
 * Platform probe: map registers, pick and allocate the AVP OS memory
 * (SMMU IOVA at fixed addresses, or carveout at one of several fixed
 * physical bases), acquire all module clocks, register the video (and
 * optionally audio) misc devices, hook the mailbox IRQ, and set up
 * runtime PM, the boost_sclk sysfs attribute and PM QoS requests.
 * Error unwinding runs through the err_* labels at the bottom; the
 * label lines themselves are elided in this view.
 */
2367 static int tegra_nvavp_probe(struct platform_device *ndev)
2369 struct nvavp_info *nvavp;
2371 enum nvavp_heap heap_mask;
2372 int ret = 0, channel_id;
2373 struct device_node *np;
2375 np = ndev->dev.of_node;
2377 irq = platform_get_irq(ndev, 0);
2378 nvavp_reg_base = of_iomap(np, 0);
2380 irq = platform_get_irq_byname(ndev, "mbox_from_nvavp_pending");
2384 dev_err(&ndev->dev, "invalid nvhost data\n");
2388 if (!nvavp_reg_base) {
2389 dev_err(&ndev->dev, "unable to map, memory mapped IO\n");
2393 /* Set the max segment size supported. */
2394 ndev->dev.dma_parms = &nvavp_dma_parameters;
2396 nvavp = kzalloc(sizeof(struct nvavp_info), GFP_KERNEL);
2398 dev_err(&ndev->dev, "cannot allocate avp_info\n");
/* NOTE(review): redundant — kzalloc already zeroes the allocation. */
2402 memset(nvavp, 0, sizeof(*nvavp));
2404 #if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU) /* Tegra2 with AVP MMU */
2405 heap_mask = NVAVP_USE_CARVEOUT;
2406 #elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU) /* Tegra3 with SMMU */
2407 heap_mask = NVAVP_USE_SMMU;
2408 #else /* nvmem= carveout */
2409 heap_mask = NVAVP_USE_CARVEOUT;
2411 switch (heap_mask) {
2412 case NVAVP_USE_SMMU:
/* Try the preferred fixed IOVA first, then the fallback address. */
2414 nvavp->os_info.phys = 0x8ff00000;
2415 nvavp->os_info.data = dma_alloc_at_coherent(
2418 &nvavp->os_info.phys,
2421 if (!nvavp->os_info.data || nvavp->os_info.phys != 0x8ff00000) {
2422 nvavp->os_info.phys = 0x0ff00000;
2423 nvavp->os_info.data = dma_alloc_at_coherent(
2426 &nvavp->os_info.phys,
2429 if (!nvavp->os_info.data ||
2430 nvavp->os_info.phys != 0x0ff00000) {
2431 dev_err(&ndev->dev, "cannot allocate IOVA memory\n");
2436 dev_info(&ndev->dev,
2437 "allocated IOVA at %lx for AVP os\n",
2438 (unsigned long)nvavp->os_info.phys);
2440 case NVAVP_USE_CARVEOUT:
/* Probe a list of known carveout bases until one reserves cleanly. */
2441 if (!nvavp_reserve_os_mem(nvavp, 0x8e000000))
2442 nvavp->os_info.phys = 0x8e000000;
2443 else if (!nvavp_reserve_os_mem(nvavp, 0xf7e00000))
2444 nvavp->os_info.phys = 0xf7e00000;
2445 else if (!nvavp_reserve_os_mem(nvavp, 0x9e000000))
2446 nvavp->os_info.phys = 0x9e000000;
2447 else if (!nvavp_reserve_os_mem(nvavp, 0xbe000000))
2448 nvavp->os_info.phys = 0xbe000000;
2450 dev_err(&nvavp->nvhost_dev->dev,
2451 "cannot find nvmem= carveout to load AVP os\n");
2452 dev_err(&nvavp->nvhost_dev->dev,
2453 "check kernel command line "
2454 "to see if nvmem= is defined\n");
2459 dev_info(&ndev->dev,
2460 "allocated carveout memory at %lx for AVP os\n",
2461 (unsigned long)nvavp->os_info.phys);
2464 dev_err(&ndev->dev, "invalid/non-supported heap for AVP os\n");
2466 goto err_get_syncpt;
2469 nvavp->mbox_from_avp_pend_irq = irq;
2470 mutex_init(&nvavp->open_lock);
2472 for (channel_id = 0; channel_id < NVAVP_NUM_CHANNELS; channel_id++)
2473 mutex_init(&nvavp->channel_info[channel_id].pushbuffer_lock);
2475 /* TODO DO NOT USE NVAVP DEVICE */
2476 nvavp->cop_clk = clk_get(&ndev->dev, "cop");
2477 if (IS_ERR(nvavp->cop_clk)) {
2478 dev_err(&ndev->dev, "cannot get cop clock\n");
2480 goto err_get_cop_clk;
2483 nvavp->vde_clk = clk_get(&ndev->dev, "vde");
2484 if (IS_ERR(nvavp->vde_clk)) {
2485 dev_err(&ndev->dev, "cannot get vde clock\n");
2487 goto err_get_vde_clk;
2490 nvavp->bsev_clk = clk_get(&ndev->dev, "bsev");
2491 if (IS_ERR(nvavp->bsev_clk)) {
2492 dev_err(&ndev->dev, "cannot get bsev clock\n");
2494 goto err_get_bsev_clk;
2497 nvavp->sclk = clk_get(&ndev->dev, "sclk");
2498 if (IS_ERR(nvavp->sclk)) {
2499 dev_err(&ndev->dev, "cannot get avp.sclk clock\n");
2504 nvavp->emc_clk = clk_get(&ndev->dev, "emc");
2505 if (IS_ERR(nvavp->emc_clk)) {
2506 dev_err(&ndev->dev, "cannot get emc clock\n");
2508 goto err_get_emc_clk;
2511 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
2512 nvavp->bsea_clk = clk_get(&ndev->dev, "bsea");
2513 if (IS_ERR(nvavp->bsea_clk)) {
2514 dev_err(&ndev->dev, "cannot get bsea clock\n");
2516 goto err_get_bsea_clk;
2519 nvavp->vcp_clk = clk_get(&ndev->dev, "vcp");
2520 if (IS_ERR(nvavp->vcp_clk)) {
2521 dev_err(&ndev->dev, "cannot get vcp clock\n");
2523 goto err_get_vcp_clk;
/* Start with AVP halted and clocks off until first open. */
2527 nvavp->clk_enabled = 0;
2528 nvavp_halt_avp(nvavp);
2530 INIT_WORK(&nvavp->clock_disable_work, clock_disable_handler);
2532 nvavp->video_misc_dev.minor = MISC_DYNAMIC_MINOR;
2533 nvavp->video_misc_dev.name = "tegra_avpchannel";
2534 nvavp->video_misc_dev.fops = &tegra_video_nvavp_fops;
2535 nvavp->video_misc_dev.mode = S_IRWXUGO;
2536 nvavp->video_misc_dev.parent = &ndev->dev;
2538 ret = misc_register(&nvavp->video_misc_dev);
2540 dev_err(&ndev->dev, "unable to register misc device!\n");
2544 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
2545 INIT_WORK(&nvavp->app_notify_work, app_notify_handler);
2546 nvavp->audio_misc_dev.minor = MISC_DYNAMIC_MINOR;
2547 nvavp->audio_misc_dev.name = "tegra_audio_avpchannel";
2548 nvavp->audio_misc_dev.fops = &tegra_audio_nvavp_fops;
2549 nvavp->audio_misc_dev.mode = S_IRWXUGO;
2550 nvavp->audio_misc_dev.parent = &ndev->dev;
2552 ret = misc_register(&nvavp->audio_misc_dev);
2554 dev_err(&ndev->dev, "unable to register misc device!\n");
2555 goto err_audio_misc_reg;
2559 ret = request_irq(irq, nvavp_mbox_pending_isr, 0,
2560 TEGRA_NVAVP_NAME, nvavp);
2562 dev_err(&ndev->dev, "cannot register irq handler\n");
2563 goto err_req_irq_pend;
/* Keep the mailbox IRQ masked until a channel is initialized. */
2565 disable_irq(nvavp->mbox_from_avp_pend_irq);
2567 nvavp->nvhost_dev = ndev;
2568 platform_set_drvdata(ndev, nvavp);
2570 tegra_pd_add_device(&ndev->dev);
2571 pm_runtime_use_autosuspend(&ndev->dev);
2572 pm_runtime_set_autosuspend_delay(&ndev->dev, 2000);
2573 pm_runtime_enable(&ndev->dev);
2575 ret = device_create_file(&ndev->dev, &dev_attr_boost_sclk);
2578 "%s: device_create_file failed\n", __func__);
/* NOTE(review): failing here reuses err_req_irq_pend, so the IRQ
 * requested above is presumably freed there — confirm label body. */
2579 goto err_req_irq_pend;
2581 nvavp_info_ctx = nvavp;
2583 /* Add PM QoS request but leave it as default value */
2584 pm_qos_add_request(&nvavp->min_cpu_freq_req,
2585 PM_QOS_CPU_FREQ_MIN,
2586 PM_QOS_DEFAULT_VALUE);
2587 pm_qos_add_request(&nvavp->min_online_cpus_req,
2588 PM_QOS_MIN_ONLINE_CPUS,
2589 PM_QOS_DEFAULT_VALUE);
2594 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
2595 misc_deregister(&nvavp->audio_misc_dev);
2598 misc_deregister(&nvavp->video_misc_dev);
2600 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
2601 clk_put(nvavp->vcp_clk);
2603 clk_put(nvavp->bsea_clk);
2606 clk_put(nvavp->emc_clk);
2608 clk_put(nvavp->sclk);
2610 clk_put(nvavp->bsev_clk);
2612 clk_put(nvavp->vde_clk);
2614 clk_put(nvavp->cop_clk);
/*
 * tegra_nvavp_remove - platform_driver .remove callback.
 *
 * Refuses removal while clients still hold the device open, then unloads
 * the AVP microcode/OS and releases the sysfs attribute, misc devices,
 * clocks and PM QoS requests acquired in probe (roughly reverse order).
 */
2621 static int tegra_nvavp_remove(struct platform_device *ndev)
2623 struct nvavp_info *nvavp = platform_get_drvdata(ndev);
/* Bail out (device busy) if any client still holds a reference. */
2628 mutex_lock(&nvavp->open_lock);
2629 if (nvavp->refcount) {
2630 mutex_unlock(&nvavp->open_lock);
2633 mutex_unlock(&nvavp->open_lock);
/* Stop the AVP firmware before tearing down its resources. */
2635 nvavp_unload_ucode(nvavp);
2636 nvavp_unload_os(nvavp);
2638 device_remove_file(&ndev->dev, &dev_attr_boost_sclk);
2640 misc_deregister(&nvavp->video_misc_dev);
2642 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
2643 misc_deregister(&nvavp->audio_misc_dev);
2644 clk_put(nvavp->vcp_clk);
2645 clk_put(nvavp->bsea_clk);
2647 clk_put(nvavp->bsev_clk);
2648 clk_put(nvavp->vde_clk);
2649 clk_put(nvavp->cop_clk);
2651 clk_put(nvavp->emc_clk);
2652 clk_put(nvavp->sclk);
/*
 * NOTE(review): &nvavp->min_cpu_freq_req is the address of an embedded
 * struct member and can never be NULL or an ERR pointer, so this
 * !IS_ERR_OR_NULL() condition (and the one below) is always true and
 * the guard is dead code; pm_qos_request_active() looks like the
 * intended test — confirm.
 */
2654 if (!IS_ERR_OR_NULL(&nvavp->min_cpu_freq_req)) {
2655 pm_qos_update_request(&nvavp->min_cpu_freq_req,
2656 PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE);
2657 pm_qos_remove_request(&nvavp->min_cpu_freq_req);
/*
 * NOTE(review): min_online_cpus_req was registered with class
 * PM_QOS_MIN_ONLINE_CPUS in probe, yet is updated here with
 * PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE — looks like a copy/paste slip
 * from the block above; confirm the intended default constant.
 */
2659 if (!IS_ERR_OR_NULL(&nvavp->min_online_cpus_req)) {
2660 pm_qos_update_request(&nvavp->min_online_cpus_req,
2661 PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE);
2662 pm_qos_remove_request(&nvavp->min_online_cpus_req);
/*
 * tegra_nvavp_runtime_suspend - runtime PM suspend callback.
 *
 * With clients attached, the AVP is only torn down when its clocks are
 * already off and (when audio support is compiled in) the audio channel
 * is idle; otherwise the AVP is uninitialized unconditionally. Emits a
 * trace event with the current refcounts before returning.
 */
2670 static int tegra_nvavp_runtime_suspend(struct device *dev)
2672 struct platform_device *pdev = to_platform_device(dev);
2673 struct nvavp_info *nvavp = platform_get_drvdata(pdev);
/* open_lock serializes against open/close changing the refcounts. */
2676 mutex_lock(&nvavp->open_lock);
2678 if (nvavp->refcount) {
/* Clocks still enabled implies the AVP is actively in use. */
2679 if (!nvavp->clk_enabled) {
2680 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
/* Only shut the AVP down once the audio channel has gone idle. */
2681 if (nvavp_check_idle(nvavp, NVAVP_AUDIO_CHANNEL))
2682 nvavp_uninit(nvavp);
2686 nvavp_uninit(nvavp);
/* Audio builds include the audio refcount in the trace; others pass 0. */
2694 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
2695 trace_tegra_nvavp_runtime_suspend(nvavp->refcount, nvavp->video_refcnt,
2696 nvavp->audio_refcnt);
2698 trace_tegra_nvavp_runtime_suspend(nvavp->refcount,
2699 nvavp->video_refcnt, 0);
2702 mutex_unlock(&nvavp->open_lock);
/*
 * tegra_nvavp_runtime_resume - runtime PM resume callback.
 *
 * Re-initializes the video channel (and, when audio support is compiled
 * in, the audio channel) for any client that held a reference across
 * suspend, then emits a trace event with the current refcounts.
 */
2707 static int tegra_nvavp_runtime_resume(struct device *dev)
2709 struct platform_device *pdev = to_platform_device(dev);
2710 struct nvavp_info *nvavp = platform_get_drvdata(pdev);
/* open_lock keeps the refcount checks consistent with open/close. */
2712 mutex_lock(&nvavp->open_lock);
2714 if (nvavp->video_refcnt)
2715 nvavp_init(nvavp, NVAVP_VIDEO_CHANNEL);
2716 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
2717 if (nvavp->audio_refcnt)
2718 nvavp_init(nvavp, NVAVP_AUDIO_CHANNEL);
/* Audio builds include the audio refcount in the trace; others pass 0. */
2721 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
2722 trace_tegra_nvavp_runtime_resume(nvavp->refcount, nvavp->video_refcnt,
2723 nvavp->audio_refcnt);
2725 trace_tegra_nvavp_runtime_resume(nvavp->refcount,
2726 nvavp->video_refcnt, 0);
2729 mutex_unlock(&nvavp->open_lock);
/*
 * tegra_nvavp_resume - system (LP0 exit) resume callback.
 *
 * Re-powergates VDE to balance the deliberate unpowergate done in the
 * suspend path, halts the AVP, then runs the runtime-resume path to
 * bring active channels back up.
 */
2734 static int tegra_nvavp_resume(struct device *dev)
2736 struct platform_device *pdev = to_platform_device(dev);
2737 struct nvavp_info *nvavp = platform_get_drvdata(pdev);
2739 /* To balance the unpowergate in suspend routine */
2740 nvavp_powergate_vde(nvavp);
/* Ensure the AVP is halted before channels are re-initialized. */
2742 nvavp_halt_avp(nvavp);
2743 tegra_nvavp_runtime_resume(dev);
/*
 * Secure-OS builds restore TLK keyslots with clocks temporarily enabled
 * (presumably the keyslot state is lost across LP0 — confirm).
 */
2745 #ifdef CONFIG_TRUSTED_LITTLE_KERNEL
2746 nvavp_clks_enable(nvavp);
2747 te_restore_keyslots();
2748 nvavp_clks_disable(nvavp);
/*
 * tegra_nvavp_suspend - system suspend callback.
 *
 * Delegates to the runtime-suspend path, then deliberately leaves the
 * VDE power partition on as a workaround (see WAR comment below); the
 * matching powergate happens in tegra_nvavp_resume().
 */
2754 static int tegra_nvavp_suspend(struct device *dev)
2756 struct platform_device *pdev = to_platform_device(dev);
2757 struct nvavp_info *nvavp = platform_get_drvdata(pdev);
2760 ret = tegra_nvavp_runtime_suspend(dev);
2764 /* WAR: Leave partition vde on before suspend so that access
2765 * to BSEV registers immediately after LP0 exit won't fail.
2767 nvavp_unpowergate_vde(nvavp);
/*
 * Power-management callbacks: runtime PM (autosuspend-driven) plus
 * system suspend/resume. NVAVP_PM_OPS is NULL when CONFIG_PM is off so
 * the driver registers no PM ops at all in that configuration.
 */
2772 static const struct dev_pm_ops nvavp_pm_ops = {
2773 .runtime_suspend = tegra_nvavp_runtime_suspend,
2774 .runtime_resume = tegra_nvavp_runtime_resume,
2775 .suspend = tegra_nvavp_suspend,
2776 .resume = tegra_nvavp_resume,
2779 #define NVAVP_PM_OPS (&nvavp_pm_ops)
2781 #else /* CONFIG_PM */
2783 #define NVAVP_PM_OPS NULL
2785 #endif /* CONFIG_PM */
/*
 * Platform driver glue. The DT match table (wrapped in of_match_ptr so
 * it compiles away without CONFIG_OF) enables device-tree probing.
 */
2787 static struct platform_driver tegra_nvavp_driver = {
2789 .name = TEGRA_NVAVP_NAME,
2790 .owner = THIS_MODULE,
2792 .of_match_table = of_match_ptr(tegra_nvavp_of_match),
2794 .probe = tegra_nvavp_probe,
2795 .remove = tegra_nvavp_remove,
/* Module entry point: register the NVAVP platform driver. */
2798 static int __init tegra_nvavp_init(void)
2800 return platform_driver_register(&tegra_nvavp_driver);
/* Module exit point: unregister the NVAVP platform driver. */
2803 static void __exit tegra_nvavp_exit(void)
2805 platform_driver_unregister(&tegra_nvavp_driver);
2808 module_init(tegra_nvavp_init);
2809 module_exit(tegra_nvavp_exit);
2811 MODULE_AUTHOR("NVIDIA");
2812 MODULE_DESCRIPTION("Channel based AVP driver for Tegra");
2813 MODULE_VERSION("1.0");
/*
 * NOTE(review): the file header states GPL v2 only, while MODULE_LICENSE
 * declares "Dual BSD/GPL" — confirm which license is actually intended.
 */
2814 MODULE_LICENSE("Dual BSD/GPL");