2 * drivers/media/video/tegra/nvavp/nvavp_dev.c
4 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
11 #define CREATE_TRACE_POINTS
12 #include <trace/events/nvavp.h>
14 #include <linux/uaccess.h>
15 #include <linux/clk.h>
16 #include <linux/compat.h>
17 #include <linux/completion.h>
18 #include <linux/delay.h>
19 #include <linux/dma-buf.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/err.h>
22 #include <linux/firmware.h>
24 #include <linux/interrupt.h>
26 #include <linux/ioctl.h>
27 #include <linux/irq.h>
28 #include <linux/kref.h>
29 #include <linux/list.h>
30 #include <linux/miscdevice.h>
31 #include <linux/module.h>
32 #include <linux/mutex.h>
33 #include <linux/nvhost.h>
34 #include <linux/platform_device.h>
35 #include <linux/rbtree.h>
36 #include <linux/seq_file.h>
37 #include <linux/slab.h>
38 #include <linux/string.h>
39 #include <linux/tegra_nvavp.h>
40 #include <linux/types.h>
41 #include <linux/vmalloc.h>
42 #include <linux/workqueue.h>
43 #include <linux/pm_runtime.h>
44 #include <linux/clk/tegra.h>
45 #include <linux/tegra-powergate.h>
46 #include <linux/irqchip/tegra.h>
47 #include <linux/sched.h>
48 #include <linux/memblock.h>
49 #include <linux/anon_inodes.h>
50 #include <linux/tegra_pm_domains.h>
53 #include <linux/pm_qos.h>
56 #include <linux/of_device.h>
57 #include <linux/of_platform.h>
58 #include <linux/of_address.h>
59 #include <linux/tegra-timer.h>
61 #if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
62 #include "../avp/headavp.h"
66 #define TEGRA_NVAVP_NAME "nvavp"
68 #define NVAVP_PUSHBUFFER_SIZE 4096
70 #define NVAVP_PUSHBUFFER_MIN_UPDATE_SPACE (sizeof(u32) * 3)
72 static void __iomem *nvavp_reg_base;
74 #define TEGRA_NVAVP_RESET_VECTOR_ADDR (nvavp_reg_base + 0xe200)
76 #define FLOW_CTRL_HALT_COP_EVENTS (nvavp_reg_base + 0x6000 + 0x4)
77 #define FLOW_MODE_STOP (0x2 << 29)
78 #define FLOW_MODE_NONE 0x0
80 #define NVAVP_OS_INBOX (nvavp_reg_base + 0x10)
81 #define NVAVP_OS_OUTBOX (nvavp_reg_base + 0x20)
83 #define NVAVP_INBOX_VALID (1 << 29)
85 /* AVP behavior params */
86 #define NVAVP_OS_IDLE_TIMEOUT 100 /* milli-seconds */
87 #define NVAVP_OUTBOX_WRITE_TIMEOUT 1000 /* milli-seconds */
89 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
90 /* Two control channels: Audio and Video channels */
91 #define NVAVP_NUM_CHANNELS 2
93 #define NVAVP_AUDIO_CHANNEL 1
95 #define IS_AUDIO_CHANNEL_ID(channel_id) (channel_id == NVAVP_AUDIO_CHANNEL ? 1: 0)
97 #define NVAVP_NUM_CHANNELS 1
100 /* Channel ID 0 represents the Video channel control area */
101 #define NVAVP_VIDEO_CHANNEL 0
102 /* Channel ID 1 represents the Audio channel control area */
104 #define IS_VIDEO_CHANNEL_ID(channel_id) (channel_id == NVAVP_VIDEO_CHANNEL ? 1: 0)
106 #define SCLK_BOOST_RATE 40000000
108 static bool boost_sclk;
109 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
110 static bool audio_enabled;
113 struct nvavp_channel {
114 struct mutex pushbuffer_lock;
115 dma_addr_t pushbuf_phys;
119 struct nv_e276_control *os_control;
124 struct clk *bsev_clk;
127 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
128 struct clk *bsea_clk;
135 unsigned long sclk_rate;
136 unsigned long emc_clk_rate;
138 int mbox_from_avp_pend_irq;
140 struct mutex open_lock;
142 int video_initialized;
144 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
145 int audio_initialized;
147 struct work_struct app_notify_work;
148 void (*audio_notify)(void);
150 struct work_struct clock_disable_work;
153 struct nvavp_os_info os_info;
155 /* ucode information */
156 struct nvavp_ucode_info ucode_info;
158 /* client to change min cpu freq rate*/
159 struct pm_qos_request min_cpu_freq_req;
161 /* client to change number of min online cpus*/
162 struct pm_qos_request min_online_cpus_req;
164 struct nvavp_channel channel_info[NVAVP_NUM_CHANNELS];
171 struct platform_device *nvhost_dev;
172 struct miscdevice video_misc_dev;
173 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
174 struct miscdevice audio_misc_dev;
176 struct task_struct *init_task;
179 struct nvavp_clientctx {
180 struct nvavp_pushbuffer_submit_hdr submit_hdr;
181 struct nvavp_reloc relocs[NVAVP_MAX_RELOCATION_COUNT];
183 struct nvavp_info *nvavp;
186 spinlock_t iova_lock;
187 struct rb_root iova_handles;
189 static struct nvavp_info *nvavp_info_ctx;
/*
 * Take a runtime-PM reference on the nvhost device.
 * Callers other than the in-progress init task drop open_lock around the
 * (possibly sleeping) pm_runtime_get_sync() -- presumably to avoid deadlock
 * against a resume path that also takes open_lock; NOTE(review): confirm.
 * The init task itself only takes a no-resume reference.
 * Call with nvavp->open_lock held.
 */
191 static int nvavp_runtime_get(struct nvavp_info *nvavp)
193 if (nvavp->init_task != current) {
194 mutex_unlock(&nvavp->open_lock);
195 pm_runtime_get_sync(&nvavp->nvhost_dev->dev);
196 mutex_lock(&nvavp->open_lock);
199 pm_runtime_get_noresume(&nvavp->nvhost_dev->dev);
/*
 * Drop a runtime-PM reference; the device may autosuspend once its
 * autosuspend delay expires.
 */
204 static void nvavp_runtime_put(struct nvavp_info *nvavp)
206 pm_runtime_mark_last_busy(&nvavp->nvhost_dev->dev);
207 pm_runtime_put_autosuspend(&nvavp->nvhost_dev->dev);
/* Allow unlimited DMA segment size for dma-buf mappings on this device. */
210 static struct device_dma_parameters nvavp_dma_parameters = {
211 .max_segment_size = UINT_MAX,
/*
 * Per-dmabuf IOVA mapping tracked in a per-client rb-tree.
 * NOTE(review): additional fields (rb_node, dma address, refcount) are used
 * elsewhere in this file (b->node, b->addr, b->ref) but are not visible in
 * this view -- confirm against the full source.
 */
214 struct nvavp_iova_info {
218 struct dma_buf *dmabuf;
219 struct dma_buf_attachment *attachment;
220 struct sg_table *sgt;
224 * Unmap's dmabuf and removes the iova info from rb tree
225 * Call with client iova_lock held.
/*
 * Unmap and detach the dmabuf backing @b, drop its dmabuf reference and
 * remove it from the client's rb-tree. Caller must hold clientctx->iova_lock.
 */
227 static void nvavp_remove_iova_info_locked(
228 struct nvavp_clientctx *clientctx,
229 struct nvavp_iova_info *b)
231 struct nvavp_info *nvavp = clientctx->nvavp;
233 dev_dbg(&nvavp->nvhost_dev->dev,
234 "remove iova addr (0x%lx))\n", (unsigned long)b->addr);
235 dma_buf_unmap_attachment(b->attachment,
236 b->sgt, DMA_BIDIRECTIONAL);
237 dma_buf_detach(b->dmabuf, b->attachment);
238 dma_buf_put(b->dmabuf);
239 rb_erase(&b->node, &clientctx->iova_handles);
244 * Searches the given addr in rb tree and return valid pointer if present
245 * Call with client iova_lock held.
/*
 * Binary-search the client's rb-tree (keyed by dmabuf pointer value) for an
 * existing mapping of @dmabuf. On miss, *curr_parent is set to the last
 * visited node so the caller can insert there.
 * Caller must hold clientctx->iova_lock.
 */
247 static struct nvavp_iova_info *nvavp_search_iova_info_locked(
248 struct nvavp_clientctx *clientctx, struct dma_buf *dmabuf,
249 struct rb_node **curr_parent)
251 struct rb_node *parent = NULL;
252 struct rb_node **p = &clientctx->iova_handles.rb_node;
255 struct nvavp_iova_info *b;
257 b = rb_entry(parent, struct nvavp_iova_info, node);
258 if (b->dmabuf == dmabuf)
260 else if (dmabuf > b->dmabuf)
261 p = &parent->rb_right;
263 p = &parent->rb_left;
265 *curr_parent = parent;
270 * Adds a newly-created iova info handle to the rb tree
271 * Call with client iova_lock held.
/*
 * Link a newly created iova-info node @h into the rb-tree under @parent,
 * choosing the left/right child slot by dmabuf pointer comparison.
 * Caller must hold clientctx->iova_lock.
 */
273 static void nvavp_add_iova_info_locked(struct nvavp_clientctx *clientctx,
274 struct nvavp_iova_info *h, struct rb_node *parent)
276 struct nvavp_iova_info *b;
277 struct nvavp_info *nvavp = clientctx->nvavp;
278 struct rb_node **p = &clientctx->iova_handles.rb_node;
280 dev_dbg(&nvavp->nvhost_dev->dev,
281 "add iova addr (0x%lx))\n", (unsigned long)h->addr);
284 b = rb_entry(parent, struct nvavp_iova_info, node);
285 if (h->dmabuf > b->dmabuf)
286 p = &parent->rb_right;
288 p = &parent->rb_left;
290 rb_link_node(&h->node, parent, p);
291 rb_insert_color(&h->node, &clientctx->iova_handles);
295 * Maps and adds the iova address if already not present in rb tree
296 * if already present, update the ref count and return the iova address
/*
 * Return (in *addr) the IOVA for @dmabuf, mapping it on first use.
 * Fast path: found in the rb-tree -> bump refcount (elided here) and return.
 * Slow path: drop the spinlock, attach+map the dmabuf (sleeping calls), then
 * re-take the lock and re-search -- another thread may have inserted the same
 * dmabuf meanwhile, in which case the freshly built node is unwound via the
 * unmap/detach tail below.
 * NOTE(review): the error-unwind and refcount lines are elided in this view.
 */
298 static int nvavp_get_iova_addr(struct nvavp_clientctx *clientctx,
299 struct dma_buf *dmabuf, dma_addr_t *addr)
301 struct nvavp_info *nvavp = clientctx->nvavp;
302 struct nvavp_iova_info *h;
303 struct nvavp_iova_info *b = NULL;
304 struct rb_node *curr_parent = NULL;
307 spin_lock(&clientctx->iova_lock);
308 b = nvavp_search_iova_info_locked(clientctx, dmabuf, &curr_parent);
310 /* dmabuf already present in rb tree */
313 dev_dbg(&nvavp->nvhost_dev->dev,
314 "found iova addr (0x%pa) ref count(%d))\n",
315 &(b->addr), atomic_read(&b->ref));
318 spin_unlock(&clientctx->iova_lock);
320 /* create new iova_info node */
321 h = kzalloc(sizeof(*h), GFP_KERNEL);
326 h->attachment = dma_buf_attach(dmabuf, &nvavp->nvhost_dev->dev);
327 if (IS_ERR(h->attachment)) {
328 dev_err(&nvavp->nvhost_dev->dev, "cannot attach dmabuf\n");
329 ret = PTR_ERR(h->attachment);
333 h->sgt = dma_buf_map_attachment(h->attachment, DMA_BIDIRECTIONAL);
334 if (IS_ERR(h->sgt)) {
335 dev_err(&nvavp->nvhost_dev->dev, "cannot map dmabuf\n");
336 ret = PTR_ERR(h->sgt);
/* IOVA of the first scatterlist entry is handed back to the caller */
340 h->addr = sg_dma_address(h->sgt->sgl);
341 atomic_set(&h->ref, 1);
343 spin_lock(&clientctx->iova_lock);
/* re-check: someone may have mapped this dmabuf while we slept */
344 b = nvavp_search_iova_info_locked(clientctx, dmabuf, &curr_parent);
346 dev_dbg(&nvavp->nvhost_dev->dev,
347 "found iova addr (0x%pa) ref count(%d))\n",
348 &(b->addr), atomic_read(&b->ref));
351 spin_unlock(&clientctx->iova_lock);
354 nvavp_add_iova_info_locked(clientctx, h, curr_parent);
358 spin_unlock(&clientctx->iova_lock);
/* unwind path: release the mapping we built but did not insert */
361 dma_buf_unmap_attachment(h->attachment, h->sgt, DMA_BIDIRECTIONAL);
363 dma_buf_detach(dmabuf, h->attachment);
371 * Release the given iova address if it is last client otherwise dec ref count.
/*
 * Drop one reference on the IOVA mapping for @dmabuf; when the count reaches
 * zero the mapping is unmapped, detached and removed from the rb-tree.
 * Logs an error if the dmabuf has no tracked mapping.
 */
373 static void nvavp_release_iova_addr(struct nvavp_clientctx *clientctx,
374 struct dma_buf *dmabuf, dma_addr_t addr)
376 struct nvavp_info *nvavp = clientctx->nvavp;
377 struct nvavp_iova_info *b = NULL;
378 struct rb_node *curr_parent;
380 spin_lock(&clientctx->iova_lock);
381 b = nvavp_search_iova_info_locked(clientctx, dmabuf, &curr_parent);
383 dev_err(&nvavp->nvhost_dev->dev,
384 "error iova addr (0x%pa) is not found\n", &addr);
387 /* if it is last reference, release iova info */
388 if (atomic_sub_return(1, &b->ref) == 0)
389 nvavp_remove_iova_info_locked(clientctx, b);
391 spin_unlock(&clientctx->iova_lock);
395 * Release all the iova addresses in rb tree
/*
 * Tear down every IOVA mapping held by this client, regardless of refcount.
 * Used on client teardown; repeatedly removes the tree's first node until
 * the rb-tree is empty.
 */
397 static void nvavp_remove_iova_mapping(struct nvavp_clientctx *clientctx)
399 struct rb_node *p = NULL;
400 struct nvavp_iova_info *b;
402 spin_lock(&clientctx->iova_lock);
403 while ((p = rb_first(&clientctx->iova_handles))) {
404 b = rb_entry(p, struct nvavp_iova_info, node);
405 nvavp_remove_iova_info_locked(clientctx, b);
407 spin_unlock(&clientctx->iova_lock);
410 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
/* Return non-zero when the audio channel has been initialized. */
411 static int nvavp_get_audio_init_status(struct nvavp_info *nvavp)
413 return nvavp->audio_initialized;
/* Record the audio channel init state (1 = initialized, 0 = not). */
416 static void nvavp_set_audio_init_status(struct nvavp_info *nvavp, int status)
418 nvavp->audio_initialized = status;
/* Record the video channel init state (1 = initialized, 0 = not). */
422 static void nvavp_set_video_init_status(struct nvavp_info *nvavp, int status)
424 nvavp->video_initialized = status;
/* Return non-zero when the video channel has been initialized. */
427 static int nvavp_get_video_init_status(struct nvavp_info *nvavp)
429 return nvavp->video_initialized;
/*
 * Return the per-channel state for @channel_id.
 * No bounds check: callers must pass a valid id < NVAVP_NUM_CHANNELS.
 */
432 static struct nvavp_channel *nvavp_get_channel_info(struct nvavp_info *nvavp, int channel_id)
434 return &nvavp->channel_info[channel_id];
/*
 * Write @val to the AVP outbox mailbox register, first waiting (sleeping
 * 1-2 ms per poll) for the AVP to consume the previous value. Gives up
 * after NVAVP_OUTBOX_WRITE_TIMEOUT ms; the timeout return path is elided
 * in this view.
 */
437 static int nvavp_outbox_write(unsigned int val)
439 unsigned int wait_ms = 0;
441 while (readl(NVAVP_OS_OUTBOX)) {
442 usleep_range(1000, 2000);
443 if (++wait_ms > NVAVP_OUTBOX_WRITE_TIMEOUT) {
444 pr_err("No update from AVP in %d ms\n", wait_ms);
448 writel(val, NVAVP_OS_OUTBOX);
/*
 * Initialize the shared control area (struct nv_e276_control) that the CPU
 * and the AVP OS use to coordinate one channel: reset get/put ring pointers,
 * configure clock-gating and idle-notify behavior, and program the DMA
 * pushbuffer start/end window for the channel.
 */
452 static void nvavp_set_channel_control_area(struct nvavp_info *nvavp, int channel_id)
454 struct nv_e276_control *control;
455 struct nvavp_os_info *os = &nvavp->os_info;
458 struct nvavp_channel *channel_info;
/* control areas live back-to-back in the AVP OS image, indexed by channel */
460 ptr = os->data + os->control_offset + (sizeof(struct nv_e276_control) * channel_id);
462 channel_info = nvavp_get_channel_info(nvavp, channel_id);
463 channel_info->os_control = (struct nv_e276_control *)ptr;
465 control = channel_info->os_control;
467 /* init get and put pointers */
468 writel(0x0, &control->put);
469 writel(0x0, &control->get);
471 pr_debug("nvavp_set_channel_control_area for channel_id (%d):\
472 control->put (0x%08x) control->get (0x%08x)\n",
473 channel_id, (u32) &control->put, (u32) &control->get);
475 /* Clock gating disabled for video and enabled for audio */
476 if (IS_VIDEO_CHANNEL_ID(channel_id))
477 writel(0x1, &control->idle_clk_enable);
479 writel(0x0, &control->idle_clk_enable);
481 /* Disable iram clock gating */
482 writel(0x0, &control->iram_clk_gating);
484 /* enable avp idle timeout interrupt */
485 writel(0x1, &control->idle_notify_enable);
486 writel(NVAVP_OS_IDLE_TIMEOUT, &control->idle_notify_delay);
488 #if defined(CONFIG_ARCH_TEGRA_11x_SOC) || defined(CONFIG_ARCH_TEGRA_14x_SOC)
489 /* enable sync pt trap enable for avp */
490 if (IS_VIDEO_CHANNEL_ID(channel_id))
491 writel(0x1, &control->sync_pt_incr_trap_enable);
494 /* init dma start and end pointers */
495 writel(channel_info->pushbuf_phys, &control->dma_start);
496 writel((channel_info->pushbuf_phys + NVAVP_PUSHBUFFER_SIZE),
/* leave MIN_UPDATE_SPACE headroom at the fence for wrap bookkeeping */
499 writel(0x00, &channel_info->pushbuf_index);
500 temp = NVAVP_PUSHBUFFER_SIZE - NVAVP_PUSHBUFFER_MIN_UPDATE_SPACE;
501 writel(temp, &channel_info->pushbuf_fence);
/*
 * Map an NVAVP_MODULE_ID_* to the corresponding clk handle.
 * NOTE(review): the AVP-id return and the fallthrough/default return are
 * elided in this view -- presumably NULL for unknown ids; confirm.
 */
504 static struct clk *nvavp_clk_get(struct nvavp_info *nvavp, int id)
509 if (id == NVAVP_MODULE_ID_AVP)
511 if (id == NVAVP_MODULE_ID_VDE)
512 return nvavp->vde_clk;
513 if (id == NVAVP_MODULE_ID_EMC)
514 return nvavp->emc_clk;
/* Power-gate the VDEC partition; logs on failure and returns the result. */
519 static int nvavp_powergate_vde(struct nvavp_info *nvavp)
523 dev_dbg(&nvavp->nvhost_dev->dev, "%s++\n", __func__);
526 ret = tegra_powergate_partition(TEGRA_POWERGATE_VDEC);
528 dev_err(&nvavp->nvhost_dev->dev,
529 "%s: powergate failed\n",
/* Un-power-gate the VDEC partition; logs on failure and returns the result. */
535 static int nvavp_unpowergate_vde(struct nvavp_info *nvavp)
539 dev_dbg(&nvavp->nvhost_dev->dev, "%s++\n", __func__);
541 /* UnPowergate VDE */
542 ret = tegra_unpowergate_partition(TEGRA_POWERGATE_VDEC);
544 dev_err(&nvavp->nvhost_dev->dev,
545 "%s: unpowergate failed\n",
/*
 * Reference-counted clock bring-up for the video path. On the 0->1
 * transition: take runtime-PM and nvhost refs, enable BSEV/VDE clocks,
 * un-power-gate VDEC and restore the cached emc/sclk rates. Subsequent
 * calls only bump the counter.
 * NOTE(review): locking for clk_enabled is not visible here -- presumably
 * open_lock is held by callers; confirm.
 */
551 static void nvavp_clks_enable(struct nvavp_info *nvavp)
553 if (nvavp->clk_enabled == 0) {
554 nvavp_runtime_get(nvavp);
555 nvavp->clk_enabled++;
556 nvhost_module_busy_ext(nvavp->nvhost_dev);
557 clk_prepare_enable(nvavp->bsev_clk);
558 clk_prepare_enable(nvavp->vde_clk);
559 nvavp_unpowergate_vde(nvavp);
560 clk_set_rate(nvavp->emc_clk, nvavp->emc_clk_rate);
561 clk_set_rate(nvavp->sclk, nvavp->sclk_rate);
562 dev_dbg(&nvavp->nvhost_dev->dev, "%s: setting sclk to %lu\n",
563 __func__, nvavp->sclk_rate);
564 dev_dbg(&nvavp->nvhost_dev->dev, "%s: setting emc_clk to %lu\n",
565 __func__, nvavp->emc_clk_rate);
567 nvavp->clk_enabled++;
/*
 * Reference-counted clock teardown, mirror of nvavp_clks_enable(). On the
 * final put (and when stay_on is clear): gate BSEV/VDE, drop emc to 0,
 * keep sclk boosted if boost_sclk is set (elided branch) else drop it,
 * power-gate VDEC and release the nvhost/runtime-PM refs.
 */
571 static void nvavp_clks_disable(struct nvavp_info *nvavp)
573 if ((--nvavp->clk_enabled == 0) && !nvavp->stay_on) {
574 clk_disable_unprepare(nvavp->bsev_clk);
575 clk_disable_unprepare(nvavp->vde_clk);
576 clk_set_rate(nvavp->emc_clk, 0);
578 clk_set_rate(nvavp->sclk, SCLK_BOOST_RATE);
580 clk_set_rate(nvavp->sclk, 0);
581 nvavp_powergate_vde(nvavp);
582 nvhost_module_idle_ext(nvavp->nvhost_dev);
583 nvavp_runtime_put(nvavp);
584 dev_dbg(&nvavp->nvhost_dev->dev, "%s: resetting emc_clk "
585 "and sclk\n", __func__);
/*
 * Return 1 when the channel's ring is drained (put == get), i.e. the AVP
 * has consumed every queued command; 0 otherwise.
 */
589 static u32 nvavp_check_idle(struct nvavp_info *nvavp, int channel_id)
591 struct nvavp_channel *channel_info = nvavp_get_channel_info(nvavp, channel_id);
592 struct nv_e276_control *control = channel_info->os_control;
594 return (control->put == control->get) ? 1 : 0;
597 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
/*
 * Workqueue handler for the AVP APP_NOTIFY interrupt: invoke the registered
 * audio callback (if any) and emit a KOBJ_CHANGE uevent for userspace.
 */
598 static void app_notify_handler(struct work_struct *work)
600 struct nvavp_info *nvavp;
602 nvavp = container_of(work, struct nvavp_info,
604 if (nvavp->audio_notify)
605 nvavp->audio_notify();
607 kobject_uevent(&nvavp->nvhost_dev->dev.kobj, KOBJ_CHANGE);
/*
 * Workqueue handler for the video-idle interrupt: if the video channel ring
 * is drained and work is still marked pending, drop the clock reference.
 * Takes pushbuffer_lock before open_lock -- the same order must hold
 * everywhere to avoid deadlock.
 */
611 static void clock_disable_handler(struct work_struct *work)
613 struct nvavp_info *nvavp;
614 struct nvavp_channel *channel_info;
616 nvavp = container_of(work, struct nvavp_info,
619 channel_info = nvavp_get_channel_info(nvavp, NVAVP_VIDEO_CHANNEL);
620 mutex_lock(&channel_info->pushbuffer_lock);
621 mutex_lock(&nvavp->open_lock);
623 trace_nvavp_clock_disable_handler(channel_info->os_control->put,
624 channel_info->os_control->get,
627 if (nvavp_check_idle(nvavp, NVAVP_VIDEO_CHANNEL) && nvavp->pending) {
628 nvavp->pending = false;
629 nvavp_clks_disable(nvavp);
631 mutex_unlock(&nvavp->open_lock);
632 mutex_unlock(&channel_info->pushbuffer_lock);
/*
 * Service the AVP->CPU mailbox: read the inbox, dispatch each event bit
 * (video idle, syncpt trap, audio idle, debug string, awaken, fatal error,
 * breakpoint, timeout, app notify) and acknowledge by clearing the VALID
 * bit. Heavy work is deferred to workqueues.
 */
635 static int nvavp_service(struct nvavp_info *nvavp)
637 struct nvavp_os_info *os = &nvavp->os_info;
641 inbox = readl(NVAVP_OS_INBOX)
/* Mailbox IRQ handler: delegate all event decoding to nvavp_service(). */
695 static irqreturn_t nvavp_mbox_pending_isr(int irq, void *data)
697 struct nvavp_info *nvavp = data;
699 nvavp_service(nvavp);
/*
 * Hard-stop the AVP: halt the COP via flow control, assert its reset and
 * clear both mailbox registers so no stale events survive.
 */
704 static void nvavp_halt_avp(struct nvavp_info *nvavp)
706 /* ensure the AVP is halted */
707 writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);
708 tegra_periph_reset_assert(nvavp->cop_clk);
710 writel(0, NVAVP_OS_OUTBOX);
711 writel(0, NVAVP_OS_INBOX);
/*
 * Cold-boot the AVP at @reset_addr: program the reset vector, pulse the COP
 * reset while halted via flow control, then release it to run. With
 * CONFIG_TEGRA_AVP_KERNEL_ON_MMU the AVP first runs a boot stub whose data
 * block (map address + jump target) is DMA-mapped and used as the vector.
 * sclk/emc rates default to ULONG_MAX (dvfs max) until userspace sets them.
 * NOTE(review): the audio-channel-busy early-return body is elided here.
 */
714 static int nvavp_reset_avp(struct nvavp_info *nvavp, unsigned long reset_addr)
716 #if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
717 unsigned long stub_code_phys = virt_to_phys(_tegra_avp_boot_stub);
718 dma_addr_t stub_data_phys;
721 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
722 if (!(nvavp_check_idle(nvavp, NVAVP_AUDIO_CHANNEL)))
726 #if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
727 _tegra_avp_boot_stub_data.map_phys_addr = avp->kernel_phys;
728 _tegra_avp_boot_stub_data.jump_addr = reset_addr;
730 stub_data_phys = dma_map_single(NULL, &_tegra_avp_boot_stub_data,
731 sizeof(_tegra_avp_boot_stub_data),
734 reset_addr = (unsigned long)stub_data_phys;
736 writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);
738 writel(reset_addr, TEGRA_NVAVP_RESET_VECTOR_ADDR);
740 clk_prepare_enable(nvavp->sclk);
741 clk_prepare_enable(nvavp->emc_clk);
743 /* If sclk_rate and emc_clk is not set by user space,
744 * max clock in dvfs table will be used to get best performance.
746 nvavp->sclk_rate = ULONG_MAX;
747 nvavp->emc_clk_rate = ULONG_MAX;
749 tegra_periph_reset_assert(nvavp->cop_clk);
751 tegra_periph_reset_deassert(nvavp->cop_clk);
753 writel(FLOW_MODE_NONE, FLOW_CTRL_HALT_COP_EVENTS);
755 #if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
756 dma_unmap_single(NULL, stub_data_phys,
757 sizeof(_tegra_avp_boot_stub_data),
/*
 * Stop the video decoder: balance any outstanding clock reference tracked
 * by nvavp->pending, then assert reset on BSEV and VDE.
 * NOTE(review): the body of the first condition is elided in this view.
 */
763 static void nvavp_halt_vde(struct nvavp_info *nvavp)
765 if (nvavp->clk_enabled && !nvavp->pending)
768 if (nvavp->pending) {
769 nvavp_clks_disable(nvavp);
770 nvavp->pending = false;
773 tegra_periph_reset_assert(nvavp->bsev_clk);
774 tegra_periph_reset_assert(nvavp->vde_clk);
/*
 * Pulse reset on BSEV then VDE (clocks temporarily enabled around the
 * sequence) and leave the VDE clock requesting its maximum dvfs rate.
 */
777 static int nvavp_reset_vde(struct nvavp_info *nvavp)
779 if (nvavp->clk_enabled)
782 nvavp_clks_enable(nvavp);
784 tegra_periph_reset_assert(nvavp->bsev_clk);
786 tegra_periph_reset_deassert(nvavp->bsev_clk);
788 tegra_periph_reset_assert(nvavp->vde_clk);
790 tegra_periph_reset_deassert(nvavp->vde_clk);
793 * VDE clock is set to max freq by default.
794 * VDE clock can be set to different freq if needed
797 clk_set_rate(nvavp->vde_clk, ULONG_MAX);
799 nvavp_clks_disable(nvavp);
/*
 * Allocate the per-channel command pushbuffer (NVAVP_PUSHBUFFER_SIZE bytes)
 * as zeroed DMA-coherent memory; the bus address is stored in pushbuf_phys
 * for programming into the channel control area.
 */
804 static int nvavp_pushbuffer_alloc(struct nvavp_info *nvavp, int channel_id)
808 struct nvavp_channel *channel_info = nvavp_get_channel_info(
811 channel_info->pushbuf_data = dma_zalloc_coherent(&nvavp->nvhost_dev->dev,
812 NVAVP_PUSHBUFFER_SIZE,
813 &channel_info->pushbuf_phys,
816 if (!channel_info->pushbuf_data) {
817 dev_err(&nvavp->nvhost_dev->dev,
818 "cannot alloc pushbuffer memory\n");
/* Free every channel's DMA-coherent pushbuffer that was allocated. */
825 static void nvavp_pushbuffer_free(struct nvavp_info *nvavp)
829 for (channel_id = 0; channel_id < NVAVP_NUM_CHANNELS; channel_id++) {
830 if (nvavp->channel_info[channel_id].pushbuf_data) {
831 dma_free_coherent(&nvavp->nvhost_dev->dev,
832 NVAVP_PUSHBUFFER_SIZE,
833 nvavp->channel_info[channel_id].pushbuf_data,
834 nvavp->channel_info[channel_id].pushbuf_phys);
/*
 * Allocate pushbuffers and set up control areas for all channels. For the
 * video channel also latch the current NVSYNCPT_AVP_0 syncpoint value so
 * subsequent submissions can increment from it.
 */
840 static int nvavp_pushbuffer_init(struct nvavp_info *nvavp)
845 for (channel_id = 0; channel_id < NVAVP_NUM_CHANNELS; channel_id++) {
846 ret = nvavp_pushbuffer_alloc(nvavp, channel_id);
848 dev_err(&nvavp->nvhost_dev->dev,
849 "unable to alloc pushbuffer\n");
852 nvavp_set_channel_control_area(nvavp, channel_id);
853 if (IS_VIDEO_CHANNEL_ID(channel_id)) {
854 nvavp->syncpt_id = NVSYNCPT_AVP_0;
855 if (!nvhost_syncpt_read_ext_check(nvavp->nvhost_dev,
856 nvavp->syncpt_id, &val))
857 nvavp->syncpt_value = val;
/* Counterpart of nvavp_pushbuffer_init(): release all pushbuffers. */
864 static void nvavp_pushbuffer_deinit(struct nvavp_info *nvavp)
866 nvavp_pushbuffer_free(nvavp)
/*
 * Queue one command gather into a channel's pushbuffer and kick the AVP.
 * Sequence: optionally emit a SET_MICROCODE packet (when the caller did not
 * supply external ucode), emit a GATHER opcode pointing at @phys_addr with
 * @gather_count words, optionally emit a syncpoint-increment IMM, advance
 * the put pointer (modulo NVAVP_PUSHBUFFER_SIZE), then wake the channel
 * through the outbox mailbox. On success *syncpt reports the fence the
 * caller can wait on.
 * NOTE(review): several guard/return lines are elided in this view.
 */
869 static int nvavp_pushbuffer_update(struct nvavp_info *nvavp, u32 phys_addr,
870 u32 gather_count, struct nvavp_syncpt *syncpt,
871 u32 ext_ucode_flag, int channel_id)
873 struct nvavp_channel *channel_info;
874 struct nv_e276_control *control;
875 u32 gather_cmd, setucode_cmd, sync = 0;
877 u32 index, value = -1;
880 mutex_lock(&nvavp->open_lock);
881 nvavp_runtime_get(nvavp);
882 mutex_unlock(&nvavp->open_lock);
883 channel_info = nvavp_get_channel_info(nvavp, channel_id);
885 control = channel_info->os_control;
886 pr_debug("nvavp_pushbuffer_update for channel_id (%d):\
887 control->put (0x%x) control->get (0x%x)\n",
888 channel_id, (u32) &control->put, (u32) &control->get);
890 mutex_lock(&channel_info->pushbuffer_lock);
892 /* check for pushbuffer wrapping */
893 if (channel_info->pushbuf_index >= channel_info->pushbuf_fence)
894 channel_info->pushbuf_index = 0;
896 if (!ext_ucode_flag) {
898 NVE26E_CH_OPCODE_INCR(NVE276_SET_MICROCODE_A, 3);
900 index = wordcount + channel_info->pushbuf_index;
901 writel(setucode_cmd, (channel_info->pushbuf_data + index));
902 wordcount += sizeof(u32);
904 index = wordcount + channel_info->pushbuf_index;
905 writel(0, (channel_info->pushbuf_data + index));
906 wordcount += sizeof(u32);
908 index = wordcount + channel_info->pushbuf_index;
909 writel(nvavp->ucode_info.phys,
910 (channel_info->pushbuf_data + index));
911 wordcount += sizeof(u32);
913 index = wordcount + channel_info->pushbuf_index;
914 writel(nvavp->ucode_info.size,
915 (channel_info->pushbuf_data + index));
916 wordcount += sizeof(u32);
919 gather_cmd = NVE26E_CH_OPCODE_GATHER(0, 0, 0, gather_count);
/* reserve the next syncpoint value as this submission's fence */
922 value = ++nvavp->syncpt_value;
923 /* XXX: NvSchedValueWrappingComparison */
924 sync = NVE26E_CH_OPCODE_IMM(NVE26E_HOST1X_INCR_SYNCPT,
925 (NVE26E_HOST1X_INCR_SYNCPT_COND_OP_DONE << 8) |
926 (nvavp->syncpt_id & 0xFF));
929 /* write commands out */
930 index = wordcount + channel_info->pushbuf_index;
931 writel(gather_cmd, (channel_info->pushbuf_data + index));
932 wordcount += sizeof(u32);
934 index = wordcount + channel_info->pushbuf_index;
935 writel(phys_addr, (channel_info->pushbuf_data + index));
936 wordcount += sizeof(u32);
939 index = wordcount + channel_info->pushbuf_index;
940 writel(sync, (channel_info->pushbuf_data + index));
941 wordcount += sizeof(u32);
944 /* enable clocks to VDE/BSEV */
945 mutex_lock(&nvavp->open_lock);
946 if (!nvavp->pending && IS_VIDEO_CHANNEL_ID(channel_id)) {
947 nvavp_clks_enable(nvavp);
948 nvavp->pending = true;
950 mutex_unlock(&nvavp->open_lock);
952 /* update put pointer */
953 channel_info->pushbuf_index = (channel_info->pushbuf_index + wordcount)&
954 (NVAVP_PUSHBUFFER_SIZE - 1);
956 writel(channel_info->pushbuf_index, &control->put);
961 if (IS_VIDEO_CHANNEL_ID(channel_id)) {
962 pr_debug("Wake up Video Channel\n");
963 ret = nvavp_outbox_write(0xA0000001);
968 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
969 if (IS_AUDIO_CHANNEL_ID(channel_id)) {
970 pr_debug("Wake up Audio Channel\n");
971 if (!audio_enabled) {
972 mutex_lock(&nvavp->open_lock);
973 nvavp_runtime_get(nvavp);
974 mutex_unlock(&nvavp->open_lock);
975 audio_enabled = true;
977 ret = nvavp_outbox_write(0xA0000002);
983 /* Fill out fence struct */
985 syncpt->id = nvavp->syncpt_id;
986 syncpt->value = value;
989 trace_nvavp_pushbuffer_update(channel_id, control->put, control->get,
990 phys_addr, gather_count,
991 sizeof(struct nvavp_syncpt), syncpt);
994 mutex_unlock(&channel_info->pushbuffer_lock);
995 nvavp_runtime_put(nvavp);
/* Release the DMA-coherent ucode image and its kernel-heap backup copy. */
1000 static void nvavp_unload_ucode(struct nvavp_info *nvavp)
1002 dma_free_coherent(&nvavp->nvhost_dev->dev, nvavp->ucode_info.size,
1003 nvavp->ucode_info.data, nvavp->ucode_info.phys);
1004 kfree(nvavp->ucode_info.ucode_bin);
/*
 * Load the video ucode: request "nvavp_vid_ucode.bin" (falling back to
 * "nvavp_vid_ucode_alt.bin"), validate the 8-byte "NVAVPAPP" header, keep a
 * heap copy of the payload for later reloads, and copy it into a freshly
 * allocated DMA-coherent buffer for the AVP. Idempotent: the firmware is
 * only fetched when no heap copy exists yet.
 */
1007 static int nvavp_load_ucode(struct nvavp_info *nvavp)
1009 struct nvavp_ucode_info *ucode_info = &nvavp->ucode_info;
1010 const struct firmware *nvavp_ucode_fw;
1011 char fw_ucode_file[32];
1015 if (!ucode_info->ucode_bin) {
1016 sprintf(fw_ucode_file, "nvavp_vid_ucode.bin");
1018 ret = request_firmware(&nvavp_ucode_fw, fw_ucode_file,
1019 nvavp->video_misc_dev.this_device);
1021 /* Try alternative version */
1022 sprintf(fw_ucode_file, "nvavp_vid_ucode_alt.bin");
1024 ret = request_firmware(&nvavp_ucode_fw,
1026 nvavp->video_misc_dev.this_device);
1029 dev_err(&nvavp->nvhost_dev->dev,
1030 "cannot read ucode firmware '%s'\n",
1036 dev_info(&nvavp->nvhost_dev->dev,
1037 "read ucode firmware from '%s' (%d bytes)\n",
1038 fw_ucode_file, nvavp_ucode_fw->size);
1040 ptr = (void *)nvavp_ucode_fw->data;
1042 if (strncmp((const char *)ptr, "NVAVPAPP", 8)) {
1043 dev_info(&nvavp->nvhost_dev->dev,
1044 "ucode hdr string mismatch\n");
/* payload follows the 8-byte magic */
1049 ucode_info->size = nvavp_ucode_fw->size - 8;
1051 ucode_info->ucode_bin = kzalloc(ucode_info->size,
1053 if (!ucode_info->ucode_bin) {
1054 dev_err(&nvavp->nvhost_dev->dev,
1055 "cannot allocate ucode bin\n");
1057 goto err_ubin_alloc;
1060 ucode_info->data = dma_alloc_coherent(&nvavp->nvhost_dev->dev,
1064 if (!ucode_info->data) {
1065 dev_err(&nvavp->nvhost_dev->dev,
1066 "cannot alloc memory for ucode\n");
1068 goto err_ucode_alloc;
1070 memcpy(ucode_info->ucode_bin, ptr, ucode_info->size);
1071 release_firmware(nvavp_ucode_fw);
1074 memcpy(ucode_info->data, ucode_info->ucode_bin, ucode_info->size);
1078 kfree(nvavp->ucode_info.ucode_bin);
1080 release_firmware(nvavp_ucode_fw);
/* Release the 1 MiB AVP OS image region and its kernel-heap backup copy. */
1085 static void nvavp_unload_os(struct nvavp_info *nvavp)
1087 dma_free_coherent(&nvavp->nvhost_dev->dev, SZ_1M,
1088 nvavp->os_info.data, nvavp->os_info.phys);
1089 kfree(nvavp->os_info.os_bin);
/*
 * Load the AVP OS image @fw_os_file: validate the "NVAVP-OS" header, parse
 * the entry/control/debug offsets and payload size from the header words,
 * cache the payload in a heap copy, then install it at os_info.data (zeroing
 * the remainder of the 1 MiB window) and compute the reset address.
 * Firmware is only fetched when no heap copy exists yet.
 */
1092 static int nvavp_load_os(struct nvavp_info *nvavp, char *fw_os_file)
1094 struct nvavp_os_info *os_info = &nvavp->os_info;
1095 const struct firmware *nvavp_os_fw;
1100 if (!os_info->os_bin) {
1101 ret = request_firmware(&nvavp_os_fw, fw_os_file,
1102 nvavp->video_misc_dev.this_device);
1104 dev_err(&nvavp->nvhost_dev->dev,
1105 "cannot read os firmware '%s'\n", fw_os_file);
1109 dev_info(&nvavp->nvhost_dev->dev,
1110 "read firmware from '%s' (%d bytes)\n",
1111 fw_os_file, nvavp_os_fw->size);
1113 ptr = (void *)nvavp_os_fw->data;
1115 if (strncmp((const char *)ptr, "NVAVP-OS", 8)) {
1116 dev_info(&nvavp->nvhost_dev->dev,
1117 "os hdr string mismatch\n");
/* header layout: magic, entry, control, debug offsets, payload size */
1123 os_info->entry_offset = *((u32 *)ptr);
1125 os_info->control_offset = *((u32 *)ptr);
1127 os_info->debug_offset = *((u32 *)ptr);
1130 size = *((u32 *)ptr); ptr += sizeof(u32);
1132 os_info->size = size;
1133 os_info->os_bin = kzalloc(os_info->size,
1135 if (!os_info->os_bin) {
1136 dev_err(&nvavp->nvhost_dev->dev,
1137 "cannot allocate os bin\n");
1142 memcpy(os_info->os_bin, ptr, os_info->size);
1143 memset(os_info->data + os_info->size, 0, SZ_1M - os_info->size);
1145 dev_info(&nvavp->nvhost_dev->dev,
1146 "entry=%08x control=%08x debug=%08x size=%d\n",
1147 os_info->entry_offset, os_info->control_offset,
1148 os_info->debug_offset, os_info->size);
1149 release_firmware(nvavp_os_fw);
1152 memcpy(os_info->data, os_info->os_bin, os_info->size);
1153 os_info->reset_addr = os_info->phys + os_info->entry_offset;
1155 dev_info(&nvavp->nvhost_dev->dev,
1156 "AVP os at vaddr=%p paddr=%llx reset_addr=%llx\n",
1157 os_info->data, (u64)(os_info->phys), (u64)os_info->reset_addr);
1161 release_firmware(nvavp_os_fw);
/*
 * One-time AVP OS bring-up, skipped if either channel is already running:
 * pick the firmware filename and reset address according to the memory
 * scheme (AVP MMU / SMMU / nvmem carveout), load the OS image, set up the
 * pushbuffers, then enable the legacy COP IRQ path and the mailbox IRQ.
 */
1167 static int nvavp_os_init(struct nvavp_info *nvavp)
1169 char fw_os_file[32];
1171 int video_initialized, audio_initialized = 0;
1173 video_initialized = nvavp_get_video_init_status(nvavp);
1175 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
1176 audio_initialized = nvavp_get_audio_init_status(nvavp);
1178 pr_debug("video_initialized(%d) audio_initialized(%d)\n",
1179 video_initialized, audio_initialized);
1181 /* OS already loaded if either channel is initialized */
1182 if (video_initialized || audio_initialized)
1185 /* Neither channel initialized: perform first-time bring-up */
1186 pr_debug("video_initialized == audio_initialized (%d)\n",
1187 nvavp->video_initialized);
1188 #if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU) /* Tegra2 with AVP MMU */
1189 /* paddr is phys address */
1190 /* vaddr is AVP_KERNEL_VIRT_BASE */
1191 dev_info(&nvavp->nvhost_dev->dev,
1192 "using AVP MMU to relocate AVP os\n");
1193 sprintf(fw_os_file, "nvavp_os.bin");
1194 nvavp->os_info.reset_addr = AVP_KERNEL_VIRT_BASE;
1195 #elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU) /* Tegra3 with SMMU */
1196 /* paddr is any address behind SMMU */
1197 /* vaddr is TEGRA_SMMU_BASE */
1198 dev_info(&nvavp->nvhost_dev->dev,
1199 "using SMMU at %lx to load AVP kernel\n",
1200 (unsigned long)nvavp->os_info.phys);
1201 BUG_ON(nvavp->os_info.phys != 0xeff00000
1202 && nvavp->os_info.phys != 0x0ff00000
1203 && nvavp->os_info.phys != 0x8ff00000);
1204 sprintf(fw_os_file, "nvavp_os_%08lx.bin",
1205 (unsigned long)nvavp->os_info.phys);
1206 nvavp->os_info.reset_addr = nvavp->os_info.phys;
1207 #else /* nvmem= carveout */
1208 dev_info(&nvavp->nvhost_dev->dev,
1209 "using nvmem= carveout at %llx to load AVP os\n",
1210 (u64)nvavp->os_info.phys);
1211 sprintf(fw_os_file, "nvavp_os_%08llx.bin", (u64)nvavp->os_info.phys);
1212 nvavp->os_info.reset_addr = nvavp->os_info.phys;
1213 nvavp->os_info.data = ioremap(nvavp->os_info.phys, SZ_1M);
1215 ret = nvavp_load_os(nvavp, fw_os_file);
1217 dev_err(&nvavp->nvhost_dev->dev,
1218 "unable to load os firmware '%s'\n", fw_os_file);
1222 ret = nvavp_pushbuffer_init(nvavp);
1224 dev_err(&nvavp->nvhost_dev->dev,
1225 "unable to init pushbuffer\n");
1228 tegra_init_legacy_irq_cop();
1229 enable_irq(nvavp->mbox_from_avp_pend_irq);
/*
 * Per-channel initialization entry point: bring up the shared AVP OS
 * (nvavp_os_init is idempotent), then for the video channel load the ucode
 * and reset VDE+AVP, or for the audio channel just reset the AVP. init_task
 * is set to the current task so nvavp_runtime_get() can detect re-entry
 * from the init path.
 */
1234 static int nvavp_init(struct nvavp_info *nvavp, int channel_id)
1237 int video_initialized = 0, audio_initialized = 0;
1239 nvavp->init_task = current;
1241 ret = nvavp_os_init(nvavp);
1243 dev_err(&nvavp->nvhost_dev->dev,
1244 "unable to load os firmware and allocate buffers\n");
1247 video_initialized = nvavp_get_video_init_status(nvavp);
1248 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
1249 audio_initialized = nvavp_get_audio_init_status(nvavp);
1252 if (IS_VIDEO_CHANNEL_ID(channel_id) && (!video_initialized)) {
1253 pr_debug("nvavp_init : channel_ID (%d)\n", channel_id);
1254 ret = nvavp_load_ucode(nvavp);
1256 dev_err(&nvavp->nvhost_dev->dev,
1257 "unable to load ucode\n");
1261 nvavp_reset_vde(nvavp);
1262 nvavp_reset_avp(nvavp, nvavp->os_info.reset_addr);
1264 nvavp_set_video_init_status(nvavp, 1);
1266 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
1267 if (IS_AUDIO_CHANNEL_ID(channel_id) && (!audio_initialized)) {
1268 pr_debug("nvavp_init : channel_ID (%d)\n", channel_id);
1269 nvavp_reset_avp(nvavp, nvavp->os_info.reset_addr);
1270 nvavp_set_audio_init_status(nvavp, 1);
1275 nvavp->init_task = NULL;
/* TMR2 control bits used by the LP1-wakeup workaround in nvavp_uninit(). */
1279 #define TIMER_EN (1 << 31)
1280 #define TIMER_PERIODIC (1 << 30)
1281 #define TIMER_PCR 0x4
1282 #define TIMER_PCR_INTR (1 << 30)
1284 /* This should be called with the open_lock held */
/*
 * nvavp_uninit() - tear down whichever engines are initialised and, once
 * both video and audio are down, halt the AVP, release the pushbuffer,
 * disable clocks/IRQ, and apply the TMR2 LP1-wakeup WAR.
 */
1285 static void nvavp_uninit(struct nvavp_info *nvavp)
1287 int video_initialized, audio_initialized = 0;
1290 video_initialized = nvavp_get_video_init_status(nvavp);
1292 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
1293 audio_initialized = nvavp_get_audio_init_status(nvavp);
1296 pr_debug("nvavp_uninit video_initialized(%d) audio_initialized(%d)\n",
1297 video_initialized, audio_initialized);
1299 /* Video and Audio both are uninitialized */
1300 if (!video_initialized && !audio_initialized)
1303 nvavp->init_task = current;
1305 if (video_initialized) {
1306 pr_debug("nvavp_uninit nvavp->video_initialized\n");
1307 nvavp_halt_vde(nvavp);
1308 nvavp_set_video_init_status(nvavp, 0);
1309 video_initialized = 0;
1312 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
1313 if (audio_initialized) {
/* Flush the app-notify worker before marking audio down. */
1314 cancel_work_sync(&nvavp->app_notify_work);
1315 nvavp_set_audio_init_status(nvavp, 0);
1316 audio_initialized = 0;
1320 /* Video and Audio both becomes uninitialized */
1321 if (!video_initialized && !audio_initialized) {
1322 pr_debug("nvavp_uninit both channels uninitialized\n");
1324 clk_disable_unprepare(nvavp->sclk);
1325 clk_disable_unprepare(nvavp->emc_clk);
1326 disable_irq(nvavp->mbox_from_avp_pend_irq);
1327 nvavp_pushbuffer_deinit(nvavp);
1328 nvavp_halt_avp(nvavp);
1332 * WAR: turn off TMR2 for fix LP1 wake up by TMR2.
1333 * turn off the periodic interrupt and the timer temporarily
1335 reg = timer_readl(TIMER2_OFFSET + TIMER_PTV);
1336 reg &= ~(TIMER_EN | TIMER_PERIODIC);
1337 timer_writel(reg, TIMER2_OFFSET + TIMER_PTV);
1339 /* write a 1 to the intr_clr field to clear the interrupt */
1340 reg = TIMER_PCR_INTR;
1341 timer_writel(reg, TIMER2_OFFSET + TIMER_PCR);
1342 nvavp->init_task = NULL;
/*
 * nvcpu_set_clock() - translate an NVAVP_MODULE_ID_CPU clock request into
 * a PM QoS minimum-CPU-frequency request; rate 0 restores the default.
 */
1345 static int nvcpu_set_clock(struct nvavp_info *nvavp,
1346 struct nvavp_clock_args config,
1349 dev_dbg(&nvavp->nvhost_dev->dev, "%s: update cpu freq to clk_rate=%u\n",
1350 __func__, config.rate);
1352 if (config.rate > 0)
1353 pm_qos_update_request(&nvavp->min_cpu_freq_req, config.rate);
1355 pm_qos_update_request(&nvavp->min_cpu_freq_req,
1356 PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE);
/*
 * nvavp_map_iova() - NVAVP_IOCTL_MAP_IOVA handler.
 * Resolves a user-supplied dma-buf fd to an IOVA usable by the AVP and
 * copies the 32-bit address back to user space.
 * NOTE(review): dma_buf_get() takes a reference; the matching
 * dma_buf_put() on the success/error paths is not visible in this
 * extraction — confirm it exists.
 */
1361 static int nvavp_map_iova(struct file *filp, unsigned int cmd,
1364 struct nvavp_clientctx *clientctx = filp->private_data;
1365 struct nvavp_info *nvavp = clientctx->nvavp;
1366 struct nvavp_map_args map_arg;
1367 struct dma_buf *dmabuf;
1368 dma_addr_t addr = 0;
1371 if (copy_from_user(&map_arg, (void __user *)arg,
1372 sizeof(struct nvavp_map_args))) {
1373 dev_err(&nvavp->nvhost_dev->dev,
1374 "failed to copy memory handle\n");
1378 dev_err(&nvavp->nvhost_dev->dev,
1379 "invalid memory handle %08x\n", map_arg.fd);
1383 dmabuf = dma_buf_get(map_arg.fd);
1384 if (IS_ERR(dmabuf)) {
1385 dev_err(&nvavp->nvhost_dev->dev,
1386 "invalid buffer handle %08x\n", map_arg.fd);
1387 return PTR_ERR(dmabuf);
1390 ret = nvavp_get_iova_addr(clientctx, dmabuf, &addr);
/* Truncation to 32 bits: the AVP only addresses a 32-bit IOVA space. */
1394 map_arg.addr = (__u32)addr;
1396 trace_nvavp_map_iova(clientctx->channel_id, map_arg.fd, map_arg.addr);
1398 if (copy_to_user((void __user *)arg, &map_arg,
1399 sizeof(struct nvavp_map_args))) {
1400 dev_err(&nvavp->nvhost_dev->dev,
1401 "failed to copy phys addr\n");
/*
 * nvavp_unmap_iova() - NVAVP_IOCTL_UNMAP_IOVA handler.
 * Releases the IOVA mapping previously created by nvavp_map_iova() for
 * the given dma-buf fd/address pair, then drops the fd reference taken
 * here by dma_buf_get().
 */
1409 static int nvavp_unmap_iova(struct file *filp, unsigned long arg)
1411 struct nvavp_clientctx *clientctx = filp->private_data;
1412 struct nvavp_info *nvavp = clientctx->nvavp;
1413 struct nvavp_map_args map_arg;
1414 struct dma_buf *dmabuf;
1416 if (copy_from_user(&map_arg, (void __user *)arg,
1417 sizeof(struct nvavp_map_args))) {
1418 dev_err(&nvavp->nvhost_dev->dev,
1419 "failed to copy memory handle\n");
1423 dmabuf = dma_buf_get(map_arg.fd);
1424 if (IS_ERR(dmabuf)) {
1425 dev_err(&nvavp->nvhost_dev->dev,
1426 "invalid buffer handle %08x\n", map_arg.fd);
1427 return PTR_ERR(dmabuf);
1430 trace_nvavp_unmap_iova(clientctx->channel_id, map_arg.fd, map_arg.addr);
1432 nvavp_release_iova_addr(clientctx, dmabuf, (dma_addr_t)map_arg.addr);
1433 dma_buf_put(dmabuf);
/*
 * nvavp_set_clock_ioctl() - NVAVP_IOCTL_SET_CLOCK handler.
 * AVP/EMC ids just cache the requested rate in nvavp (applied later by
 * the clock-enable paths); CPU id is delegated to nvcpu_set_clock();
 * any other id sets the named clock immediately and reports the actual
 * achieved rate back to user space.
 */
1438 static int nvavp_set_clock_ioctl(struct file *filp, unsigned int cmd,
1441 struct nvavp_clientctx *clientctx = filp->private_data;
1442 struct nvavp_info *nvavp = clientctx->nvavp;
1444 struct nvavp_clock_args config;
1446 if (copy_from_user(&config, (void __user *)arg, sizeof(struct nvavp_clock_args)))
1449 dev_dbg(&nvavp->nvhost_dev->dev, "%s: clk_id=%d, clk_rate=%u\n",
1450 __func__, config.id, config.rate);
1452 if (config.id == NVAVP_MODULE_ID_AVP)
1453 nvavp->sclk_rate = config.rate;
1454 else if (config.id == NVAVP_MODULE_ID_EMC)
1455 nvavp->emc_clk_rate = config.rate;
1456 else if (config.id == NVAVP_MODULE_ID_CPU)
1457 return nvcpu_set_clock(nvavp, config, arg);
1459 c = nvavp_clk_get(nvavp, config.id);
1460 if (IS_ERR_OR_NULL(c))
/* Enable around clk_set_rate so the rate change takes effect, then read
 * back the rate actually programmed by the clock framework. */
1463 clk_prepare_enable(c);
1464 clk_set_rate(c, config.rate);
1466 config.rate = clk_get_rate(c);
1467 clk_disable_unprepare(c);
1469 trace_nvavp_set_clock_ioctl(clientctx->channel_id, config.id,
1472 if (copy_to_user((void __user *)arg, &config, sizeof(struct nvavp_clock_args)))
/*
 * nvavp_get_clock_ioctl() - NVAVP_IOCTL_GET_CLOCK handler.
 * Reads the current rate of the clock identified by config.id and
 * returns it to user space in the same struct.
 */
1478 static int nvavp_get_clock_ioctl(struct file *filp, unsigned int cmd,
1481 struct nvavp_clientctx *clientctx = filp->private_data;
1482 struct nvavp_info *nvavp = clientctx->nvavp;
1484 struct nvavp_clock_args config;
1486 if (copy_from_user(&config, (void __user *)arg, sizeof(struct nvavp_clock_args)))
1489 c = nvavp_clk_get(nvavp, config.id);
1490 if (IS_ERR_OR_NULL(c))
/* Briefly enable the clock so clk_get_rate() reflects a live rate. */
1493 clk_prepare_enable(c);
1494 config.rate = clk_get_rate(c);
1495 clk_disable_unprepare(c);
1497 trace_nvavp_get_clock_ioctl(clientctx->channel_id, config.id,
1500 if (copy_to_user((void __user *)arg, &config, sizeof(struct nvavp_clock_args)))
/*
 * nvavp_get_syncpointid_ioctl() - NVAVP_IOCTL_GET_SYNCPOINT_ID handler.
 * Returns the driver's syncpoint id to user space when the ioctl
 * direction includes a read.
 */
1506 static int nvavp_get_syncpointid_ioctl(struct file *filp, unsigned int cmd,
1509 struct nvavp_clientctx *clientctx = filp->private_data;
1510 struct nvavp_info *nvavp = clientctx->nvavp;
1511 u32 id = nvavp->syncpt_id;
1513 if (_IOC_DIR(cmd) & _IOC_READ) {
1514 if (copy_to_user((void __user *)arg, &id, sizeof(u32)))
1520 trace_nvavp_get_syncpointid_ioctl(clientctx->channel_id, id);
/*
 * nvavp_pushbuffer_submit_ioctl() - NVAVP_IOCTL_PUSH_BUFFER_SUBMIT.
 * Maps the user's command dma-buf (attach -> map_attachment -> vmap),
 * patches each relocation in place by writing the target buffer's
 * device address into the command stream, then hands the command buffer
 * to nvavp_pushbuffer_update(); optionally returns syncpoint info.
 * Cleanup labels unwind vmap/map/attach/get in strict reverse order —
 * do not reorder.
 * NOTE(review): user_hdr is only valid for the native (non-compat) path;
 * the compat wrapper passes a kernel-staged header via
 * compat_alloc_user_space().
 */
1525 static int nvavp_pushbuffer_submit_ioctl(struct file *filp, unsigned int cmd,
1528 struct nvavp_clientctx *clientctx = filp->private_data;
1529 struct nvavp_info *nvavp = clientctx->nvavp;
1530 struct nvavp_pushbuffer_submit_hdr hdr;
1532 struct dma_buf *cmdbuf_dmabuf;
1533 struct dma_buf_attachment *cmdbuf_attach;
1534 struct sg_table *cmdbuf_sgt;
1536 phys_addr_t phys_addr;
1537 unsigned long virt_addr;
1538 struct nvavp_pushbuffer_submit_hdr *user_hdr =
1539 (struct nvavp_pushbuffer_submit_hdr *) arg;
1540 struct nvavp_syncpt syncpt;
1542 syncpt.id = NVSYNCPT_INVALID;
1545 if (_IOC_DIR(cmd) & _IOC_WRITE) {
1546 if (copy_from_user(&hdr, (void __user *)arg,
1547 sizeof(struct nvavp_pushbuffer_submit_hdr)))
/* A zero cmdbuf handle is treated as "nothing to submit". */
1551 if (!hdr.cmdbuf.mem)
1554 if (copy_from_user(clientctx->relocs, (void __user *)hdr.relocs,
1555 sizeof(struct nvavp_reloc) * hdr.num_relocs)) {
1559 cmdbuf_dmabuf = dma_buf_get(hdr.cmdbuf.mem);
1560 if (IS_ERR(cmdbuf_dmabuf)) {
1561 dev_err(&nvavp->nvhost_dev->dev,
1562 "invalid cmd buffer handle %08x\n", hdr.cmdbuf.mem);
1563 return PTR_ERR(cmdbuf_dmabuf);
1566 cmdbuf_attach = dma_buf_attach(cmdbuf_dmabuf, &nvavp->nvhost_dev->dev);
1567 if (IS_ERR(cmdbuf_attach)) {
1568 dev_err(&nvavp->nvhost_dev->dev, "cannot attach cmdbuf_dmabuf\n");
1569 ret = PTR_ERR(cmdbuf_attach);
1570 goto err_dmabuf_attach;
1573 cmdbuf_sgt = dma_buf_map_attachment(cmdbuf_attach, DMA_BIDIRECTIONAL);
1574 if (IS_ERR(cmdbuf_sgt)) {
1575 dev_err(&nvavp->nvhost_dev->dev, "cannot map cmdbuf_dmabuf\n");
1576 ret = PTR_ERR(cmdbuf_sgt);
1577 goto err_dmabuf_map;
1580 phys_addr = sg_dma_address(cmdbuf_sgt->sgl);
/* CPU-visible mapping so relocations can be patched with writel(). */
1582 virt_addr = (unsigned long)dma_buf_vmap(cmdbuf_dmabuf);
1584 dev_err(&nvavp->nvhost_dev->dev, "cannot vmap cmdbuf_dmabuf\n");
1586 goto err_dmabuf_vmap;
1589 cmdbuf_data = (u32 *)(virt_addr + hdr.cmdbuf.offset);
1590 for (i = 0; i < hdr.num_relocs; i++) {
1591 struct dma_buf *target_dmabuf;
1592 struct dma_buf_attachment *target_attach;
1593 struct sg_table *target_sgt;
1594 u32 *reloc_addr, target_phys_addr;
/* Each reloc must refer back to the submitted cmdbuf handle. */
1596 if (clientctx->relocs[i].cmdbuf_mem != hdr.cmdbuf.mem) {
1597 dev_err(&nvavp->nvhost_dev->dev,
1598 "reloc info does not match target bufferID\n");
1600 goto err_reloc_info;
1603 reloc_addr = cmdbuf_data +
1604 (clientctx->relocs[i].cmdbuf_offset >> 2);
1606 target_dmabuf = dma_buf_get(clientctx->relocs[i].target);
1607 if (IS_ERR(target_dmabuf)) {
1608 ret = PTR_ERR(target_dmabuf);
1609 goto target_dmabuf_fail;
1611 target_attach = dma_buf_attach(target_dmabuf,
1612 &nvavp->nvhost_dev->dev);
1613 if (IS_ERR(target_attach)) {
1614 ret = PTR_ERR(target_attach);
1615 goto target_attach_fail;
1617 target_sgt = dma_buf_map_attachment(target_attach,
1619 if (IS_ERR(target_sgt)) {
1620 ret = PTR_ERR(target_sgt);
1621 goto target_map_fail;
/* Prefer the DMA address; fall back to the raw physical address when
 * no IOMMU mapping exists (sg_dma_address() == 0). */
1624 target_phys_addr = sg_dma_address(target_sgt->sgl);
1625 if (!target_phys_addr)
1626 target_phys_addr = sg_phys(target_sgt->sgl);
1627 target_phys_addr += clientctx->relocs[i].target_offset;
1628 writel(target_phys_addr, reloc_addr);
1629 dma_buf_unmap_attachment(target_attach, target_sgt,
1632 dma_buf_detach(target_dmabuf, target_attach);
1634 dma_buf_put(target_dmabuf);
1637 goto err_reloc_info;
1640 trace_nvavp_pushbuffer_submit_ioctl(clientctx->channel_id,
1641 hdr.cmdbuf.mem, hdr.cmdbuf.offset,
1642 hdr.cmdbuf.words, hdr.num_relocs, hdr.flags);
/* Submit with syncpoint tracking when the caller asked for one. */
1645 ret = nvavp_pushbuffer_update(nvavp,
1646 (phys_addr + hdr.cmdbuf.offset),
1647 hdr.cmdbuf.words, &syncpt,
1648 (hdr.flags & NVAVP_UCODE_EXT),
1649 clientctx->channel_id);
1651 if (copy_to_user((void __user *)user_hdr->syncpt, &syncpt,
1652 sizeof(struct nvavp_syncpt))) {
1654 goto err_reloc_info;
1657 ret = nvavp_pushbuffer_update(nvavp,
1658 (phys_addr + hdr.cmdbuf.offset),
1659 hdr.cmdbuf.words, NULL,
1660 (hdr.flags & NVAVP_UCODE_EXT),
1661 clientctx->channel_id);
/* Unwind in reverse order of acquisition. */
1665 dma_buf_vunmap(cmdbuf_dmabuf, (void *)virt_addr);
1667 dma_buf_unmap_attachment(cmdbuf_attach, cmdbuf_sgt, DMA_BIDIRECTIONAL);
1669 dma_buf_detach(cmdbuf_dmabuf, cmdbuf_attach);
1671 dma_buf_put(cmdbuf_dmabuf);
1675 #ifdef CONFIG_COMPAT
/*
 * nvavp_pushbuffer_submit_compat_ioctl() - 32-bit compat shim for
 * PUSH_BUFFER_SUBMIT. Widens the v32 header into a native header staged
 * in compat user space (compat_alloc_user_space), forwards to the native
 * ioctl, then copies the (possibly updated) syncpt pointer back out.
 */
1676 static int nvavp_pushbuffer_submit_compat_ioctl(struct file *filp,
1680 struct nvavp_clientctx *clientctx = filp->private_data;
1681 struct nvavp_info *nvavp = clientctx->nvavp;
1682 struct nvavp_pushbuffer_submit_hdr_v32 hdr_v32;
1683 struct nvavp_pushbuffer_submit_hdr __user *user_hdr;
1686 if (_IOC_DIR(cmd) & _IOC_WRITE) {
1687 if (copy_from_user(&hdr_v32, (void __user *)arg,
1688 sizeof(struct nvavp_pushbuffer_submit_hdr_v32)))
1692 if (!hdr_v32.cmdbuf.mem)
1695 user_hdr = compat_alloc_user_space(sizeof(*user_hdr));
1696 if (!access_ok(VERIFY_WRITE, user_hdr, sizeof(*user_hdr)))
/* Field-by-field widening; 32-bit user pointers become void __user *. */
1699 if (__put_user(hdr_v32.cmdbuf.mem, &user_hdr->cmdbuf.mem)
1700 || __put_user(hdr_v32.cmdbuf.offset, &user_hdr->cmdbuf.offset)
1701 || __put_user(hdr_v32.cmdbuf.words, &user_hdr->cmdbuf.words)
1702 || __put_user((void __user *)(unsigned long)hdr_v32.relocs,
1704 || __put_user(hdr_v32.num_relocs, &user_hdr->num_relocs)
1705 || __put_user((void __user *)(unsigned long)hdr_v32.syncpt,
1707 || __put_user(hdr_v32.flags, &user_hdr->flags))
1710 ret = nvavp_pushbuffer_submit_ioctl(filp, cmd, (unsigned long)user_hdr);
1714 if (__get_user(hdr_v32.syncpt, &user_hdr->syncpt))
1717 if (copy_to_user((void __user *)arg, &hdr_v32,
1718 sizeof(struct nvavp_pushbuffer_submit_hdr_v32))) {
1726 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
/*
 * nvavp_pushbuffer_submit_audio() - in-kernel submit path for audio
 * clients: pushes a pre-resolved command buffer (physical address and
 * word count) to the audio channel without syncpoint tracking.
 */
1727 int nvavp_pushbuffer_submit_audio(nvavp_clientctx_t client, int cmd_buf_phys,
1730 struct nvavp_clientctx *clientctx = client;
1731 struct nvavp_info *nvavp = clientctx->nvavp;
1733 return nvavp_pushbuffer_update(nvavp,
1735 cmd_buf_words, NULL,
1737 NVAVP_AUDIO_CHANNEL);
1739 EXPORT_SYMBOL_GPL(nvavp_pushbuffer_submit_audio);
/*
 * nvavp_register_audio_cb() - register a callback invoked on audio
 * notifications from the AVP (single slot; a later call replaces the
 * earlier callback).
 */
1741 void nvavp_register_audio_cb(nvavp_clientctx_t client, void (*cb)(void))
1743 struct nvavp_clientctx *clientctx = client;
1744 struct nvavp_info *nvavp = clientctx->nvavp;
1746 nvavp->audio_notify = cb;
1748 EXPORT_SYMBOL_GPL(nvavp_register_audio_cb);
/*
 * nvavp_wake_avp_ioctl() - NVAVP_IOCTL_WAKE_AVP: kick the AVP by writing
 * the wake token to the outbox mailbox register.
 */
1751 static int nvavp_wake_avp_ioctl(struct file *filp, unsigned int cmd,
1756 return nvavp_outbox_write(0xA0000001);
/*
 * nvavp_force_clock_stay_on_ioctl() - NVAVP_IOCTL_FORCE_CLOCK_STAY_ON.
 * Reference-counts per-client "keep clocks on" requests: the first
 * enable turns the clocks on and sets stay_on; the last disable clears
 * stay_on and allows the deferred clock_disable_work to run.
 */
1759 static int nvavp_force_clock_stay_on_ioctl(struct file *filp, unsigned int cmd,
1762 struct nvavp_clientctx *clientctx = filp->private_data;
1763 struct nvavp_info *nvavp = clientctx->nvavp;
1764 struct nvavp_clock_stay_on_state_args clock;
1766 if (copy_from_user(&clock, (void __user *)arg,
1767 sizeof(struct nvavp_clock_stay_on_state_args)))
1770 dev_dbg(&nvavp->nvhost_dev->dev, "%s: state=%d\n",
1771 __func__, clock.state);
1773 if (clock.state != NVAVP_CLOCK_STAY_ON_DISABLED &&
1774 clock.state != NVAVP_CLOCK_STAY_ON_ENABLED) {
1775 dev_err(&nvavp->nvhost_dev->dev, "%s: invalid argument=%d\n",
1776 __func__, clock.state);
1780 trace_nvavp_force_clock_stay_on_ioctl(clientctx->channel_id,
1781 clock.state, clientctx->clk_reqs);
1784 mutex_lock(&nvavp->open_lock);
1785 if (clientctx->clk_reqs++ == 0) {
1786 nvavp_clks_enable(nvavp);
1787 nvavp->stay_on = true;
1789 mutex_unlock(&nvavp->open_lock);
/* Flush any pending deferred disable so the clocks stay up. */
1790 cancel_work_sync(&nvavp->clock_disable_work);
1792 mutex_lock(&nvavp->open_lock);
1793 if (--clientctx->clk_reqs == 0) {
1794 nvavp->stay_on = false;
1795 nvavp_clks_disable(nvavp);
1797 mutex_unlock(&nvavp->open_lock);
/* NOTE(review): stay_on is re-read here after open_lock is dropped, so a
 * concurrent enable could race with scheduling the disable work —
 * confirm this window is acceptable. */
1798 if (!nvavp->stay_on)
1799 schedule_work(&nvavp->clock_disable_work);
1804 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
/*
 * nvavp_enable_audio_clocks() - enable the VCP or BSEA clock on behalf
 * of an in-kernel audio client; unknown clk_ids are silently ignored.
 */
1805 int nvavp_enable_audio_clocks(nvavp_clientctx_t client, u32 clk_id)
1807 struct nvavp_clientctx *clientctx = client;
1808 struct nvavp_info *nvavp = clientctx->nvavp;
1810 dev_dbg(&nvavp->nvhost_dev->dev, "%s: clk_id = %d\n",
1813 trace_nvavp_enable_audio_clocks(clientctx->channel_id, clk_id);
1815 mutex_lock(&nvavp->open_lock);
1816 if (clk_id == NVAVP_MODULE_ID_VCP)
1817 clk_prepare_enable(nvavp->vcp_clk);
1818 else if (clk_id == NVAVP_MODULE_ID_BSEA)
1819 clk_prepare_enable(nvavp->bsea_clk);
1820 mutex_unlock(&nvavp->open_lock);
1823 EXPORT_SYMBOL_GPL(nvavp_enable_audio_clocks);
/*
 * nvavp_disable_audio_clocks() - counterpart of
 * nvavp_enable_audio_clocks(): disables the VCP or BSEA clock; unknown
 * clk_ids are silently ignored.
 */
1825 int nvavp_disable_audio_clocks(nvavp_clientctx_t client, u32 clk_id)
1827 struct nvavp_clientctx *clientctx = client;
1828 struct nvavp_info *nvavp = clientctx->nvavp;
1830 dev_dbg(&nvavp->nvhost_dev->dev, "%s: clk_id = %d\n",
1833 trace_nvavp_disable_audio_clocks(clientctx->channel_id, clk_id);
1835 mutex_lock(&nvavp->open_lock);
1836 if (clk_id == NVAVP_MODULE_ID_VCP)
1837 clk_disable_unprepare(nvavp->vcp_clk);
1838 else if (clk_id == NVAVP_MODULE_ID_BSEA)
1839 clk_disable_unprepare(nvavp->bsea_clk);
1840 mutex_unlock(&nvavp->open_lock);
1843 EXPORT_SYMBOL_GPL(nvavp_disable_audio_clocks);
1845 int nvavp_enable_audio_clocks(nvavp_clientctx_t client, u32 clk_id)
1849 EXPORT_SYMBOL_GPL(nvavp_enable_audio_clocks);
1851 int nvavp_disable_audio_clocks(nvavp_clientctx_t client, u32_clk_id)
1855 EXPORT_SYMBOL_GPL(nvavp_disable_audio_clocks);
/*
 * nvavp_set_min_online_cpus_ioctl() - NVAVP_IOCTL_SET_MIN_ONLINE_CPUS.
 * Applies the caller's requested minimum number of online CPUs via a
 * PM QoS request; a non-positive value restores the default.
 */
1858 static int nvavp_set_min_online_cpus_ioctl(struct file *filp, unsigned int cmd,
1861 struct nvavp_clientctx *clientctx = filp->private_data;
1862 struct nvavp_info *nvavp = clientctx->nvavp;
1863 struct nvavp_num_cpus_args config;
1865 if (copy_from_user(&config, (void __user *)arg,
1866 sizeof(struct nvavp_num_cpus_args)))
1869 dev_dbg(&nvavp->nvhost_dev->dev, "%s: min_online_cpus=%d\n",
1870 __func__, config.min_online_cpus);
1872 trace_nvavp_set_min_online_cpus_ioctl(clientctx->channel_id,
1873 config.min_online_cpus);
1875 if (config.min_online_cpus > 0)
1876 pm_qos_update_request(&nvavp->min_online_cpus_req,
1877 config.min_online_cpus);
/* NOTE(review): this resets the MIN_ONLINE_CPUS request with the
 * CPU_FREQ_MIN default constant — looks like a copy-paste from
 * nvcpu_set_clock(); confirm the intended default macro. */
1879 pm_qos_update_request(&nvavp->min_online_cpus_req,
1880 PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE);
/*
 * tegra_nvavp_open() - common open path for video/audio channels.
 * Allocates a per-client context, initialises the channel (firmware,
 * ucode, AVP boot as needed) and bumps the per-engine refcount.
 * Caller must hold nvavp->open_lock.
 */
1885 static int tegra_nvavp_open(struct nvavp_info *nvavp,
1886 struct nvavp_clientctx **client, int channel_id)
1888 struct nvavp_clientctx *clientctx;
1891 dev_dbg(&nvavp->nvhost_dev->dev, "%s: ++\n", __func__);
1893 clientctx = kzalloc(sizeof(*clientctx), GFP_KERNEL);
1897 pr_debug("tegra_nvavp_open channel_id (%d)\n", channel_id);
1899 clientctx->channel_id = channel_id;
1901 ret = nvavp_init(nvavp, channel_id);
1905 if (IS_VIDEO_CHANNEL_ID(channel_id))
1906 nvavp->video_refcnt++;
1907 if (IS_AUDIO_CHANNEL_ID(channel_id))
1908 nvavp->audio_refcnt++;
1911 trace_tegra_nvavp_open(channel_id, nvavp->refcount,
1912 nvavp->video_refcnt, nvavp->audio_refcnt);
1914 clientctx->nvavp = nvavp;
/* Fresh rbtree for this client's dma-buf IOVA mappings. */
1915 clientctx->iova_handles = RB_ROOT;
1916 *client = clientctx;
/*
 * tegra_nvavp_video_open() - file open for /dev/tegra_avpchannel.
 * Wraps tegra_nvavp_open() for the video channel under open_lock and
 * stashes the resulting client context in filp->private_data.
 */
1921 static int tegra_nvavp_video_open(struct inode *inode, struct file *filp)
1923 struct miscdevice *miscdev = filp->private_data;
1924 struct nvavp_info *nvavp = dev_get_drvdata(miscdev->parent);
1925 struct nvavp_clientctx *clientctx;
1928 pr_debug("tegra_nvavp_video_open NVAVP_VIDEO_CHANNEL\n");
1930 nonseekable_open(inode, filp);
1932 mutex_lock(&nvavp->open_lock);
1933 ret = tegra_nvavp_open(nvavp, &clientctx, NVAVP_VIDEO_CHANNEL);
1934 filp->private_data = clientctx;
1935 mutex_unlock(&nvavp->open_lock);
1940 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
/*
 * tegra_nvavp_audio_open() - file open for /dev/tegra_audio_avpchannel;
 * same pattern as the video open but for the audio channel.
 */
1941 static int tegra_nvavp_audio_open(struct inode *inode, struct file *filp)
1943 struct miscdevice *miscdev = filp->private_data;
1944 struct nvavp_info *nvavp = dev_get_drvdata(miscdev->parent);
1945 struct nvavp_clientctx *clientctx;
1948 pr_debug("tegra_nvavp_audio_open NVAVP_AUDIO_CHANNEL\n");
1950 nonseekable_open(inode, filp);
1952 mutex_lock(&nvavp->open_lock);
1953 ret = tegra_nvavp_open(nvavp, &clientctx, NVAVP_AUDIO_CHANNEL);
1954 filp->private_data = clientctx;
1955 mutex_unlock(&nvavp->open_lock);
/*
 * tegra_nvavp_audio_client_open() - in-kernel (exported) audio client
 * open; uses the module-global nvavp_info_ctx instead of a file handle.
 */
1960 int tegra_nvavp_audio_client_open(nvavp_clientctx_t *clientctx)
1962 struct nvavp_info *nvavp = nvavp_info_ctx;
1965 mutex_lock(&nvavp->open_lock);
1966 ret = tegra_nvavp_open(nvavp, (struct nvavp_clientctx **)clientctx,
1967 NVAVP_AUDIO_CHANNEL);
1968 mutex_unlock(&nvavp->open_lock);
1972 EXPORT_SYMBOL_GPL(tegra_nvavp_audio_client_open);
/*
 * tegra_nvavp_release() - common release path for a client context.
 * Drops any outstanding clock requests the client held, decrements the
 * global/per-engine refcounts and fully uninitialises the AVP when the
 * last reference goes away. Caller must hold nvavp->open_lock.
 */
1975 static int tegra_nvavp_release(struct nvavp_clientctx *clientctx,
1978 struct nvavp_info *nvavp = clientctx->nvavp;
1981 dev_dbg(&nvavp->nvhost_dev->dev, "%s: ++\n", __func__);
1983 if (!nvavp->refcount) {
1984 dev_err(&nvavp->nvhost_dev->dev,
1985 "releasing while in invalid state\n");
1990 /* if this client had any requests, drop our clk ref */
1991 if (clientctx->clk_reqs)
1992 nvavp_clks_disable(nvavp);
1994 if (nvavp->refcount > 0)
1996 if (!nvavp->refcount)
1997 nvavp_uninit(nvavp);
1999 if (IS_VIDEO_CHANNEL_ID(channel_id))
2000 nvavp->video_refcnt--;
2001 if (IS_AUDIO_CHANNEL_ID(channel_id))
2002 nvavp->audio_refcnt--;
2004 trace_tegra_nvavp_release(channel_id, nvavp->refcount,
2005 nvavp->video_refcnt, nvavp->audio_refcnt);
/* Tear down every IOVA mapping this client still owns. */
2008 nvavp_remove_iova_mapping(clientctx);
/*
 * tegra_nvavp_video_release() - file release for the video device node;
 * clears private_data and delegates to tegra_nvavp_release() under
 * open_lock.
 */
2013 static int tegra_nvavp_video_release(struct inode *inode, struct file *filp)
2015 struct nvavp_clientctx *clientctx = filp->private_data;
2016 struct nvavp_info *nvavp = clientctx->nvavp;
2019 mutex_lock(&nvavp->open_lock);
2020 filp->private_data = NULL;
2021 ret = tegra_nvavp_release(clientctx, NVAVP_VIDEO_CHANNEL);
2022 mutex_unlock(&nvavp->open_lock);
2027 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
/*
 * tegra_nvavp_audio_release() - file release for the audio device node;
 * mirrors the video release path for the audio channel.
 */
2028 static int tegra_nvavp_audio_release(struct inode *inode,
2031 struct nvavp_clientctx *clientctx = filp->private_data;
2032 struct nvavp_info *nvavp = clientctx->nvavp;
2035 mutex_lock(&nvavp->open_lock);
2036 filp->private_data = NULL;
2037 ret = tegra_nvavp_release(clientctx, NVAVP_AUDIO_CHANNEL);
2038 mutex_unlock(&nvavp->open_lock);
/*
 * tegra_nvavp_audio_client_release() - in-kernel (exported) counterpart
 * of tegra_nvavp_audio_client_open(); releases the given client context.
 */
2043 int tegra_nvavp_audio_client_release(nvavp_clientctx_t client)
2045 struct nvavp_clientctx *clientctx = client;
2046 struct nvavp_info *nvavp = clientctx->nvavp;
2049 mutex_lock(&nvavp->open_lock);
2050 ret = tegra_nvavp_release(clientctx, NVAVP_AUDIO_CHANNEL);
2051 mutex_unlock(&nvavp->open_lock);
2055 EXPORT_SYMBOL_GPL(tegra_nvavp_audio_client_release);
/*
 * nvavp_channel_open() - NVAVP_IOCTL_CHANNEL_OPEN helper: creates a new
 * fd (anon inode sharing this driver's fops) bound to a fresh client
 * context on the same channel as the calling context, and returns the fd
 * number in @arg.
 * NOTE(review): fd_install() happens before tegra_nvavp_open() succeeds;
 * the error-unwind lines are not visible in this extraction — confirm
 * the failure path cannot leak the installed fd.
 */
2060 nvavp_channel_open(struct file *filp, struct nvavp_channel_open_args *arg)
2065 struct nvavp_clientctx *clientctx = filp->private_data;
2066 struct nvavp_info *nvavp = clientctx->nvavp;
2068 err = get_unused_fd_flags(O_RDWR);
2074 name = kasprintf(GFP_KERNEL, "nvavp-channel-fd%d", fd);
2081 file = anon_inode_getfile(name, filp->f_op, &(nvavp->video_misc_dev),
2085 err = PTR_ERR(file);
2090 fd_install(fd, file);
2092 nonseekable_open(file->f_inode, filp);
2093 mutex_lock(&nvavp->open_lock);
2094 err = tegra_nvavp_open(nvavp,
2095 (struct nvavp_clientctx **)&file->private_data,
2096 clientctx->channel_id);
2100 mutex_unlock(&nvavp->open_lock);
2103 mutex_unlock(&nvavp->open_lock);
2105 arg->channel_fd = fd;
/*
 * tegra_nvavp_ioctl() - main unlocked_ioctl dispatcher.
 * Validates the ioctl magic/number range, then routes each command to
 * its handler; small argument structs are staged in the on-stack buf.
 */
2109 static long tegra_nvavp_ioctl(struct file *filp, unsigned int cmd,
2112 struct nvavp_clientctx *clientctx = filp->private_data;
2113 struct nvavp_clock_args config;
2115 u8 buf[NVAVP_IOCTL_CHANNEL_MAX_ARG_SIZE];
2117 if (_IOC_TYPE(cmd) != NVAVP_IOCTL_MAGIC ||
2118 _IOC_NR(cmd) < NVAVP_IOCTL_MIN_NR ||
2119 _IOC_NR(cmd) > NVAVP_IOCTL_MAX_NR)
/* SET_NVMAP_FD is accepted but is a no-op in this driver version. */
2123 case NVAVP_IOCTL_SET_NVMAP_FD:
2125 case NVAVP_IOCTL_GET_SYNCPOINT_ID:
2126 ret = nvavp_get_syncpointid_ioctl(filp, cmd, arg);
2128 case NVAVP_IOCTL_PUSH_BUFFER_SUBMIT:
2129 ret = nvavp_pushbuffer_submit_ioctl(filp, cmd, arg);
2131 case NVAVP_IOCTL_SET_CLOCK:
2132 ret = nvavp_set_clock_ioctl(filp, cmd, arg);
2134 case NVAVP_IOCTL_GET_CLOCK:
2135 ret = nvavp_get_clock_ioctl(filp, cmd, arg);
2137 case NVAVP_IOCTL_WAKE_AVP:
2138 ret = nvavp_wake_avp_ioctl(filp, cmd, arg);
2140 case NVAVP_IOCTL_FORCE_CLOCK_STAY_ON:
2141 ret = nvavp_force_clock_stay_on_ioctl(filp, cmd, arg);
2143 case NVAVP_IOCTL_ENABLE_AUDIO_CLOCKS:
2144 if (copy_from_user(&config, (void __user *)arg,
2145 sizeof(struct nvavp_clock_args))) {
2149 ret = nvavp_enable_audio_clocks(clientctx, config.id);
2151 case NVAVP_IOCTL_DISABLE_AUDIO_CLOCKS:
2152 if (copy_from_user(&config, (void __user *)arg,
2153 sizeof(struct nvavp_clock_args))) {
2157 ret = nvavp_disable_audio_clocks(clientctx, config.id);
2159 case NVAVP_IOCTL_SET_MIN_ONLINE_CPUS:
2160 ret = nvavp_set_min_online_cpus_ioctl(filp, cmd, arg);
2162 case NVAVP_IOCTL_MAP_IOVA:
2163 ret = nvavp_map_iova(filp, cmd, arg);
2165 case NVAVP_IOCTL_UNMAP_IOVA:
2166 ret = nvavp_unmap_iova(filp, arg);
2168 case NVAVP_IOCTL_CHANNEL_OPEN:
2169 ret = nvavp_channel_open(filp, (void *)buf);
/* NOTE(review): copy_to_user() returns the number of uncopied bytes, not
 * a -errno; assigning it directly to ret means a partial copy would be
 * reported as a positive value — confirm callers expect -EFAULT here. */
2171 ret = copy_to_user((void __user *)arg, buf,
2181 #ifdef CONFIG_COMPAT
/*
 * tegra_nvavp_compat_ioctl() - 32-bit ioctl entry: only
 * PUSH_BUFFER_SUBMIT needs translation; everything else has an
 * identical layout and goes straight to the native handler.
 */
2182 static long tegra_nvavp_compat_ioctl(struct file *filp, unsigned int cmd,
2187 if (_IOC_TYPE(cmd) != NVAVP_IOCTL_MAGIC ||
2188 _IOC_NR(cmd) < NVAVP_IOCTL_MIN_NR ||
2189 _IOC_NR(cmd) > NVAVP_IOCTL_MAX_NR)
2193 case NVAVP_IOCTL_PUSH_BUFFER_SUBMIT32:
2194 ret = nvavp_pushbuffer_submit_compat_ioctl(filp, cmd, arg);
2197 ret = tegra_nvavp_ioctl(filp, cmd, arg);
/* File operations for the video channel node ("tegra_avpchannel"). */
2204 static const struct file_operations tegra_video_nvavp_fops = {
2205 .owner = THIS_MODULE,
2206 .open = tegra_nvavp_video_open,
2207 .release = tegra_nvavp_video_release,
2208 .unlocked_ioctl = tegra_nvavp_ioctl,
2209 #ifdef CONFIG_COMPAT
2210 .compat_ioctl = tegra_nvavp_compat_ioctl,
2214 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
/* File operations for the audio channel node ("tegra_audio_avpchannel"). */
2215 static const struct file_operations tegra_audio_nvavp_fops = {
2216 .owner = THIS_MODULE,
2217 .open = tegra_nvavp_audio_open,
2218 .release = tegra_nvavp_audio_release,
2219 .unlocked_ioctl = tegra_nvavp_ioctl,
2220 #ifdef CONFIG_COMPAT
2221 .compat_ioctl = tegra_nvavp_compat_ioctl,
/* sysfs "boost_sclk" read: report the current boost flag as 0/1. */
2226 static ssize_t boost_sclk_show(struct device *dev,
2227 struct device_attribute *attr, char *buf)
2229 return snprintf(buf, PAGE_SIZE, "%d\n", boost_sclk);
/*
 * sysfs "boost_sclk" write: non-zero boosts avp.sclk to SCLK_BOOST_RATE,
 * zero releases the boost (rate 0 lets the clock framework pick).
 */
2232 static ssize_t boost_sclk_store(struct device *dev,
2233 struct device_attribute *attr, const char *buf, size_t count)
2235 struct platform_device *ndev = to_platform_device(dev);
2236 struct nvavp_info *nvavp = platform_get_drvdata(ndev);
2237 unsigned long val = 0;
2239 if (kstrtoul(buf, 10, &val) < 0)
2243 clk_set_rate(nvavp->sclk, SCLK_BOOST_RATE);
2245 clk_set_rate(nvavp->sclk, 0);
/* Readable by all, writable by owner (S_IRUGO | S_IWUSR). */
2252 DEVICE_ATTR(boost_sclk, S_IRUGO | S_IWUSR, boost_sclk_show, boost_sclk_store);
2255 NVAVP_USE_SMMU = (1 << 0),
2256 NVAVP_USE_CARVEOUT = (1 << 1)
/*
 * nvavp_reserve_os_mem() - try to memblock-reserve 1 MiB at @phys for
 * the AVP OS image; only addresses outside the kernel's valid pfn range
 * are eligible, and a failed reservation is reported with dev_err.
 */
2259 static int nvavp_reserve_os_mem(struct nvavp_info *nvavp, dma_addr_t phys)
2262 if (!pfn_valid(__phys_to_pfn(phys))) {
2263 if (memblock_reserve(phys, SZ_1M)) {
2264 dev_err(&nvavp->nvhost_dev->dev,
2265 "failed to reserve mem block %lx\n",
2266 (unsigned long)phys);
/* Device-tree compatible strings this driver binds to (Tegra30/114/124). */
2274 static struct of_device_id tegra_nvavp_of_match[] = {
2275 { .compatible = "nvidia,tegra30-nvavp", NULL },
2276 { .compatible = "nvidia,tegra114-nvavp", NULL },
2277 { .compatible = "nvidia,tegra124-nvavp", NULL },
/*
 * tegra_nvavp_probe() - platform driver probe.
 * Maps registers and the mailbox IRQ from DT, allocates nvavp_info,
 * carves out / IOVA-allocates the 1 MiB AVP OS region according to the
 * configured heap, acquires all clocks, registers the video (and
 * optionally audio) misc devices, wires the mailbox ISR, and sets up
 * runtime PM plus PM QoS requests. Error labels release resources in
 * reverse acquisition order.
 */
2282 static int tegra_nvavp_probe(struct platform_device *ndev)
2284 struct nvavp_info *nvavp;
2286 enum nvavp_heap heap_mask;
2287 int ret = 0, channel_id;
2288 struct device_node *np;
2290 np = ndev->dev.of_node;
2292 irq = platform_get_irq(ndev, 0);
2293 nvavp_reg_base = of_iomap(np, 0);
/* Fallback IRQ lookup by name for DTs that index it differently. */
2295 irq = platform_get_irq_byname(ndev, "mbox_from_nvavp_pending");
2299 dev_err(&ndev->dev, "invalid nvhost data\n");
2303 if (!nvavp_reg_base) {
2304 dev_err(&ndev->dev, "unable to map, memory mapped IO\n");
2308 /* Set the max segment size supported. */
2309 ndev->dev.dma_parms = &nvavp_dma_parameters;
2311 nvavp = kzalloc(sizeof(struct nvavp_info), GFP_KERNEL);
2313 dev_err(&ndev->dev, "cannot allocate avp_info\n");
/* NOTE(review): redundant — kzalloc already zeroes the allocation. */
2317 memset(nvavp, 0, sizeof(*nvavp));
2319 #if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU) /* Tegra2 with AVP MMU */
2320 heap_mask = NVAVP_USE_CARVEOUT;
2321 #elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU) /* Tegra3 with SMMU */
2322 heap_mask = NVAVP_USE_SMMU;
2323 #else /* nvmem= carveout */
2324 heap_mask = NVAVP_USE_CARVEOUT;
2326 switch (heap_mask) {
2327 case NVAVP_USE_SMMU:
/* Preferred fixed IOVA; fall back to the secondary address if the
 * allocator cannot place the buffer exactly there. */
2329 nvavp->os_info.phys = 0x8ff00000;
2330 nvavp->os_info.data = dma_alloc_at_coherent(
2333 &nvavp->os_info.phys,
2336 if (!nvavp->os_info.data || nvavp->os_info.phys != 0x8ff00000) {
2337 nvavp->os_info.phys = 0x0ff00000;
2338 nvavp->os_info.data = dma_alloc_at_coherent(
2341 &nvavp->os_info.phys,
2344 if (!nvavp->os_info.data ||
2345 nvavp->os_info.phys != 0x0ff00000) {
2346 dev_err(&ndev->dev, "cannot allocate IOVA memory\n");
2351 dev_info(&ndev->dev,
2352 "allocated IOVA at %lx for AVP os\n",
2353 (unsigned long)nvavp->os_info.phys);
2355 case NVAVP_USE_CARVEOUT:
/* Probe the known carveout base addresses in preference order. */
2356 if (!nvavp_reserve_os_mem(nvavp, 0x8e000000))
2357 nvavp->os_info.phys = 0x8e000000;
2358 else if (!nvavp_reserve_os_mem(nvavp, 0xf7e00000))
2359 nvavp->os_info.phys = 0xf7e00000;
2360 else if (!nvavp_reserve_os_mem(nvavp, 0x9e000000))
2361 nvavp->os_info.phys = 0x9e000000;
2362 else if (!nvavp_reserve_os_mem(nvavp, 0xbe000000))
2363 nvavp->os_info.phys = 0xbe000000;
/* NOTE(review): nvavp->nvhost_dev is assigned much later (line 2482);
 * these dev_err calls would dereference a NULL nvhost_dev — confirm. */
2365 dev_err(&nvavp->nvhost_dev->dev,
2366 "cannot find nvmem= carveout to load AVP os\n");
2367 dev_err(&nvavp->nvhost_dev->dev,
2368 "check kernel command line "
2369 "to see if nvmem= is defined\n");
2374 dev_info(&ndev->dev,
2375 "allocated carveout memory at %lx for AVP os\n",
2376 (unsigned long)nvavp->os_info.phys);
2379 dev_err(&ndev->dev, "invalid/non-supported heap for AVP os\n");
2381 goto err_get_syncpt;
2384 nvavp->mbox_from_avp_pend_irq = irq;
2385 mutex_init(&nvavp->open_lock);
2387 for (channel_id = 0; channel_id < NVAVP_NUM_CHANNELS; channel_id++)
2388 mutex_init(&nvavp->channel_info[channel_id].pushbuffer_lock);
2390 /* TODO DO NOT USE NVAVP DEVICE */
2391 nvavp->cop_clk = clk_get(&ndev->dev, "cop");
2392 if (IS_ERR(nvavp->cop_clk)) {
2393 dev_err(&ndev->dev, "cannot get cop clock\n");
2395 goto err_get_cop_clk;
2398 nvavp->vde_clk = clk_get(&ndev->dev, "vde");
2399 if (IS_ERR(nvavp->vde_clk)) {
2400 dev_err(&ndev->dev, "cannot get vde clock\n");
2402 goto err_get_vde_clk;
2405 nvavp->bsev_clk = clk_get(&ndev->dev, "bsev");
2406 if (IS_ERR(nvavp->bsev_clk)) {
2407 dev_err(&ndev->dev, "cannot get bsev clock\n");
2409 goto err_get_bsev_clk;
2412 nvavp->sclk = clk_get(&ndev->dev, "sclk");
2413 if (IS_ERR(nvavp->sclk)) {
2414 dev_err(&ndev->dev, "cannot get avp.sclk clock\n");
2419 nvavp->emc_clk = clk_get(&ndev->dev, "emc");
2420 if (IS_ERR(nvavp->emc_clk)) {
2421 dev_err(&ndev->dev, "cannot get emc clock\n");
2423 goto err_get_emc_clk;
2426 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
2427 nvavp->bsea_clk = clk_get(&ndev->dev, "bsea");
2428 if (IS_ERR(nvavp->bsea_clk)) {
2429 dev_err(&ndev->dev, "cannot get bsea clock\n");
2431 goto err_get_bsea_clk;
2434 nvavp->vcp_clk = clk_get(&ndev->dev, "vcp");
2435 if (IS_ERR(nvavp->vcp_clk)) {
2436 dev_err(&ndev->dev, "cannot get vcp clock\n");
2438 goto err_get_vcp_clk;
/* Keep the AVP halted until a client initialises a channel. */
2442 nvavp->clk_enabled = 0;
2443 nvavp_halt_avp(nvavp);
2445 INIT_WORK(&nvavp->clock_disable_work, clock_disable_handler);
2447 nvavp->video_misc_dev.minor = MISC_DYNAMIC_MINOR;
2448 nvavp->video_misc_dev.name = "tegra_avpchannel";
2449 nvavp->video_misc_dev.fops = &tegra_video_nvavp_fops;
2450 nvavp->video_misc_dev.mode = S_IRWXUGO;
2451 nvavp->video_misc_dev.parent = &ndev->dev;
2453 ret = misc_register(&nvavp->video_misc_dev);
2455 dev_err(&ndev->dev, "unable to register misc device!\n");
2459 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
2460 INIT_WORK(&nvavp->app_notify_work, app_notify_handler);
2461 nvavp->audio_misc_dev.minor = MISC_DYNAMIC_MINOR;
2462 nvavp->audio_misc_dev.name = "tegra_audio_avpchannel";
2463 nvavp->audio_misc_dev.fops = &tegra_audio_nvavp_fops;
2464 nvavp->audio_misc_dev.mode = S_IRWXUGO;
2465 nvavp->audio_misc_dev.parent = &ndev->dev;
2467 ret = misc_register(&nvavp->audio_misc_dev);
2469 dev_err(&ndev->dev, "unable to register misc device!\n");
2470 goto err_audio_misc_reg;
/* IRQ stays disabled until nvavp_os_init() enables it. */
2474 ret = request_irq(irq, nvavp_mbox_pending_isr, 0,
2475 TEGRA_NVAVP_NAME, nvavp);
2477 dev_err(&ndev->dev, "cannot register irq handler\n");
2478 goto err_req_irq_pend;
2480 disable_irq(nvavp->mbox_from_avp_pend_irq);
2482 nvavp->nvhost_dev = ndev;
2483 platform_set_drvdata(ndev, nvavp);
2485 tegra_pd_add_device(&ndev->dev);
2486 pm_runtime_use_autosuspend(&ndev->dev);
2487 pm_runtime_set_autosuspend_delay(&ndev->dev, 2000);
2488 pm_runtime_enable(&ndev->dev);
2490 ret = device_create_file(&ndev->dev, &dev_attr_boost_sclk);
2493 "%s: device_create_file failed\n", __func__);
2494 goto err_req_irq_pend;
2496 nvavp_info_ctx = nvavp;
2498 /* Add PM QoS request but leave it as default value */
2499 pm_qos_add_request(&nvavp->min_cpu_freq_req,
2500 PM_QOS_CPU_FREQ_MIN,
2501 PM_QOS_DEFAULT_VALUE);
2502 pm_qos_add_request(&nvavp->min_online_cpus_req,
2503 PM_QOS_MIN_ONLINE_CPUS,
2504 PM_QOS_DEFAULT_VALUE);
/* Error unwind: deregister misc devices and release clocks in reverse. */
2509 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
2510 misc_deregister(&nvavp->audio_misc_dev);
2513 misc_deregister(&nvavp->video_misc_dev);
2515 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
2516 clk_put(nvavp->vcp_clk);
2518 clk_put(nvavp->bsea_clk);
2521 clk_put(nvavp->emc_clk);
2523 clk_put(nvavp->sclk);
2525 clk_put(nvavp->bsev_clk);
2527 clk_put(nvavp->vde_clk);
2529 clk_put(nvavp->cop_clk);
/*
 * tegra_nvavp_remove() - platform driver remove.
 * Refuses removal while clients still hold references, then unloads
 * firmware, removes sysfs/misc devices, releases clocks and tears down
 * PM QoS requests.
 */
2536 static int tegra_nvavp_remove(struct platform_device *ndev)
2538 struct nvavp_info *nvavp = platform_get_drvdata(ndev);
2543 mutex_lock(&nvavp->open_lock);
2544 if (nvavp->refcount) {
2545 mutex_unlock(&nvavp->open_lock);
2548 mutex_unlock(&nvavp->open_lock);
2550 nvavp_unload_ucode(nvavp);
2551 nvavp_unload_os(nvavp);
2553 device_remove_file(&ndev->dev, &dev_attr_boost_sclk);
2555 misc_deregister(&nvavp->video_misc_dev);
2557 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
2558 misc_deregister(&nvavp->audio_misc_dev);
2559 clk_put(nvavp->vcp_clk);
2560 clk_put(nvavp->bsea_clk);
2562 clk_put(nvavp->bsev_clk);
2563 clk_put(nvavp->vde_clk);
2564 clk_put(nvavp->cop_clk);
2566 clk_put(nvavp->emc_clk);
2567 clk_put(nvavp->sclk);
/* NOTE(review): IS_ERR_OR_NULL() on the *address of* an embedded member
 * can never be NULL — this check is effectively always true; the intent
 * was probably to track whether pm_qos_add_request() ran. */
2569 if (!IS_ERR_OR_NULL(&nvavp->min_cpu_freq_req)) {
2570 pm_qos_update_request(&nvavp->min_cpu_freq_req,
2571 PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE);
2572 pm_qos_remove_request(&nvavp->min_cpu_freq_req);
2574 if (!IS_ERR_OR_NULL(&nvavp->min_online_cpus_req)) {
2575 pm_qos_update_request(&nvavp->min_online_cpus_req,
2576 PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE);
2577 pm_qos_remove_request(&nvavp->min_online_cpus_req);
/*
 * tegra_nvavp_runtime_suspend() - runtime-PM suspend hook.
 * With active clients, only uninitialises the AVP when the hardware is
 * idle (audio channel checked when audio support is enabled); refuses
 * suspend otherwise.
 */
2585 static int tegra_nvavp_runtime_suspend(struct device *dev)
2587 struct platform_device *pdev = to_platform_device(dev);
2588 struct nvavp_info *nvavp = platform_get_drvdata(pdev);
2591 mutex_lock(&nvavp->open_lock);
2593 if (nvavp->refcount) {
2594 if (!nvavp->clk_enabled) {
2595 #if defined(CONFIG_TEGRA_NVAVP_AUDIO)
2596 if (nvavp_check_idle(nvavp, NVAVP_AUDIO_CHANNEL))
2597 nvavp_uninit(nvavp);
2601 nvavp_uninit(nvavp);
2609 trace_tegra_nvavp_runtime_suspend(nvavp->refcount, nvavp->video_refcnt,
2610 nvavp->audio_refcnt);
2612 mutex_unlock(&nvavp->open_lock);
/*
 * Runtime-PM resume hook: re-initialize each channel that still has
 * active clients (per-channel refcounts survived the suspend).
 * Audio channel handling is compiled in only with CONFIG_TEGRA_NVAVP_AUDIO.
 */
static int tegra_nvavp_runtime_resume(struct device *dev)
	struct platform_device *pdev = to_platform_device(dev);
	struct nvavp_info *nvavp = platform_get_drvdata(pdev);

	mutex_lock(&nvavp->open_lock);

	if (nvavp->video_refcnt)
		nvavp_init(nvavp, NVAVP_VIDEO_CHANNEL);
#if defined(CONFIG_TEGRA_NVAVP_AUDIO)
	if (nvavp->audio_refcnt)
		nvavp_init(nvavp, NVAVP_AUDIO_CHANNEL);

	trace_tegra_nvavp_runtime_resume(nvavp->refcount, nvavp->video_refcnt,
				nvavp->audio_refcnt);

	mutex_unlock(&nvavp->open_lock);
/*
 * System (LP0) resume hook: drop the extra VDE power-partition reference
 * taken in tegra_nvavp_suspend(), then perform a normal runtime resume.
 */
static int tegra_nvavp_resume(struct device *dev)
	struct platform_device *pdev = to_platform_device(dev);
	struct nvavp_info *nvavp = platform_get_drvdata(pdev);

	/* To balance the unpowergate in suspend routine */
	nvavp_powergate_vde(nvavp);

	tegra_nvavp_runtime_resume(dev);
/*
 * System (LP0) suspend hook: run the runtime-suspend path, then leave the
 * VDE partition powered as a workaround (balanced by tegra_nvavp_resume()).
 */
static int tegra_nvavp_suspend(struct device *dev)
	struct platform_device *pdev = to_platform_device(dev);
	struct nvavp_info *nvavp = platform_get_drvdata(pdev);

	ret = tegra_nvavp_runtime_suspend(dev);

	/* WAR: Leave partition vde on before suspend so that access
	 * to BSEV registers immediately after LP0 exit won't fail.
	 */
	nvavp_unpowergate_vde(nvavp);
/* PM callbacks: both runtime-PM and system sleep entry points. */
static const struct dev_pm_ops nvavp_pm_ops = {
	.runtime_suspend = tegra_nvavp_runtime_suspend,
	.runtime_resume = tegra_nvavp_runtime_resume,
	.suspend = tegra_nvavp_suspend,
	.resume = tegra_nvavp_resume,

#define NVAVP_PM_OPS	(&nvavp_pm_ops)

#else /* CONFIG_PM */

/* No PM support configured: register the driver without pm ops. */
#define NVAVP_PM_OPS	NULL

#endif /* CONFIG_PM */
/* Platform driver binding; matched via DT table or by name. */
static struct platform_driver tegra_nvavp_driver = {
		.name = TEGRA_NVAVP_NAME,
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(tegra_nvavp_of_match),
	.probe = tegra_nvavp_probe,
	.remove = tegra_nvavp_remove,
/* Module entry: register the platform driver. */
static int __init tegra_nvavp_init(void)
	return platform_driver_register(&tegra_nvavp_driver);
/* Module exit: unregister the platform driver. */
static void __exit tegra_nvavp_exit(void)
	platform_driver_unregister(&tegra_nvavp_driver);
module_init(tegra_nvavp_init);
module_exit(tegra_nvavp_exit);

/* Module metadata; dual-licensed per the header notice. */
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("Channel based AVP driver for Tegra");
MODULE_VERSION("1.0");
MODULE_LICENSE("Dual BSD/GPL");