2 * Tegra Graphics Host Client Module
4 * Copyright (c) 2010-2016, NVIDIA Corporation. All rights reserved.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 #include <linux/slab.h>
20 #include <linux/string.h>
21 #include <linux/spinlock.h>
23 #include <linux/cdev.h>
24 #include <linux/uaccess.h>
25 #include <linux/file.h>
26 #include <linux/clk.h>
27 #include <linux/hrtimer.h>
28 #include <linux/export.h>
29 #include <linux/firmware.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/tegra-soc.h>
32 #include <linux/anon_inodes.h>
34 #include <trace/events/nvhost.h>
37 #include <linux/string.h>
39 #include <linux/nvhost.h>
40 #include <linux/nvhost_ioctl.h>
42 #include <mach/gpufuse.h>
45 #include "bus_client.h"
47 #include "class_ids.h"
48 #include "chip_support.h"
49 #include "nvhost_acm.h"
51 #include "nvhost_syncpt.h"
52 #include "nvhost_channel.h"
53 #include "nvhost_job.h"
54 #include "nvhost_sync.h"
55 #include "vhost/vhost.h"
/*
 * nvhost_check_bondout - report whether hardware unit @id is bonded out
 * (unavailable) on this platform.
 *
 * Only consults the bond-out fuses when CONFIG_NVHOST_BONDOUT_CHECK is
 * enabled and the platform is not real silicon.
 * NOTE(review): source is truncated here (missing #endif/return/braces);
 * comments only, code untouched.
 */
57 int nvhost_check_bondout(unsigned int id)
59 #ifdef CONFIG_NVHOST_BONDOUT_CHECK
60 if (!tegra_platform_is_silicon())
61 return tegra_bonded_out_dev(id);
65 EXPORT_SYMBOL(nvhost_check_bondout);
/*
 * validate_reg - validate a register window before MMIO access.
 *
 * Checks that the span [@offset, @offset + 4 * @count) lies entirely
 * inside the device's first MEM resource; the second clause guards
 * against arithmetic wrap-around of offset + 4 * count.
 */
67 static int validate_reg(struct platform_device *ndev, u32 offset, int count)
72 /* check if offset is u32 aligned */
76 r = platform_get_resource(ndev, IORESOURCE_MEM, 0);
78 dev_err(&ndev->dev, "failed to get memory resource\n");
82 if (offset + 4 * count > resource_size(r)
83 || (offset + 4 * count < offset))
/*
 * get_aperture - return the ioremapped register aperture @index that was
 * stored in the device's pdata (see nvhost_device_get_resources()).
 */
89 void __iomem *get_aperture(struct platform_device *pdev, int index)
91 struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
93 return pdata->aperture[index];
/*
 * host1x_writel - write value @v to register offset @r in the device's
 * first (index 0) aperture, with debug tracing.
 */
96 void host1x_writel(struct platform_device *pdev, u32 r, u32 v)
98 void __iomem *addr = get_aperture(pdev, 0) + r;
99 nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", pdev->name, r, v);
102 EXPORT_SYMBOL_GPL(host1x_writel);
/*
 * host1x_readl - read register offset @r from the device's first
 * aperture and return the value (the actual readl is in a line lost to
 * extraction; the second trace logs the value read into 'v').
 */
104 u32 host1x_readl(struct platform_device *pdev, u32 r)
106 void __iomem *addr = get_aperture(pdev, 0) + r;
109 nvhost_dbg(dbg_reg, " d=%s r=0x%x", pdev->name, r);
111 nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", pdev->name, r, v);
115 EXPORT_SYMBOL_GPL(host1x_readl);
/*
 * host1x_channel_writel - write @v to offset @r inside a channel's own
 * register aperture, with debug tracing.
 */
117 void host1x_channel_writel(struct nvhost_channel *ch, u32 r, u32 v)
119 void __iomem *addr = ch->aperture + r;
120 nvhost_dbg(dbg_reg, " chid=%d r=0x%x v=0x%x", ch->chid, r, v);
123 EXPORT_SYMBOL_GPL(host1x_channel_writel);
/*
 * host1x_channel_readl - read offset @r from a channel's register
 * aperture and return the value (readl line lost to extraction).
 */
125 u32 host1x_channel_readl(struct nvhost_channel *ch, u32 r)
127 void __iomem *addr = ch->aperture + r;
130 nvhost_dbg(dbg_reg, " chid=%d r=0x%x", ch->chid, r);
132 nvhost_dbg(dbg_reg, " chid=%d r=0x%x v=0x%x", ch->chid, r, v);
136 EXPORT_SYMBOL_GPL(host1x_channel_readl);
/*
 * host1x_sync_writel - write @v to offset @r in the host1x syncpoint
 * aperture of the nvhost master, with debug tracing.
 */
138 void host1x_sync_writel(struct nvhost_master *dev, u32 r, u32 v)
140 void __iomem *addr = dev->sync_aperture + r;
141 nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", dev->dev->name, r, v);
144 EXPORT_SYMBOL_GPL(host1x_sync_writel);
/*
 * host1x_sync_readl - read offset @r from the host1x syncpoint aperture
 * and return the value (readl line lost to extraction).
 */
146 u32 host1x_sync_readl(struct nvhost_master *dev, u32 r)
148 void __iomem *addr = dev->sync_aperture + r;
151 nvhost_dbg(dbg_reg, " d=%s r=0x%x", dev->dev->name, r);
153 nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", dev->dev->name, r, v);
157 EXPORT_SYMBOL_GPL(host1x_sync_readl);
/*
 * nvhost_read_module_regs - read @count consecutive u32 registers
 * starting at @offset into @values.
 *
 * Validates the window, takes a power reference (nvhost_module_busy)
 * for the duration of the reads, then drops it. Returns 0 or a negative
 * errno from validation/power-up (error paths lost to extraction).
 */
159 int nvhost_read_module_regs(struct platform_device *ndev,
160 u32 offset, int count, u32 *values)
165 err = validate_reg(ndev, offset, count);
169 err = nvhost_module_busy(ndev);
174 *(values++) = host1x_readl(ndev, offset);
178 nvhost_module_idle(ndev);
/*
 * nvhost_write_module_regs - write @count consecutive u32 registers
 * starting at @offset from @values.
 *
 * Mirror of nvhost_read_module_regs(): validate the window, hold a
 * power reference across the writes, then idle the module.
 */
183 int nvhost_write_module_regs(struct platform_device *ndev,
184 u32 offset, int count, const u32 *values)
189 err = validate_reg(ndev, offset, count);
193 err = nvhost_module_busy(ndev);
198 host1x_writel(ndev, offset, *(values++));
202 nvhost_module_idle(ndev);
/*
 * struct nvhost_channel_userctx - per-open-fd state for a channel node.
 *
 * One instance is allocated per open of the channel device and stored
 * in filp->private_data; it is torn down in nvhost_channelrelease().
 */
207 struct nvhost_channel_userctx {
	/* mapped hardware channel (may be taken at open or at submit time) */
208 struct nvhost_channel *ch;
	/* whether a debug dump is emitted when a submit times out */
212 bool timeout_debug_dump;
	/* owning client device */
213 struct platform_device *pdev;
	/* per-instance host-managed syncpoint ids, indexed like pdata's */
214 u32 syncpts[NVHOST_MODULE_MAX_SYNCPTS];
215 u32 client_managed_syncpt;
217 /* error notifier used on channel submit timeout */
218 struct dma_buf *error_notifier_ref;
219 u64 error_notifier_offset;
221 /* lock to protect this structure from concurrent ioctl usage */
222 struct mutex ioctl_lock;
224 /* used for attaching to ctx list in device pdata */
225 struct list_head node;
/*
 * nvhost_channelrelease - file release hook for the channel node.
 *
 * Tears down everything __nvhost_channelopen() set up, in reverse:
 * unlink from pdata's userctx list, drop the ACM client, release the
 * error-notifier dma_buf, clear the channel-map identifier, undo the
 * exclusive-mode reservation, drop the channel reference (and, for
 * per-instance resource policy, the instance syncpoints), and re-enable
 * poweroff for keepalive devices.
 */
228 static int nvhost_channelrelease(struct inode *inode, struct file *filp)
230 struct nvhost_channel_userctx *priv = filp->private_data;
231 struct nvhost_device_data *pdata = platform_get_drvdata(priv->pdev);
232 struct nvhost_master *host = nvhost_get_host(pdata->pdev);
236 trace_nvhost_channel_release(dev_name(&priv->pdev->dev));
238 mutex_lock(&pdata->userctx_list_lock);
239 list_del(&priv->node);
240 mutex_unlock(&pdata->userctx_list_lock);
242 /* remove this client from acm */
243 nvhost_module_remove_client(priv->pdev, priv);
245 /* drop error notifier reference */
246 if (priv->error_notifier_ref)
247 dma_buf_put(priv->error_notifier_ref);
249 /* Clear the identifier */
250 if (pdata->resource_policy == RESOURCE_PER_DEVICE &&
252 identifier = (void *)pdata;
254 identifier = (void *)priv;
255 nvhost_channel_remove_identifier(pdata, identifier);
257 /* If the device is in exclusive mode, drop the reference */
258 if (pdata->exclusive)
259 pdata->num_mapped_chs--;
261 /* drop channel reference if we took one at open time */
262 if (pdata->resource_policy == RESOURCE_PER_DEVICE) {
263 nvhost_putchannel(priv->ch, 1);
265 /* drop instance syncpoints reference */
266 for (i = 0; i < NVHOST_MODULE_MAX_SYNCPTS; ++i) {
267 if (priv->syncpts[i]) {
268 nvhost_syncpt_put_ref(&host->syncpt,
270 priv->syncpts[i] = 0;
274 if (priv->client_managed_syncpt) {
275 nvhost_syncpt_put_ref(&host->syncpt,
276 priv->client_managed_syncpt);
277 priv->client_managed_syncpt = 0;
	/* keepalive devices blocked poweroff at open; allow it again */
281 if (pdata->keepalive)
282 nvhost_module_enable_poweroff(priv->pdev);
/*
 * __nvhost_channelopen - common open path for the channel node.
 *
 * Called both from the regular chardev open (with @inode, @pdev NULL)
 * and from the NVHOST_IOCTL_CHANNEL_OPEN ioctl (with @pdev, @inode
 * NULL). Allocates the per-fd userctx, registers it with ACM, blocks
 * poweroff for keepalive devices, assigns a client id (server-side for
 * virtualized devices), and - for RESOURCE_PER_DEVICE policy - maps a
 * channel up front. On failure it unwinds via the fail_* labels (mostly
 * lost to extraction).
 */
288 static int __nvhost_channelopen(struct inode *inode,
289 struct platform_device *pdev,
292 struct nvhost_channel_userctx *priv;
293 struct nvhost_device_data *pdata, *host1x_pdata;
294 struct nvhost_master *host;
297 /* grab pdev and pdata based on inputs */
299 pdata = platform_get_drvdata(pdev);
301 pdata = container_of(inode->i_cdev,
302 struct nvhost_device_data, cdev);
307 /* ..and host1x specific data */
308 host1x_pdata = dev_get_drvdata(pdev->dev.parent);
309 host = nvhost_get_host(pdev);
311 trace_nvhost_channel_open(dev_name(&pdev->dev));
313 /* If the device is in exclusive mode, make channel reservation here */
314 if (pdata->exclusive) {
315 if (pdata->num_mapped_chs == pdata->num_channels)
317 pdata->num_mapped_chs++;
320 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
322 goto fail_allocate_priv;
323 filp->private_data = priv;
325 /* Register this client to acm */
326 if (nvhost_module_add_client(pdev, priv))
327 goto fail_add_client;
329 /* Keep devices with keepalive flag powered */
330 if (pdata->keepalive)
331 nvhost_module_disable_poweroff(pdev);
333 /* Check that the device can be powered */
334 ret = nvhost_module_busy(pdev);
337 nvhost_module_idle(pdev);
339 if (nvhost_dev_is_virtual(pdev)) {
340 /* If virtual, allocate a client id on the server side. This is
341 * needed for channel recovery, to distinguish which clients
345 int virt_moduleid = vhost_virt_moduleid(pdata->moduleid);
346 struct nvhost_virt_ctx *virt_ctx =
347 nvhost_get_virt_data(pdev);
349 if (virt_moduleid < 0) {
351 goto fail_virt_clientid;
355 vhost_channel_alloc_clientid(virt_ctx->handle,
	/* clientid 0 is reserved; treat it as allocation failure */
357 if (priv->clientid == 0) {
359 "vhost_channel_alloc_clientid failed\n");
361 goto fail_virt_clientid;
365 priv->clientid = atomic_add_return(1, &host->clientid);
367 priv->clientid = atomic_add_return(1, &host->clientid);
370 /* Initialize private structure */
371 priv->timeout = host1x_pdata->nvhost_timeout_default;
372 priv->priority = NVHOST_PRIORITY_MEDIUM;
373 priv->timeout_debug_dump = true;
374 mutex_init(&priv->ioctl_lock);
	/* no submit timeout on simulation/FPGA platforms */
377 if (!tegra_platform_is_silicon())
380 /* if we run in map-at-submit mode but device has override
381 * flag set, respect the override flag */
382 if (pdata->resource_policy == RESOURCE_PER_DEVICE) {
383 if (pdata->exclusive)
384 ret = nvhost_channel_map(pdata, &priv->ch, priv);
386 ret = nvhost_channel_map(pdata, &priv->ch, pdata);
388 pr_err("%s: failed to map channel, error: %d\n",
390 goto fail_get_channel;
394 INIT_LIST_HEAD(&priv->node);
395 mutex_lock(&pdata->userctx_list_lock);
396 list_add_tail(&priv->node, &pdata->userctx_list);
397 mutex_unlock(&pdata->userctx_list_lock);
	/* error unwind path (labels lost to extraction) */
404 if (pdata->keepalive)
405 nvhost_module_enable_poweroff(pdev);
406 nvhost_module_remove_client(pdev, priv);
410 if (pdata->exclusive)
411 pdata->num_mapped_chs--;
/* Chardev open hook: resolve the device from @inode (pdev passed NULL). */
416 static int nvhost_channelopen(struct inode *inode, struct file *filp)
418 return __nvhost_channelopen(inode, NULL, filp);
/*
 * nvhost_init_error_notifier - attach (or detach) a user-supplied error
 * notifier buffer to this channel context.
 *
 * The user passes a dma_buf fd (@args->mem) and an @args->offset into
 * it where a struct nvhost_notification lives. The buffer is validated
 * (the 'end < sizeof(...)' clause catches offset overflow), vmapped
 * once to zero the notification area, then the reference is kept in the
 * context so jobs can report submit timeouts into it. A zero handle
 * releases the old reference (path partially lost to extraction).
 */
421 static int nvhost_init_error_notifier(struct nvhost_channel_userctx *ctx,
422 struct nvhost_set_error_notifier *args)
424 struct dma_buf *dmabuf;
426 u64 end = args->offset + sizeof(struct nvhost_notification);
428 /* are we releasing old reference? */
430 if (ctx->error_notifier_ref)
431 dma_buf_put(ctx->error_notifier_ref);
432 ctx->error_notifier_ref = NULL;
436 /* take reference for the userctx */
437 dmabuf = dma_buf_get(args->mem);
438 if (IS_ERR(dmabuf)) {
439 pr_err("%s: Invalid handle: %d\n", __func__, args->mem);
443 if (end > dmabuf->size || end < sizeof(struct nvhost_notification)) {
445 pr_err("%s: invalid offset\n", __func__);
449 /* map handle and clear error notifier struct */
450 va = dma_buf_vmap(dmabuf);
453 pr_err("%s: Cannot map notifier handle\n", __func__);
457 memset(va + args->offset, 0, sizeof(struct nvhost_notification));
458 dma_buf_vunmap(dmabuf, va);
460 /* release old reference */
461 if (ctx->error_notifier_ref)
462 dma_buf_put(ctx->error_notifier_ref);
464 /* finally, store error notifier data */
465 ctx->error_notifier_ref = dmabuf;
466 ctx->error_notifier_offset = args->offset;
/*
 * get_job_fence - return the syncpoint fence value for syncpoint slot
 * @id of @job, adjusted for the extra "work done" increment that
 * devices with push_work_done add on the first syncpoint (adjustment
 * line lost to extraction).
 */
471 static inline u32 get_job_fence(struct nvhost_job *job, u32 id)
473 struct nvhost_channel *ch = job->ch;
474 struct nvhost_device_data *pdata = platform_get_drvdata(ch->dev);
475 u32 fence = job->sp[id].fence;
477 /* take into account work done increment */
478 if (pdata->push_work_done && id == 0)
481 /* otherwise the fence is valid "as is" */
/*
 * nvhost_ioctl_channel_submit - core of the SUBMIT ioctl.
 *
 * Builds an nvhost_job from userspace-provided arrays (cmdbufs, relocs,
 * waitchks, syncpt increments, optional per-cmdbuf class ids), verifies
 * that each requested syncpoint was previously reserved by this context
 * (or its channel, depending on resource policy), pins and submits the
 * job, and returns completion fences either per-syncpoint, as a sync
 * fence fd, or via the legacy single-fence field. Error paths unwind
 * through labels mostly lost to extraction.
 */
485 static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
486 struct nvhost_submit_args *args)
488 struct nvhost_job *job;
489 int num_cmdbufs = args->num_cmdbufs;
490 int num_relocs = args->num_relocs;
491 int num_waitchks = args->num_waitchks;
492 int num_syncpt_incrs = args->num_syncpt_incrs;
493 struct nvhost_cmdbuf __user *cmdbufs =
494 (struct nvhost_cmdbuf __user *)(uintptr_t)args->cmdbufs;
495 struct nvhost_cmdbuf __user *cmdbuf_exts =
496 (struct nvhost_cmdbuf __user *)(uintptr_t)args->cmdbuf_exts;
497 struct nvhost_reloc __user *relocs =
498 (struct nvhost_reloc __user *)(uintptr_t)args->relocs;
499 struct nvhost_reloc_shift __user *reloc_shifts =
500 (struct nvhost_reloc_shift __user *)
501 (uintptr_t)args->reloc_shifts;
502 struct nvhost_waitchk __user *waitchks =
503 (struct nvhost_waitchk __user *)(uintptr_t)args->waitchks;
504 struct nvhost_syncpt_incr __user *syncpt_incrs =
505 (struct nvhost_syncpt_incr __user *)
506 (uintptr_t)args->syncpt_incrs;
507 u32 __user *fences = (u32 __user *)(uintptr_t)args->fences;
508 u32 __user *class_ids = (u32 __user *)(uintptr_t)args->class_ids;
509 struct nvhost_device_data *pdata = platform_get_drvdata(ctx->pdev);
	/* pick the syncpoint table matching the resource policy */
511 const u32 *syncpt_array =
512 (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) ?
515 u32 *local_class_ids = NULL;
518 job = nvhost_job_alloc(ctx->ch,
526 job->num_relocs = args->num_relocs;
527 job->num_waitchk = args->num_waitchks;
528 job->num_syncpts = args->num_syncpt_incrs;
529 job->priority = ctx->priority;
530 job->clientid = ctx->clientid;
531 job->client_managed_syncpt =
532 (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) ?
533 ctx->client_managed_syncpt : ctx->ch->client_managed_syncpt;
535 /* copy error notifier settings for this job */
536 if (ctx->error_notifier_ref) {
537 get_dma_buf(ctx->error_notifier_ref);
538 job->error_notifier_ref = ctx->error_notifier_ref;
539 job->error_notifier_offset = ctx->error_notifier_offset;
542 /* mass copy class_ids */
543 if (args->class_ids) {
544 local_class_ids = kzalloc(sizeof(u32) * num_cmdbufs,
546 if (!local_class_ids) {
550 err = copy_from_user(local_class_ids, class_ids,
551 sizeof(u32) * num_cmdbufs);
558 for (i = 0; i < num_cmdbufs; ++i) {
559 struct nvhost_cmdbuf cmdbuf;
560 struct nvhost_cmdbuf_ext cmdbuf_ext;
561 u32 class_id = class_ids ? local_class_ids[i] : 0;
563 err = copy_from_user(&cmdbuf, cmdbufs + i, sizeof(cmdbuf));
	/* pre_fence of -1 means "no prefence" */
567 cmdbuf_ext.pre_fence = -1;
569 err = copy_from_user(&cmdbuf_ext,
570 cmdbuf_exts + i, sizeof(cmdbuf_ext));
572 cmdbuf_ext.pre_fence = -1;
574 /* verify that the given class id is valid for this engine */
576 class_id != pdata->class &&
577 class_id != NV_HOST1X_CLASS_ID) {
582 nvhost_job_add_gather(job, cmdbuf.mem, cmdbuf.words,
583 cmdbuf.offset, class_id,
584 cmdbuf_ext.pre_fence);
587 kfree(local_class_ids);
588 local_class_ids = NULL;
590 err = copy_from_user(job->relocarray,
591 relocs, sizeof(*relocs) * num_relocs);
595 err = copy_from_user(job->relocshiftarray,
596 reloc_shifts, sizeof(*reloc_shifts) * num_relocs);
600 err = copy_from_user(job->waitchk,
601 waitchks, sizeof(*waitchks) * num_waitchks);
606 * Go through each syncpoint from userspace. Here we:
607 * - Copy syncpoint information
608 * - Validate each syncpoint
609 * - Determine the index of hwctx syncpoint in the table
612 for (i = 0; i < num_syncpt_incrs; ++i) {
613 struct nvhost_syncpt_incr sp;
618 err = copy_from_user(&sp, syncpt_incrs + i, sizeof(sp));
622 /* Validate the trivial case */
623 if (sp.syncpt_id == 0) {
628 /* ..and then ensure that the syncpoints have been reserved
630 for (j = 0; j < NVHOST_MODULE_MAX_SYNCPTS; j++) {
631 if (syncpt_array[j] == sp.syncpt_id) {
642 /* Store and get a reference */
643 job->sp[i].id = sp.syncpt_id;
644 job->sp[i].incrs = sp.syncpt_incrs;
647 trace_nvhost_channel_submit(ctx->pdev->name,
648 job->num_gathers, job->num_relocs, job->num_waitchk,
	/* pin buffers while the engine is powered */
652 err = nvhost_module_busy(ctx->pdev);
656 err = nvhost_job_pin(job, &nvhost_get_host(ctx->pdev)->syncpt);
657 nvhost_module_idle(ctx->pdev);
662 job->timeout = min(ctx->timeout, args->timeout);
664 job->timeout = ctx->timeout;
665 job->timeout_debug_dump = ctx->timeout_debug_dump;
667 err = nvhost_channel_submit(job);
671 /* Deliver multiple fences back to the userspace */
673 for (i = 0; i < num_syncpt_incrs; ++i) {
674 u32 fence = get_job_fence(job, i);
675 err = copy_to_user(fences, &fence, sizeof(u32));
681 /* Deliver the fence using the old mechanism _only_ if a single
682 * syncpoint is used. */
684 if (args->flags & BIT(NVHOST_SUBMIT_FLAG_SYNC_FENCE_FD)) {
685 struct nvhost_ctrl_sync_fence_info pts[num_syncpt_incrs];
687 for (i = 0; i < num_syncpt_incrs; i++) {
688 pts[i].id = job->sp[i].id;
689 pts[i].thresh = get_job_fence(job, i);
692 err = nvhost_sync_create_fence_fd(ctx->pdev,
693 pts, num_syncpt_incrs, "fence", &args->fence);
696 } else if (num_syncpt_incrs == 1)
697 args->fence = get_job_fence(job, 0);
	/* error unwind (labels lost to extraction) */
706 nvhost_job_unpin(job);
709 kfree(local_class_ids);
711 nvhost_err(&pdata->pdev->dev, "failed with err %d\n", err);
/*
 * moduleid_to_index - translate a module id to the device's clock-table
 * index; falls back to clock 0 for unknown ids (see comment below).
 */
716 static int moduleid_to_index(struct platform_device *dev, u32 moduleid)
719 struct nvhost_device_data *pdata = platform_get_drvdata(dev);
721 for (i = 0; i < NVHOST_MODULE_MAX_CLOCKS; i++) {
722 if (pdata->clocks[i].moduleid == moduleid)
726 /* Old user space is sending a random number in args. Return clock
727 * zero in these cases. */
/*
 * nvhost_ioctl_channel_set_rate - SET_CLK_RATE ioctl helper.
 *
 * The moduleid argument packs both a module id and a clock attribute
 * (rate vs. bandwidth etc.) into bitfields; unpack them and forward to
 * nvhost_module_set_rate(). On pre-silicon platforms failures are only
 * logged, not propagated.
 */
731 static int nvhost_ioctl_channel_set_rate(struct nvhost_channel_userctx *ctx,
732 struct nvhost_clk_rate_args *arg)
734 u32 moduleid = (arg->moduleid >> NVHOST_MODULE_ID_BIT_POS)
735 & ((1 << NVHOST_MODULE_ID_BIT_WIDTH) - 1);
736 u32 attr = (arg->moduleid >> NVHOST_CLOCK_ATTR_BIT_POS)
737 & ((1 << NVHOST_CLOCK_ATTR_BIT_WIDTH) - 1);
738 int index = moduleid ?
739 moduleid_to_index(ctx->pdev, moduleid) : 0;
742 err = nvhost_module_set_rate(ctx->pdev, ctx, arg->rate, index, attr);
743 if (!tegra_platform_is_silicon() && err) {
744 nvhost_dbg(dbg_clk, "ignoring error: module=%u, attr=%u, index=%d, err=%d",
745 moduleid, attr, index, err);
/*
 * nvhost_ioctl_channel_get_rate - GET_CLK_RATE ioctl helper: read the
 * current rate of the clock mapped to @moduleid into *@rate. On
 * pre-silicon platforms errors are swallowed and a value is faked.
 */
752 static int nvhost_ioctl_channel_get_rate(struct nvhost_channel_userctx *ctx,
753 u32 moduleid, u32 *rate)
755 int index = moduleid ? moduleid_to_index(ctx->pdev, moduleid) : 0;
758 err = nvhost_module_get_rate(ctx->pdev, (unsigned long *)rate, index);
759 if (!tegra_platform_is_silicon() && err) {
760 nvhost_dbg(dbg_clk, "ignoring error: module=%u, rate=%u, error=%d",
761 moduleid, *rate, err);
763 /* fake the return value */
/*
 * nvhost_ioctl_channel_module_regrdwr - MODULE_REGRDWR ioctl helper.
 *
 * For each user-supplied offset, reads or writes a block of
 * block_size/4 registers, batching user copies through a bounce buffer
 * of at most 64 words. Virtualized devices forward the whole request to
 * the server instead. block_size must be u32-aligned.
 */
770 static int nvhost_ioctl_channel_module_regrdwr(
771 struct nvhost_channel_userctx *ctx,
772 struct nvhost_ctrl_module_regrdwr_args *args)
774 u32 num_offsets = args->num_offsets;
775 u32 __user *offsets = (u32 __user *)(uintptr_t)args->offsets;
776 u32 __user *values = (u32 __user *)(uintptr_t)args->values;
778 struct platform_device *ndev;
780 trace_nvhost_ioctl_channel_module_regrdwr(args->id,
781 args->num_offsets, args->write);
783 /* Check that there is something to read and that block size is
785 if (num_offsets == 0 || args->block_size & 3)
790 if (nvhost_dev_is_virtual(ndev))
791 return vhost_rdwr_module_regs(ndev, num_offsets,
792 args->block_size, offsets, values, args->write);
794 while (num_offsets--) {
797 int remaining = args->block_size >> 2;
799 if (get_user(offs, offsets))
	/* copy at most 64 words per round trip to userspace */
804 int batch = min(remaining, 64);
806 if (copy_from_user(vals, values,
807 batch * sizeof(u32)))
810 err = nvhost_write_module_regs(ndev,
815 err = nvhost_read_module_regs(ndev,
820 if (copy_to_user(values, vals,
821 batch * sizeof(u32)))
826 offs += batch * sizeof(u32);
/*
 * create_mask - build a 32-bit bitmask from an array of ids, skipping
 * zero entries and ids that do not fit in a u32 mask (> 31).
 */
834 static u32 create_mask(u32 *words, int num)
838 for (i = 0; i < num; i++) {
839 if (!words[i] || words[i] > 31)
841 word |= BIT(words[i]);
/*
 * nvhost_ioctl_channel_get_syncpt_mask - bitmask of syncpoints owned by
 * this context (per-instance policy) or by its channel (per-device).
 */
847 static u32 nvhost_ioctl_channel_get_syncpt_mask(
848 struct nvhost_channel_userctx *priv)
850 struct nvhost_device_data *pdata = platform_get_drvdata(priv->pdev);
853 if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE)
854 mask = create_mask(priv->syncpts, NVHOST_MODULE_MAX_SYNCPTS);
856 mask = create_mask(priv->ch->syncpts,
857 NVHOST_MODULE_MAX_SYNCPTS);
/*
 * nvhost_ioctl_channel_get_syncpt_channel - lazily allocate (and cache
 * in the channel) the host-managed syncpoint for slot @index, under the
 * channel's syncpts_lock.
 */
862 static u32 nvhost_ioctl_channel_get_syncpt_channel(struct nvhost_channel *ch,
863 struct nvhost_device_data *pdata, u32 index)
867 mutex_lock(&ch->syncpts_lock);
869 /* if we already have required syncpt then return it ... */
870 id = ch->syncpts[index];
874 /* ... otherwise get a new syncpt dynamically */
875 id = nvhost_get_syncpt_host_managed(pdata->pdev, index, NULL);
879 /* ... and store it for further references */
880 ch->syncpts[index] = id;
883 mutex_unlock(&ch->syncpts_lock);
/*
 * nvhost_ioctl_channel_get_syncpt_instance - per-instance variant of
 * the above: the syncpoint is cached in the userctx instead of the
 * channel (no lock; the ioctl path is serialized per fd).
 */
887 static u32 nvhost_ioctl_channel_get_syncpt_instance(
888 struct nvhost_channel_userctx *ctx,
889 struct nvhost_device_data *pdata, u32 index)
893 /* if we already have required syncpt then return it ... */
894 if (ctx->syncpts[index]) {
895 id = ctx->syncpts[index];
899 /* ... otherwise get a new syncpt dynamically */
900 id = nvhost_get_syncpt_host_managed(pdata->pdev, index, NULL);
904 /* ... and store it for further references */
905 ctx->syncpts[index] = id;
/*
 * nvhost_ioctl_channel_get_client_syncpt - return (allocating on first
 * use) the client-managed syncpoint for this context or channel.
 *
 * The user may supply a name; it is combined with the device name into
 * "<dev>_<name>" for the syncpoint. Storage location again depends on
 * the resource policy: userctx for per-instance, channel (under its
 * syncpts_lock) otherwise.
 */
910 static int nvhost_ioctl_channel_get_client_syncpt(
911 struct nvhost_channel_userctx *ctx,
912 struct nvhost_get_client_managed_syncpt_arg *args)
914 struct nvhost_device_data *pdata = platform_get_drvdata(ctx->pdev);
915 const char __user *args_name =
916 (const char __user *)(uintptr_t)args->name;
920 /* prepare syncpoint name (in case it is needed) */
922 if (strncpy_from_user(name, args_name, sizeof(name)) < 0)
924 name[sizeof(name) - 1] = '\0';
929 snprintf(set_name, sizeof(set_name),
930 "%s_%s", dev_name(&ctx->pdev->dev), name);
932 if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
933 if (!ctx->client_managed_syncpt)
934 ctx->client_managed_syncpt =
935 nvhost_get_syncpt_client_managed(pdata->pdev,
937 args->value = ctx->client_managed_syncpt;
939 struct nvhost_channel *ch = ctx->ch;
940 mutex_lock(&ch->syncpts_lock);
941 if (!ch->client_managed_syncpt)
942 ch->client_managed_syncpt =
943 nvhost_get_syncpt_client_managed(pdata->pdev,
945 mutex_unlock(&ch->syncpts_lock);
946 args->value = ch->client_managed_syncpt;
/*
 * nvhost_channelctl - unlocked_ioctl (and compat_ioctl) entry point for
 * the channel node.
 *
 * Validates the ioctl number/size, copies the argument into a stack
 * buffer for _IOC_WRITE commands, serializes all commands on the fd via
 * priv->ioctl_lock, dispatches on cmd, and finally copies the buffer
 * back for _IOC_READ commands. The SUBMIT cases additionally map a
 * channel for the duration of the call and synchronize the per-instance
 * syncpoint table into it. Many break/return/brace lines are lost to
 * extraction; code below is untouched.
 */
955 static long nvhost_channelctl(struct file *filp,
956 unsigned int cmd, unsigned long arg)
958 struct nvhost_channel_userctx *priv = filp->private_data;
959 struct nvhost_master *host;
961 u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
964 if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
965 (_IOC_NR(cmd) == 0) ||
966 (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
967 (_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
970 if (_IOC_DIR(cmd) & _IOC_WRITE) {
971 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
975 /* serialize calls from this fd */
976 mutex_lock(&priv->ioctl_lock);
978 pr_warn("Channel already unmapped\n");
979 mutex_unlock(&priv->ioctl_lock);
983 host = nvhost_get_host(priv->pdev);
984 dev = &priv->pdev->dev;
	/* OPEN: clone this device into a fresh fd via an anon inode */
986 case NVHOST_IOCTL_CHANNEL_OPEN:
992 err = get_unused_fd_flags(O_RDWR);
997 name = kasprintf(GFP_KERNEL, "nvhost-%s-fd%d",
1005 file = anon_inode_getfile(name, filp->f_op, NULL, O_RDWR);
1008 err = PTR_ERR(file);
1012 fd_install(fd, file);
1014 err = __nvhost_channelopen(NULL, priv->pdev, file);
1021 ((struct nvhost_channel_open_args *)buf)->channel_fd = fd;
1024 case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
1026 ((struct nvhost_get_param_args *)buf)->value =
1027 nvhost_ioctl_channel_get_syncpt_mask(priv);
1030 case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINT:
1032 struct nvhost_device_data *pdata =
1033 platform_get_drvdata(priv->pdev);
1034 struct nvhost_get_param_arg *arg =
1035 (struct nvhost_get_param_arg *)buf;
1037 if (arg->param >= NVHOST_MODULE_MAX_SYNCPTS) {
1042 if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE)
1043 arg->value = nvhost_ioctl_channel_get_syncpt_instance(
1044 priv, pdata, arg->param);
1046 arg->value = nvhost_ioctl_channel_get_syncpt_channel(
1047 priv->ch, pdata, arg->param);
1054 case NVHOST_IOCTL_CHANNEL_GET_CLIENT_MANAGED_SYNCPOINT:
1056 err = nvhost_ioctl_channel_get_client_syncpt(priv,
1057 (struct nvhost_get_client_managed_syncpt_arg *)buf);
1060 case NVHOST_IOCTL_CHANNEL_FREE_CLIENT_MANAGED_SYNCPOINT:
	/* waitbases are deprecated: always report 0 */
1062 case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
1064 ((struct nvhost_get_param_args *)buf)->value = 0;
1067 case NVHOST_IOCTL_CHANNEL_GET_WAITBASE:
1072 case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
1074 struct nvhost_device_data *pdata = \
1075 platform_get_drvdata(priv->pdev);
1076 ((struct nvhost_get_param_args *)buf)->value =
1077 create_mask(pdata->modulemutexes,
1078 NVHOST_MODULE_MAX_MODMUTEXES);
1081 case NVHOST_IOCTL_CHANNEL_GET_MODMUTEX:
1083 struct nvhost_device_data *pdata = \
1084 platform_get_drvdata(priv->pdev);
1085 struct nvhost_get_param_arg *arg =
1086 (struct nvhost_get_param_arg *)buf;
1088 if (arg->param >= NVHOST_MODULE_MAX_MODMUTEXES ||
1089 !pdata->modulemutexes[arg->param]) {
1094 arg->value = pdata->modulemutexes[arg->param];
1097 case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
1099 case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
1101 struct nvhost_clk_rate_args *arg =
1102 (struct nvhost_clk_rate_args *)buf;
1104 err = nvhost_ioctl_channel_get_rate(priv,
1105 arg->moduleid, &arg->rate);
1108 case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
1110 struct nvhost_clk_rate_args *arg =
1111 (struct nvhost_clk_rate_args *)buf;
1113 /* if virtualized, client requests to change clock rate
	 * are skipped here - NOTE(review): confirm the server side
	 * handles rate management for virtual devices */
1116 if (nvhost_dev_is_virtual(priv->pdev))
1119 err = nvhost_ioctl_channel_set_rate(priv, arg);
1122 case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
1125 (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
1127 priv->timeout = timeout;
1128 dev_dbg(&priv->pdev->dev,
1129 "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
1130 __func__, priv->timeout, priv);
1133 case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
1134 ((struct nvhost_get_param_args *)buf)->value = false;
1136 case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
1138 (u32)((struct nvhost_set_priority_args *)buf)->priority;
	/* 32-bit compat REGRDWR: widen the args and reuse the helper */
1140 case NVHOST32_IOCTL_CHANNEL_MODULE_REGRDWR:
1142 struct nvhost32_ctrl_module_regrdwr_args *args32 =
1143 (struct nvhost32_ctrl_module_regrdwr_args *)buf;
1144 struct nvhost_ctrl_module_regrdwr_args args;
1145 args.id = args32->id;
1146 args.num_offsets = args32->num_offsets;
1147 args.block_size = args32->block_size;
1148 args.offsets = args32->offsets;
1149 args.values = args32->values;
1150 args.write = args32->write;
1151 err = nvhost_ioctl_channel_module_regrdwr(priv, &args);
1154 case NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR:
1155 err = nvhost_ioctl_channel_module_regrdwr(priv, (void *)buf);
	/* 32-bit compat SUBMIT: widen args, then same flow as SUBMIT */
1157 case NVHOST32_IOCTL_CHANNEL_SUBMIT:
1159 struct nvhost_device_data *pdata =
1160 platform_get_drvdata(priv->pdev);
1161 struct nvhost32_submit_args *args32 = (void *)buf;
1162 struct nvhost_submit_args args;
1165 if (pdata->resource_policy == RESOURCE_PER_DEVICE &&
1167 identifier = (void *)pdata;
1169 identifier = (void *)priv;
1171 memset(&args, 0, sizeof(args));
1172 args.submit_version = args32->submit_version;
1173 args.num_syncpt_incrs = args32->num_syncpt_incrs;
1174 args.num_cmdbufs = args32->num_cmdbufs;
1175 args.num_relocs = args32->num_relocs;
1176 args.num_waitchks = args32->num_waitchks;
1177 args.timeout = args32->timeout;
1178 args.syncpt_incrs = args32->syncpt_incrs;
1179 args.fence = args32->fence;
1181 args.cmdbufs = args32->cmdbufs;
1182 args.relocs = args32->relocs;
1183 args.reloc_shifts = args32->reloc_shifts;
1184 args.waitchks = args32->waitchks;
1185 args.class_ids = args32->class_ids;
1186 args.fences = args32->fences;
1188 /* first, get a channel */
1189 err = nvhost_channel_map(pdata, &priv->ch, identifier);
1193 /* ..then, synchronize syncpoint information.
1195 * This information is updated only in this ioctl and
1196 * channel destruction. We already hold channel
1197 * reference and this ioctl is serialized => no-one is
1198 * modifying the syncpoint field concurrently.
1200 * Synchronization is not destructing anything
1201 * in the structure; We can only allocate new
1202 * syncpoints, and hence old ones cannot be released
1203 * by following operation. If some syncpoint is stored
1204 * into the channel structure, it remains there. */
1206 if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
1207 memcpy(priv->ch->syncpts, priv->syncpts,
1208 sizeof(priv->syncpts));
1209 priv->ch->client_managed_syncpt =
1210 priv->client_managed_syncpt;
1214 err = nvhost_ioctl_channel_submit(priv, &args);
1216 /* ..and drop the local reference */
1217 nvhost_putchannel(priv->ch, 1);
1219 args32->fence = args.fence;
1223 case NVHOST_IOCTL_CHANNEL_SUBMIT:
1225 struct nvhost_device_data *pdata =
1226 platform_get_drvdata(priv->pdev);
1229 if (pdata->resource_policy == RESOURCE_PER_DEVICE &&
1231 identifier = (void *)pdata;
1233 identifier = (void *)priv;
1235 /* first, get a channel */
1236 err = nvhost_channel_map(pdata, &priv->ch, identifier);
1240 /* ..then, synchronize syncpoint information.
1242 * This information is updated only in this ioctl and
1243 * channel destruction. We already hold channel
1244 * reference and this ioctl is serialized => no-one is
1245 * modifying the syncpoint field concurrently.
1247 * Synchronization is not destructing anything
1248 * in the structure; We can only allocate new
1249 * syncpoints, and hence old ones cannot be released
1250 * by following operation. If some syncpoint is stored
1251 * into the channel structure, it remains there. */
1253 if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
1254 memcpy(priv->ch->syncpts, priv->syncpts,
1255 sizeof(priv->syncpts));
1256 priv->ch->client_managed_syncpt =
1257 priv->client_managed_syncpt;
1261 err = nvhost_ioctl_channel_submit(priv, (void *)buf);
1263 /* ..and drop the local reference */
1264 nvhost_putchannel(priv->ch, 1);
1268 case NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
1269 err = nvhost_init_error_notifier(priv,
1270 (struct nvhost_set_error_notifier *)buf);
1272 case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
1275 (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
1276 bool timeout_debug_dump = !((u32)
1277 ((struct nvhost_set_timeout_ex_args *)buf)->flags &
1278 (1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
1279 priv->timeout = timeout;
1280 priv->timeout_debug_dump = timeout_debug_dump;
1281 dev_dbg(&priv->pdev->dev,
1282 "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
1283 __func__, priv->timeout, priv);
1287 nvhost_dbg_info("unrecognized ioctl cmd: 0x%x", cmd);
1292 mutex_unlock(&priv->ioctl_lock);
	/* copy result back for read-direction ioctls */
1294 if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
1295 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
/* File operations for the per-device channel node; the same ioctl
 * handler serves both native and compat paths. */
1300 static const struct file_operations nvhost_channelops = {
1301 .owner = THIS_MODULE,
1302 .release = nvhost_channelrelease,
1303 .open = nvhost_channelopen,
1304 #ifdef CONFIG_COMPAT
1305 .compat_ioctl = nvhost_channelctl,
1307 .unlocked_ioctl = nvhost_channelctl
/*
 * get_device_name_for_dev - devfs name for @dev: the explicit
 * devfs_name override if set, else a fallback (lost to extraction,
 * presumably derived from the platform device name - confirm).
 */
1310 static const char *get_device_name_for_dev(struct platform_device *dev)
1312 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1314 if (pdata->devfs_name)
1315 return pdata->devfs_name;
/*
 * nvhost_client_device_create - register one chardev + sysfs device
 * node ("nvhost-<cdev_name><dev_name>[.<id>]") for a client device.
 *
 * Initializes and adds @cdev with @ops at @devno, then creates the
 * device node in the nvhost class. Returns the created struct device
 * (error handling partially lost to extraction).
 */
1320 static struct device *nvhost_client_device_create(
1321 struct platform_device *pdev, struct cdev *cdev,
1322 const char *cdev_name, dev_t devno,
1323 const struct file_operations *ops)
1325 struct nvhost_master *host = nvhost_get_host(pdev);
1326 const char *use_dev_name;
1334 cdev_init(cdev, ops);
1335 cdev->owner = THIS_MODULE;
1337 err = cdev_add(cdev, devno, 1);
1340 "failed to add cdev\n");
1343 use_dev_name = get_device_name_for_dev(pdev);
	/* the ".%d" suffix is only used when pdev->id is meaningful */
1345 dev = device_create(host->nvhost_class,
1348 IFACE_NAME "-%s%s" :
1349 IFACE_NAME "-%s%s.%d",
1350 cdev_name, use_dev_name, pdev->id);
1355 "failed to create %s %s device for %s\n",
1356 use_dev_name, cdev_name, pdev->name);
1363 #define NVHOST_NUM_CDEV 4
/*
 * nvhost_client_user_init - create the userspace interface for a client
 * device: allocate a minor-number region, then the main channel node
 * and (if the device provides ctrl_ops) a "ctrl-" node.
 */
1364 int nvhost_client_user_init(struct platform_device *dev)
1368 struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1370 /* reserve 3 minor #s for <dev>, and ctrl-<dev> */
1372 err = alloc_chrdev_region(&devno, 0, NVHOST_NUM_CDEV, IFACE_NAME);
1374 dev_err(&dev->dev, "failed to allocate devno\n");
1377 pdata->cdev_region = devno;
1379 pdata->node = nvhost_client_device_create(dev, &pdata->cdev,
1380 "", devno, &nvhost_channelops);
1381 if (pdata->node == NULL)
1384 /* module control (npn-channel based, global) interface */
1385 if (pdata->ctrl_ops) {
1387 pdata->ctrl_node = nvhost_client_device_create(dev,
1388 &pdata->ctrl_cdev, "ctrl-",
1389 devno, pdata->ctrl_ops);
1390 if (pdata->ctrl_node == NULL)
/*
 * nvhost_client_user_deinit - tear down the userspace interface:
 * destroy each device node that was created (main, as, ctrl), delete
 * its cdev, and release the reserved minor-number region.
 */
1399 static void nvhost_client_user_deinit(struct platform_device *dev)
1401 struct nvhost_master *nvhost_master = nvhost_get_host(dev);
1402 struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1405 device_destroy(nvhost_master->nvhost_class, pdata->cdev.dev);
1406 cdev_del(&pdata->cdev);
1409 if (pdata->as_node) {
1410 device_destroy(nvhost_master->nvhost_class, pdata->as_cdev.dev);
1411 cdev_del(&pdata->as_cdev);
1414 if (pdata->ctrl_node) {
1415 device_destroy(nvhost_master->nvhost_class,
1416 pdata->ctrl_cdev.dev);
1417 cdev_del(&pdata->ctrl_cdev);
1420 unregister_chrdev_region(pdata->cdev_region, NVHOST_NUM_CDEV);
/*
 * nvhost_client_device_init - bring up a client device end to end:
 * userctx bookkeeping, debugfs, chardev nodes, device-list
 * registration, optional scaling init, syncpoint reset (under a power
 * reference on the host1x master), DMA parameters, and optional hw_init
 * hook. On failure, unwinds the user interface and debugfs.
 */
1423 int nvhost_client_device_init(struct platform_device *dev)
1426 struct nvhost_master *nvhost_master = nvhost_get_host(dev);
1427 struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1429 mutex_init(&pdata->userctx_list_lock);
1430 INIT_LIST_HEAD(&pdata->userctx_list);
1432 /* Create debugfs directory for the device */
1433 nvhost_device_debug_init(dev);
1435 err = nvhost_client_user_init(dev);
1439 err = nvhost_device_list_add(dev);
1443 if (pdata->scaling_init)
1444 pdata->scaling_init(dev);
1446 /* reset syncpoint values for this unit */
1447 err = nvhost_module_busy(nvhost_master->dev);
1451 nvhost_module_idle(nvhost_master->dev);
1453 /* Initialize dma parameters */
1454 dev->dev.dma_parms = &pdata->dma_parms;
1455 dma_set_max_seg_size(&dev->dev, UINT_MAX);
1457 dev_info(&dev->dev, "initialized\n");
	/* map-at-submit devices switch the whole host to that policy */
1459 if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
1460 nvhost_master->info.channel_policy = MAP_CHANNEL_ON_SUBMIT;
1461 nvhost_update_characteristics(dev);
1465 return pdata->hw_init(dev);
	/* error unwind (labels lost to extraction) */
1470 /* Remove from nvhost device list */
1471 nvhost_device_list_remove(dev);
1474 dev_err(&dev->dev, "failed to init client device\n");
1475 nvhost_client_user_deinit(dev);
1476 nvhost_device_debug_deinit(dev);
1479 EXPORT_SYMBOL(nvhost_client_device_init);
/*
 * nvhost_client_device_release - inverse of nvhost_client_device_init:
 * release module resources, unregister from the device list, remove the
 * chardev/device nodes, and delete the debugfs entries.
 */
1481 int nvhost_client_device_release(struct platform_device *dev)
1483 /* Release nvhost module resources */
1484 nvhost_module_deinit(dev);
1486 /* Remove from nvhost device list */
1487 nvhost_device_list_remove(dev);
1489 /* Release chardev and device node for user space */
1490 nvhost_client_user_deinit(dev);
1492 /* Remove debugFS */
1493 nvhost_device_debug_deinit(dev);
1497 EXPORT_SYMBOL(nvhost_client_device_release);
/*
 * nvhost_device_get_resources - ioremap every MEM resource of @dev into
 * pdata->aperture[], using devm so mappings are released with the
 * device. Stops at the first missing MEM resource.
 */
1499 int nvhost_device_get_resources(struct platform_device *dev)
1502 void __iomem *regs = NULL;
1503 struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1505 for (i = 0; i < dev->num_resources; i++) {
1506 struct resource *r = NULL;
1508 r = platform_get_resource(dev, IORESOURCE_MEM, i);
1509 /* We've run out of mem resources */
1513 regs = devm_request_and_ioremap(&dev->dev, r);
1517 pdata->aperture[i] = regs;
1523 dev_err(&dev->dev, "failed to get register memory\n");
/* Exported thin wrapper around nvhost_device_get_resources(). */
1528 int nvhost_client_device_get_resources(struct platform_device *dev)
1530 return nvhost_device_get_resources(dev);
1532 EXPORT_SYMBOL(nvhost_client_device_get_resources);
1534 /* This is a simple wrapper around request_firmware that takes
1535 * 'fw_name' and if available applies a SOC relative path prefix to it.
1536 * The caller is responsible for calling release_firmware later.
1538 const struct firmware *
1539 nvhost_client_request_firmware(struct platform_device *dev, const char *fw_name)
1541 struct nvhost_chip_support *op = nvhost_get_chip_ops();
1542 const struct firmware *fw;
1543 char *fw_path = NULL;
1546 /* This field is NULL when calling from SYS_EXIT.
1547 Add a check here to prevent crash in request_firmware */
	/* build "<soc_name>/<fw_name>" when a SOC prefix is available */
1557 path_len = strlen(fw_name) + strlen(op->soc_name);
1558 path_len += 2; /* for the path separator and zero terminator*/
1560 fw_path = kzalloc(sizeof(*fw_path) * path_len,
1565 sprintf(fw_path, "%s/%s", op->soc_name, fw_name);
	/* NOTE(review): extraction shows the plain fw_name used here;
	 * the prefixed-path request line appears to be lost - confirm */
1569 err = request_firmware(&fw, fw_name, &dev->dev);
1572 dev_err(&dev->dev, "failed to get firmware\n");
1576 /* note: caller must release_firmware */
1579 EXPORT_SYMBOL(nvhost_client_request_firmware);
/*
 * nvhost_find_chan_by_clientid - look up the channel belonging to the
 * userctx with the given client id, scanning the device's userctx list
 * under its lock. Returns NULL when no context matches (tail of the
 * function runs past the visible source).
 */
1581 struct nvhost_channel *nvhost_find_chan_by_clientid(
1582 struct platform_device *pdev,
1585 struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
1586 struct nvhost_channel_userctx *ctx;
1587 struct nvhost_channel *ch = NULL;
1589 mutex_lock(&pdata->userctx_list_lock);
1590 list_for_each_entry(ctx, &pdata->userctx_list, node) {
1591 if (ctx->clientid == clientid) {
1596 mutex_unlock(&pdata->userctx_list_lock);