/*
 * Tegra Graphics Host Client Module
 *
 * Copyright (c) 2010-2016, NVIDIA Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/file.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/firmware.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/tegra-soc.h>
#include <linux/anon_inodes.h>

#include <trace/events/nvhost.h>

#include <linux/nvhost.h>
#include <linux/nvhost_ioctl.h>

#include <mach/gpufuse.h>

#include "bus_client.h"
#include "class_ids.h"
#include "chip_support.h"
#include "nvhost_acm.h"
#include "nvhost_syncpt.h"
#include "nvhost_channel.h"
#include "nvhost_job.h"
#include "nvhost_sync.h"
#include "vhost/vhost.h"
/*
 * Check whether a hardware unit has been bonded out (fused away) on this
 * chip.  Returns non-zero when the unit is bonded out; 0 when present or
 * when bond-out checking is not compiled in.  Silicon platforms skip the
 * check entirely.
 */
int nvhost_check_bondout(unsigned int id)
{
#ifdef CONFIG_NVHOST_BONDOUT_CHECK
	if (!tegra_platform_is_silicon())
		return tegra_bonded_out_dev(id);
#endif
	return 0;
}
EXPORT_SYMBOL(nvhost_check_bondout);
67 static int validate_reg(struct platform_device *ndev, u32 offset, int count)
72 r = platform_get_resource(ndev, IORESOURCE_MEM, 0);
74 dev_err(&ndev->dev, "failed to get memory resource\n");
78 if (offset + 4 * count > resource_size(r)
79 || (offset + 4 * count < offset))
85 void __iomem *get_aperture(struct platform_device *pdev, int index)
87 struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
89 return pdata->aperture[index];
92 void host1x_writel(struct platform_device *pdev, u32 r, u32 v)
94 void __iomem *addr = get_aperture(pdev, 0) + r;
95 nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", pdev->name, r, v);
98 EXPORT_SYMBOL_GPL(host1x_writel);
100 u32 host1x_readl(struct platform_device *pdev, u32 r)
102 void __iomem *addr = get_aperture(pdev, 0) + r;
105 nvhost_dbg(dbg_reg, " d=%s r=0x%x", pdev->name, r);
107 nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", pdev->name, r, v);
111 EXPORT_SYMBOL_GPL(host1x_readl);
113 void host1x_channel_writel(struct nvhost_channel *ch, u32 r, u32 v)
115 void __iomem *addr = ch->aperture + r;
116 nvhost_dbg(dbg_reg, " chid=%d r=0x%x v=0x%x", ch->chid, r, v);
119 EXPORT_SYMBOL_GPL(host1x_channel_writel);
121 u32 host1x_channel_readl(struct nvhost_channel *ch, u32 r)
123 void __iomem *addr = ch->aperture + r;
126 nvhost_dbg(dbg_reg, " chid=%d r=0x%x", ch->chid, r);
128 nvhost_dbg(dbg_reg, " chid=%d r=0x%x v=0x%x", ch->chid, r, v);
132 EXPORT_SYMBOL_GPL(host1x_channel_readl);
134 void host1x_sync_writel(struct nvhost_master *dev, u32 r, u32 v)
136 void __iomem *addr = dev->sync_aperture + r;
137 nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", dev->dev->name, r, v);
140 EXPORT_SYMBOL_GPL(host1x_sync_writel);
142 u32 host1x_sync_readl(struct nvhost_master *dev, u32 r)
144 void __iomem *addr = dev->sync_aperture + r;
147 nvhost_dbg(dbg_reg, " d=%s r=0x%x", dev->dev->name, r);
149 nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", dev->dev->name, r, v);
153 EXPORT_SYMBOL_GPL(host1x_sync_readl);
155 int nvhost_read_module_regs(struct platform_device *ndev,
156 u32 offset, int count, u32 *values)
161 err = validate_reg(ndev, offset, count);
165 err = nvhost_module_busy(ndev);
170 *(values++) = host1x_readl(ndev, offset);
174 nvhost_module_idle(ndev);
179 int nvhost_write_module_regs(struct platform_device *ndev,
180 u32 offset, int count, const u32 *values)
185 err = validate_reg(ndev, offset, count);
189 err = nvhost_module_busy(ndev);
194 host1x_writel(ndev, offset, *(values++));
198 nvhost_module_idle(ndev);
203 struct nvhost_channel_userctx {
204 struct nvhost_channel *ch;
208 bool timeout_debug_dump;
209 struct platform_device *pdev;
210 u32 syncpts[NVHOST_MODULE_MAX_SYNCPTS];
211 u32 client_managed_syncpt;
213 /* error notificatiers used channel submit timeout */
214 struct dma_buf *error_notifier_ref;
215 u64 error_notifier_offset;
217 /* lock to protect this structure from concurrent ioctl usage */
218 struct mutex ioctl_lock;
220 /* used for attaching to ctx list in device pdata */
221 struct list_head node;
224 static int nvhost_channelrelease(struct inode *inode, struct file *filp)
226 struct nvhost_channel_userctx *priv = filp->private_data;
227 struct nvhost_device_data *pdata = platform_get_drvdata(priv->pdev);
228 struct nvhost_master *host = nvhost_get_host(pdata->pdev);
232 trace_nvhost_channel_release(dev_name(&priv->pdev->dev));
234 mutex_lock(&pdata->userctx_list_lock);
235 list_del(&priv->node);
236 mutex_unlock(&pdata->userctx_list_lock);
238 /* remove this client from acm */
239 nvhost_module_remove_client(priv->pdev, priv);
241 /* drop error notifier reference */
242 if (priv->error_notifier_ref)
243 dma_buf_put(priv->error_notifier_ref);
245 /* Clear the identifier */
246 if (pdata->resource_policy == RESOURCE_PER_DEVICE &&
248 identifier = (void *)pdata;
250 identifier = (void *)priv;
251 nvhost_channel_remove_identifier(pdata, identifier);
253 /* If the device is in exclusive mode, drop the reference */
254 if (pdata->exclusive)
255 pdata->num_mapped_chs--;
257 /* drop channel reference if we took one at open time */
258 if (pdata->resource_policy == RESOURCE_PER_DEVICE) {
259 nvhost_putchannel(priv->ch, 1);
261 /* drop instance syncpoints reference */
262 for (i = 0; i < NVHOST_MODULE_MAX_SYNCPTS; ++i) {
263 if (priv->syncpts[i]) {
264 nvhost_syncpt_put_ref(&host->syncpt,
266 priv->syncpts[i] = 0;
270 if (priv->client_managed_syncpt) {
271 nvhost_syncpt_put_ref(&host->syncpt,
272 priv->client_managed_syncpt);
273 priv->client_managed_syncpt = 0;
277 if (pdata->keepalive)
278 nvhost_module_enable_poweroff(priv->pdev);
284 static int __nvhost_channelopen(struct inode *inode,
285 struct platform_device *pdev,
288 struct nvhost_channel_userctx *priv;
289 struct nvhost_device_data *pdata, *host1x_pdata;
290 struct nvhost_master *host;
293 /* grab pdev and pdata based on inputs */
295 pdata = platform_get_drvdata(pdev);
297 pdata = container_of(inode->i_cdev,
298 struct nvhost_device_data, cdev);
303 /* ..and host1x specific data */
304 host1x_pdata = dev_get_drvdata(pdev->dev.parent);
305 host = nvhost_get_host(pdev);
307 trace_nvhost_channel_open(dev_name(&pdev->dev));
309 /* If the device is in exclusive mode, make channel reservation here */
310 if (pdata->exclusive) {
311 if (pdata->num_mapped_chs == pdata->num_channels)
313 pdata->num_mapped_chs++;
316 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
318 goto fail_allocate_priv;
319 filp->private_data = priv;
321 /* Register this client to acm */
322 if (nvhost_module_add_client(pdev, priv))
323 goto fail_add_client;
325 /* Keep devices with keepalive flag powered */
326 if (pdata->keepalive)
327 nvhost_module_disable_poweroff(pdev);
329 /* Check that the device can be powered */
330 ret = nvhost_module_busy(pdev);
333 nvhost_module_idle(pdev);
335 if (nvhost_dev_is_virtual(pdev)) {
336 /* If virtual, allocate a client id on the server side. This is
337 * needed for channel recovery, to distinguish which clients
341 int virt_moduleid = vhost_virt_moduleid(pdata->moduleid);
342 struct nvhost_virt_ctx *virt_ctx =
343 nvhost_get_virt_data(pdev);
345 if (virt_moduleid < 0) {
347 goto fail_virt_clientid;
351 vhost_channel_alloc_clientid(virt_ctx->handle,
353 if (priv->clientid == 0) {
355 "vhost_channel_alloc_clientid failed\n");
357 goto fail_virt_clientid;
361 priv->clientid = atomic_add_return(1, &host->clientid);
363 priv->clientid = atomic_add_return(1, &host->clientid);
366 /* Initialize private structure */
367 priv->timeout = host1x_pdata->nvhost_timeout_default;
368 priv->priority = NVHOST_PRIORITY_MEDIUM;
369 priv->timeout_debug_dump = true;
370 mutex_init(&priv->ioctl_lock);
373 if (!tegra_platform_is_silicon())
376 /* if we run in map-at-submit mode but device has override
377 * flag set, respect the override flag */
378 if (pdata->resource_policy == RESOURCE_PER_DEVICE) {
379 if (pdata->exclusive)
380 ret = nvhost_channel_map(pdata, &priv->ch, priv);
382 ret = nvhost_channel_map(pdata, &priv->ch, pdata);
384 pr_err("%s: failed to map channel, error: %d\n",
386 goto fail_get_channel;
390 INIT_LIST_HEAD(&priv->node);
391 mutex_lock(&pdata->userctx_list_lock);
392 list_add_tail(&priv->node, &pdata->userctx_list);
393 mutex_unlock(&pdata->userctx_list_lock);
400 if (pdata->keepalive)
401 nvhost_module_enable_poweroff(pdev);
402 nvhost_module_remove_client(pdev, priv);
406 if (pdata->exclusive)
407 pdata->num_mapped_chs--;
412 static int nvhost_channelopen(struct inode *inode, struct file *filp)
414 return __nvhost_channelopen(inode, NULL, filp);
417 static int nvhost_init_error_notifier(struct nvhost_channel_userctx *ctx,
418 struct nvhost_set_error_notifier *args)
420 struct dma_buf *dmabuf;
422 u64 end = args->offset + sizeof(struct nvhost_notification);
424 /* are we releasing old reference? */
426 if (ctx->error_notifier_ref)
427 dma_buf_put(ctx->error_notifier_ref);
428 ctx->error_notifier_ref = NULL;
432 /* take reference for the userctx */
433 dmabuf = dma_buf_get(args->mem);
434 if (IS_ERR(dmabuf)) {
435 pr_err("%s: Invalid handle: %d\n", __func__, args->mem);
439 if (end > dmabuf->size || end < sizeof(struct nvhost_notification)) {
441 pr_err("%s: invalid offset\n", __func__);
445 /* map handle and clear error notifier struct */
446 va = dma_buf_vmap(dmabuf);
449 pr_err("%s: Cannot map notifier handle\n", __func__);
453 memset(va + args->offset, 0, sizeof(struct nvhost_notification));
454 dma_buf_vunmap(dmabuf, va);
456 /* release old reference */
457 if (ctx->error_notifier_ref)
458 dma_buf_put(ctx->error_notifier_ref);
460 /* finally, store error notifier data */
461 ctx->error_notifier_ref = dmabuf;
462 ctx->error_notifier_offset = args->offset;
467 static inline u32 get_job_fence(struct nvhost_job *job, u32 id)
469 struct nvhost_channel *ch = job->ch;
470 struct nvhost_device_data *pdata = platform_get_drvdata(ch->dev);
471 u32 fence = job->sp[id].fence;
473 /* take into account work done increment */
474 if (pdata->push_work_done && id == 0)
477 /* otherwise the fence is valid "as is" */
481 static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
482 struct nvhost_submit_args *args)
484 struct nvhost_job *job;
485 int num_cmdbufs = args->num_cmdbufs;
486 int num_relocs = args->num_relocs;
487 int num_waitchks = args->num_waitchks;
488 int num_syncpt_incrs = args->num_syncpt_incrs;
489 struct nvhost_cmdbuf __user *cmdbufs =
490 (struct nvhost_cmdbuf __user *)(uintptr_t)args->cmdbufs;
491 struct nvhost_cmdbuf __user *cmdbuf_exts =
492 (struct nvhost_cmdbuf __user *)(uintptr_t)args->cmdbuf_exts;
493 struct nvhost_reloc __user *relocs =
494 (struct nvhost_reloc __user *)(uintptr_t)args->relocs;
495 struct nvhost_reloc_shift __user *reloc_shifts =
496 (struct nvhost_reloc_shift __user *)
497 (uintptr_t)args->reloc_shifts;
498 struct nvhost_waitchk __user *waitchks =
499 (struct nvhost_waitchk __user *)(uintptr_t)args->waitchks;
500 struct nvhost_syncpt_incr __user *syncpt_incrs =
501 (struct nvhost_syncpt_incr __user *)
502 (uintptr_t)args->syncpt_incrs;
503 u32 __user *fences = (u32 __user *)(uintptr_t)args->fences;
504 u32 __user *class_ids = (u32 __user *)(uintptr_t)args->class_ids;
505 struct nvhost_device_data *pdata = platform_get_drvdata(ctx->pdev);
507 const u32 *syncpt_array =
508 (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) ?
511 u32 *local_class_ids = NULL;
514 job = nvhost_job_alloc(ctx->ch,
522 job->num_relocs = args->num_relocs;
523 job->num_waitchk = args->num_waitchks;
524 job->num_syncpts = args->num_syncpt_incrs;
525 job->priority = ctx->priority;
526 job->clientid = ctx->clientid;
527 job->client_managed_syncpt =
528 (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) ?
529 ctx->client_managed_syncpt : ctx->ch->client_managed_syncpt;
531 /* copy error notifier settings for this job */
532 if (ctx->error_notifier_ref) {
533 get_dma_buf(ctx->error_notifier_ref);
534 job->error_notifier_ref = ctx->error_notifier_ref;
535 job->error_notifier_offset = ctx->error_notifier_offset;
538 /* mass copy class_ids */
539 if (args->class_ids) {
540 local_class_ids = kzalloc(sizeof(u32) * num_cmdbufs,
542 if (!local_class_ids) {
546 err = copy_from_user(local_class_ids, class_ids,
547 sizeof(u32) * num_cmdbufs);
554 for (i = 0; i < num_cmdbufs; ++i) {
555 struct nvhost_cmdbuf cmdbuf;
556 struct nvhost_cmdbuf_ext cmdbuf_ext;
557 u32 class_id = class_ids ? local_class_ids[i] : 0;
559 err = copy_from_user(&cmdbuf, cmdbufs + i, sizeof(cmdbuf));
563 cmdbuf_ext.pre_fence = -1;
565 err = copy_from_user(&cmdbuf_ext,
566 cmdbuf_exts + i, sizeof(cmdbuf_ext));
568 cmdbuf_ext.pre_fence = -1;
570 /* verify that the given class id is valid for this engine */
572 class_id != pdata->class &&
573 class_id != NV_HOST1X_CLASS_ID) {
578 nvhost_job_add_gather(job, cmdbuf.mem, cmdbuf.words,
579 cmdbuf.offset, class_id,
580 cmdbuf_ext.pre_fence);
583 kfree(local_class_ids);
584 local_class_ids = NULL;
586 err = copy_from_user(job->relocarray,
587 relocs, sizeof(*relocs) * num_relocs);
591 err = copy_from_user(job->relocshiftarray,
592 reloc_shifts, sizeof(*reloc_shifts) * num_relocs);
596 err = copy_from_user(job->waitchk,
597 waitchks, sizeof(*waitchks) * num_waitchks);
602 * Go through each syncpoint from userspace. Here we:
603 * - Copy syncpoint information
604 * - Validate each syncpoint
605 * - Determine the index of hwctx syncpoint in the table
608 for (i = 0; i < num_syncpt_incrs; ++i) {
609 struct nvhost_syncpt_incr sp;
614 err = copy_from_user(&sp, syncpt_incrs + i, sizeof(sp));
618 /* Validate the trivial case */
619 if (sp.syncpt_id == 0) {
624 /* ..and then ensure that the syncpoints have been reserved
626 for (j = 0; j < NVHOST_MODULE_MAX_SYNCPTS; j++) {
627 if (syncpt_array[j] == sp.syncpt_id) {
638 /* Store and get a reference */
639 job->sp[i].id = sp.syncpt_id;
640 job->sp[i].incrs = sp.syncpt_incrs;
643 trace_nvhost_channel_submit(ctx->pdev->name,
644 job->num_gathers, job->num_relocs, job->num_waitchk,
648 err = nvhost_module_busy(ctx->pdev);
652 err = nvhost_job_pin(job, &nvhost_get_host(ctx->pdev)->syncpt);
653 nvhost_module_idle(ctx->pdev);
658 job->timeout = min(ctx->timeout, args->timeout);
660 job->timeout = ctx->timeout;
661 job->timeout_debug_dump = ctx->timeout_debug_dump;
663 err = nvhost_channel_submit(job);
667 /* Deliver multiple fences back to the userspace */
669 for (i = 0; i < num_syncpt_incrs; ++i) {
670 u32 fence = get_job_fence(job, i);
671 err = copy_to_user(fences, &fence, sizeof(u32));
677 /* Deliver the fence using the old mechanism _only_ if a single
678 * syncpoint is used. */
680 if (args->flags & BIT(NVHOST_SUBMIT_FLAG_SYNC_FENCE_FD)) {
681 struct nvhost_ctrl_sync_fence_info pts[num_syncpt_incrs];
683 for (i = 0; i < num_syncpt_incrs; i++) {
684 pts[i].id = job->sp[i].id;
685 pts[i].thresh = get_job_fence(job, i);
688 err = nvhost_sync_create_fence_fd(ctx->pdev,
689 pts, num_syncpt_incrs, "fence", &args->fence);
692 } else if (num_syncpt_incrs == 1)
693 args->fence = get_job_fence(job, 0);
702 nvhost_job_unpin(job);
705 kfree(local_class_ids);
707 nvhost_err(&pdata->pdev->dev, "failed with err %d\n", err);
712 static int moduleid_to_index(struct platform_device *dev, u32 moduleid)
715 struct nvhost_device_data *pdata = platform_get_drvdata(dev);
717 for (i = 0; i < NVHOST_MODULE_MAX_CLOCKS; i++) {
718 if (pdata->clocks[i].moduleid == moduleid)
722 /* Old user space is sending a random number in args. Return clock
723 * zero in these cases. */
727 static int nvhost_ioctl_channel_set_rate(struct nvhost_channel_userctx *ctx,
728 struct nvhost_clk_rate_args *arg)
730 u32 moduleid = (arg->moduleid >> NVHOST_MODULE_ID_BIT_POS)
731 & ((1 << NVHOST_MODULE_ID_BIT_WIDTH) - 1);
732 u32 attr = (arg->moduleid >> NVHOST_CLOCK_ATTR_BIT_POS)
733 & ((1 << NVHOST_CLOCK_ATTR_BIT_WIDTH) - 1);
734 int index = moduleid ?
735 moduleid_to_index(ctx->pdev, moduleid) : 0;
738 err = nvhost_module_set_rate(ctx->pdev, ctx, arg->rate, index, attr);
739 if (!tegra_platform_is_silicon() && err) {
740 nvhost_dbg(dbg_clk, "ignoring error: module=%u, attr=%u, index=%d, err=%d",
741 moduleid, attr, index, err);
748 static int nvhost_ioctl_channel_get_rate(struct nvhost_channel_userctx *ctx,
749 u32 moduleid, u32 *rate)
751 int index = moduleid ? moduleid_to_index(ctx->pdev, moduleid) : 0;
754 err = nvhost_module_get_rate(ctx->pdev, (unsigned long *)rate, index);
755 if (!tegra_platform_is_silicon() && err) {
756 nvhost_dbg(dbg_clk, "ignoring error: module=%u, rate=%u, error=%d",
757 moduleid, *rate, err);
759 /* fake the return value */
766 static int nvhost_ioctl_channel_module_regrdwr(
767 struct nvhost_channel_userctx *ctx,
768 struct nvhost_ctrl_module_regrdwr_args *args)
770 u32 num_offsets = args->num_offsets;
771 u32 __user *offsets = (u32 __user *)(uintptr_t)args->offsets;
772 u32 __user *values = (u32 __user *)(uintptr_t)args->values;
774 struct platform_device *ndev;
776 trace_nvhost_ioctl_channel_module_regrdwr(args->id,
777 args->num_offsets, args->write);
779 /* Check that there is something to read and that block size is
781 if (num_offsets == 0 || args->block_size & 3)
786 if (nvhost_dev_is_virtual(ndev))
787 return vhost_rdwr_module_regs(ndev, num_offsets,
788 args->block_size, offsets, values, args->write);
790 while (num_offsets--) {
793 int remaining = args->block_size >> 2;
795 if (get_user(offs, offsets))
800 int batch = min(remaining, 64);
802 if (copy_from_user(vals, values,
803 batch * sizeof(u32)))
806 err = nvhost_write_module_regs(ndev,
811 err = nvhost_read_module_regs(ndev,
816 if (copy_to_user(values, vals,
817 batch * sizeof(u32)))
822 offs += batch * sizeof(u32);
830 static u32 create_mask(u32 *words, int num)
834 for (i = 0; i < num; i++) {
835 if (!words[i] || words[i] > 31)
837 word |= BIT(words[i]);
843 static u32 nvhost_ioctl_channel_get_syncpt_mask(
844 struct nvhost_channel_userctx *priv)
846 struct nvhost_device_data *pdata = platform_get_drvdata(priv->pdev);
849 if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE)
850 mask = create_mask(priv->syncpts, NVHOST_MODULE_MAX_SYNCPTS);
852 mask = create_mask(priv->ch->syncpts,
853 NVHOST_MODULE_MAX_SYNCPTS);
858 static u32 nvhost_ioctl_channel_get_syncpt_channel(struct nvhost_channel *ch,
859 struct nvhost_device_data *pdata, u32 index)
863 mutex_lock(&ch->syncpts_lock);
865 /* if we already have required syncpt then return it ... */
866 id = ch->syncpts[index];
870 /* ... otherwise get a new syncpt dynamically */
871 id = nvhost_get_syncpt_host_managed(pdata->pdev, index, NULL);
875 /* ... and store it for further references */
876 ch->syncpts[index] = id;
879 mutex_unlock(&ch->syncpts_lock);
883 static u32 nvhost_ioctl_channel_get_syncpt_instance(
884 struct nvhost_channel_userctx *ctx,
885 struct nvhost_device_data *pdata, u32 index)
889 /* if we already have required syncpt then return it ... */
890 if (ctx->syncpts[index]) {
891 id = ctx->syncpts[index];
895 /* ... otherwise get a new syncpt dynamically */
896 id = nvhost_get_syncpt_host_managed(pdata->pdev, index, NULL);
900 /* ... and store it for further references */
901 ctx->syncpts[index] = id;
906 static int nvhost_ioctl_channel_get_client_syncpt(
907 struct nvhost_channel_userctx *ctx,
908 struct nvhost_get_client_managed_syncpt_arg *args)
910 struct nvhost_device_data *pdata = platform_get_drvdata(ctx->pdev);
911 const char __user *args_name =
912 (const char __user *)(uintptr_t)args->name;
916 /* prepare syncpoint name (in case it is needed) */
918 if (strncpy_from_user(name, args_name, sizeof(name)) < 0)
920 name[sizeof(name) - 1] = '\0';
925 snprintf(set_name, sizeof(set_name),
926 "%s_%s", dev_name(&ctx->pdev->dev), name);
928 if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
929 if (!ctx->client_managed_syncpt)
930 ctx->client_managed_syncpt =
931 nvhost_get_syncpt_client_managed(pdata->pdev,
933 args->value = ctx->client_managed_syncpt;
935 struct nvhost_channel *ch = ctx->ch;
936 mutex_lock(&ch->syncpts_lock);
937 if (!ch->client_managed_syncpt)
938 ch->client_managed_syncpt =
939 nvhost_get_syncpt_client_managed(pdata->pdev,
941 mutex_unlock(&ch->syncpts_lock);
942 args->value = ch->client_managed_syncpt;
951 static long nvhost_channelctl(struct file *filp,
952 unsigned int cmd, unsigned long arg)
954 struct nvhost_channel_userctx *priv = filp->private_data;
955 struct nvhost_master *host;
957 u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
960 if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
961 (_IOC_NR(cmd) == 0) ||
962 (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
963 (_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
966 if (_IOC_DIR(cmd) & _IOC_WRITE) {
967 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
971 /* serialize calls from this fd */
972 mutex_lock(&priv->ioctl_lock);
974 pr_warn("Channel already unmapped\n");
975 mutex_unlock(&priv->ioctl_lock);
979 host = nvhost_get_host(priv->pdev);
980 dev = &priv->pdev->dev;
982 case NVHOST_IOCTL_CHANNEL_OPEN:
988 err = get_unused_fd_flags(O_RDWR);
993 name = kasprintf(GFP_KERNEL, "nvhost-%s-fd%d",
1001 file = anon_inode_getfile(name, filp->f_op, NULL, O_RDWR);
1004 err = PTR_ERR(file);
1008 fd_install(fd, file);
1010 err = __nvhost_channelopen(NULL, priv->pdev, file);
1017 ((struct nvhost_channel_open_args *)buf)->channel_fd = fd;
1020 case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
1022 ((struct nvhost_get_param_args *)buf)->value =
1023 nvhost_ioctl_channel_get_syncpt_mask(priv);
1026 case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINT:
1028 struct nvhost_device_data *pdata =
1029 platform_get_drvdata(priv->pdev);
1030 struct nvhost_get_param_arg *arg =
1031 (struct nvhost_get_param_arg *)buf;
1033 if (arg->param >= NVHOST_MODULE_MAX_SYNCPTS) {
1038 if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE)
1039 arg->value = nvhost_ioctl_channel_get_syncpt_instance(
1040 priv, pdata, arg->param);
1042 arg->value = nvhost_ioctl_channel_get_syncpt_channel(
1043 priv->ch, pdata, arg->param);
1050 case NVHOST_IOCTL_CHANNEL_GET_CLIENT_MANAGED_SYNCPOINT:
1052 err = nvhost_ioctl_channel_get_client_syncpt(priv,
1053 (struct nvhost_get_client_managed_syncpt_arg *)buf);
1056 case NVHOST_IOCTL_CHANNEL_FREE_CLIENT_MANAGED_SYNCPOINT:
1058 case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
1060 ((struct nvhost_get_param_args *)buf)->value = 0;
1063 case NVHOST_IOCTL_CHANNEL_GET_WAITBASE:
1068 case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
1070 struct nvhost_device_data *pdata = \
1071 platform_get_drvdata(priv->pdev);
1072 ((struct nvhost_get_param_args *)buf)->value =
1073 create_mask(pdata->modulemutexes,
1074 NVHOST_MODULE_MAX_MODMUTEXES);
1077 case NVHOST_IOCTL_CHANNEL_GET_MODMUTEX:
1079 struct nvhost_device_data *pdata = \
1080 platform_get_drvdata(priv->pdev);
1081 struct nvhost_get_param_arg *arg =
1082 (struct nvhost_get_param_arg *)buf;
1084 if (arg->param >= NVHOST_MODULE_MAX_MODMUTEXES ||
1085 !pdata->modulemutexes[arg->param]) {
1090 arg->value = pdata->modulemutexes[arg->param];
1093 case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
1095 case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
1097 struct nvhost_clk_rate_args *arg =
1098 (struct nvhost_clk_rate_args *)buf;
1100 err = nvhost_ioctl_channel_get_rate(priv,
1101 arg->moduleid, &arg->rate);
1104 case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
1106 struct nvhost_clk_rate_args *arg =
1107 (struct nvhost_clk_rate_args *)buf;
1109 /* if virtualized, client requests to change clock rate
1112 if (nvhost_dev_is_virtual(priv->pdev))
1115 err = nvhost_ioctl_channel_set_rate(priv, arg);
1118 case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
1121 (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
1123 priv->timeout = timeout;
1124 dev_dbg(&priv->pdev->dev,
1125 "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
1126 __func__, priv->timeout, priv);
1129 case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
1130 ((struct nvhost_get_param_args *)buf)->value = false;
1132 case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
1134 (u32)((struct nvhost_set_priority_args *)buf)->priority;
1136 case NVHOST32_IOCTL_CHANNEL_MODULE_REGRDWR:
1138 struct nvhost32_ctrl_module_regrdwr_args *args32 =
1139 (struct nvhost32_ctrl_module_regrdwr_args *)buf;
1140 struct nvhost_ctrl_module_regrdwr_args args;
1141 args.id = args32->id;
1142 args.num_offsets = args32->num_offsets;
1143 args.block_size = args32->block_size;
1144 args.offsets = args32->offsets;
1145 args.values = args32->values;
1146 args.write = args32->write;
1147 err = nvhost_ioctl_channel_module_regrdwr(priv, &args);
1150 case NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR:
1151 err = nvhost_ioctl_channel_module_regrdwr(priv, (void *)buf);
1153 case NVHOST32_IOCTL_CHANNEL_SUBMIT:
1155 struct nvhost_device_data *pdata =
1156 platform_get_drvdata(priv->pdev);
1157 struct nvhost32_submit_args *args32 = (void *)buf;
1158 struct nvhost_submit_args args;
1161 if (pdata->resource_policy == RESOURCE_PER_DEVICE &&
1163 identifier = (void *)pdata;
1165 identifier = (void *)priv;
1167 memset(&args, 0, sizeof(args));
1168 args.submit_version = args32->submit_version;
1169 args.num_syncpt_incrs = args32->num_syncpt_incrs;
1170 args.num_cmdbufs = args32->num_cmdbufs;
1171 args.num_relocs = args32->num_relocs;
1172 args.num_waitchks = args32->num_waitchks;
1173 args.timeout = args32->timeout;
1174 args.syncpt_incrs = args32->syncpt_incrs;
1175 args.fence = args32->fence;
1177 args.cmdbufs = args32->cmdbufs;
1178 args.relocs = args32->relocs;
1179 args.reloc_shifts = args32->reloc_shifts;
1180 args.waitchks = args32->waitchks;
1181 args.class_ids = args32->class_ids;
1182 args.fences = args32->fences;
1184 /* first, get a channel */
1185 err = nvhost_channel_map(pdata, &priv->ch, identifier);
1189 /* ..then, synchronize syncpoint information.
1191 * This information is updated only in this ioctl and
1192 * channel destruction. We already hold channel
1193 * reference and this ioctl is serialized => no-one is
1194 * modifying the syncpoint field concurrently.
1196 * Synchronization is not destructing anything
1197 * in the structure; We can only allocate new
1198 * syncpoints, and hence old ones cannot be released
1199 * by following operation. If some syncpoint is stored
1200 * into the channel structure, it remains there. */
1202 if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
1203 memcpy(priv->ch->syncpts, priv->syncpts,
1204 sizeof(priv->syncpts));
1205 priv->ch->client_managed_syncpt =
1206 priv->client_managed_syncpt;
1210 err = nvhost_ioctl_channel_submit(priv, &args);
1212 /* ..and drop the local reference */
1213 nvhost_putchannel(priv->ch, 1);
1215 args32->fence = args.fence;
1219 case NVHOST_IOCTL_CHANNEL_SUBMIT:
1221 struct nvhost_device_data *pdata =
1222 platform_get_drvdata(priv->pdev);
1225 if (pdata->resource_policy == RESOURCE_PER_DEVICE &&
1227 identifier = (void *)pdata;
1229 identifier = (void *)priv;
1231 /* first, get a channel */
1232 err = nvhost_channel_map(pdata, &priv->ch, identifier);
1236 /* ..then, synchronize syncpoint information.
1238 * This information is updated only in this ioctl and
1239 * channel destruction. We already hold channel
1240 * reference and this ioctl is serialized => no-one is
1241 * modifying the syncpoint field concurrently.
1243 * Synchronization is not destructing anything
1244 * in the structure; We can only allocate new
1245 * syncpoints, and hence old ones cannot be released
1246 * by following operation. If some syncpoint is stored
1247 * into the channel structure, it remains there. */
1249 if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
1250 memcpy(priv->ch->syncpts, priv->syncpts,
1251 sizeof(priv->syncpts));
1252 priv->ch->client_managed_syncpt =
1253 priv->client_managed_syncpt;
1257 err = nvhost_ioctl_channel_submit(priv, (void *)buf);
1259 /* ..and drop the local reference */
1260 nvhost_putchannel(priv->ch, 1);
1264 case NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
1265 err = nvhost_init_error_notifier(priv,
1266 (struct nvhost_set_error_notifier *)buf);
1268 case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
1271 (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
1272 bool timeout_debug_dump = !((u32)
1273 ((struct nvhost_set_timeout_ex_args *)buf)->flags &
1274 (1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
1275 priv->timeout = timeout;
1276 priv->timeout_debug_dump = timeout_debug_dump;
1277 dev_dbg(&priv->pdev->dev,
1278 "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
1279 __func__, priv->timeout, priv);
1283 nvhost_dbg_info("unrecognized ioctl cmd: 0x%x", cmd);
1288 mutex_unlock(&priv->ioctl_lock);
1290 if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
1291 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
1296 static const struct file_operations nvhost_channelops = {
1297 .owner = THIS_MODULE,
1298 .release = nvhost_channelrelease,
1299 .open = nvhost_channelopen,
1300 #ifdef CONFIG_COMPAT
1301 .compat_ioctl = nvhost_channelctl,
1303 .unlocked_ioctl = nvhost_channelctl
1306 static const char *get_device_name_for_dev(struct platform_device *dev)
1308 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1310 if (pdata->devfs_name)
1311 return pdata->devfs_name;
1316 static struct device *nvhost_client_device_create(
1317 struct platform_device *pdev, struct cdev *cdev,
1318 const char *cdev_name, dev_t devno,
1319 const struct file_operations *ops)
1321 struct nvhost_master *host = nvhost_get_host(pdev);
1322 const char *use_dev_name;
1330 cdev_init(cdev, ops);
1331 cdev->owner = THIS_MODULE;
1333 err = cdev_add(cdev, devno, 1);
1336 "failed to add cdev\n");
1339 use_dev_name = get_device_name_for_dev(pdev);
1341 dev = device_create(host->nvhost_class,
1344 IFACE_NAME "-%s%s" :
1345 IFACE_NAME "-%s%s.%d",
1346 cdev_name, use_dev_name, pdev->id);
1351 "failed to create %s %s device for %s\n",
1352 use_dev_name, cdev_name, pdev->name);
1359 #define NVHOST_NUM_CDEV 4
1360 int nvhost_client_user_init(struct platform_device *dev)
1364 struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1366 /* reserve 3 minor #s for <dev>, and ctrl-<dev> */
1368 err = alloc_chrdev_region(&devno, 0, NVHOST_NUM_CDEV, IFACE_NAME);
1370 dev_err(&dev->dev, "failed to allocate devno\n");
1373 pdata->cdev_region = devno;
1375 pdata->node = nvhost_client_device_create(dev, &pdata->cdev,
1376 "", devno, &nvhost_channelops);
1377 if (pdata->node == NULL)
1380 /* module control (npn-channel based, global) interface */
1381 if (pdata->ctrl_ops) {
1383 pdata->ctrl_node = nvhost_client_device_create(dev,
1384 &pdata->ctrl_cdev, "ctrl-",
1385 devno, pdata->ctrl_ops);
1386 if (pdata->ctrl_node == NULL)
1395 static void nvhost_client_user_deinit(struct platform_device *dev)
1397 struct nvhost_master *nvhost_master = nvhost_get_host(dev);
1398 struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1401 device_destroy(nvhost_master->nvhost_class, pdata->cdev.dev);
1402 cdev_del(&pdata->cdev);
1405 if (pdata->as_node) {
1406 device_destroy(nvhost_master->nvhost_class, pdata->as_cdev.dev);
1407 cdev_del(&pdata->as_cdev);
1410 if (pdata->ctrl_node) {
1411 device_destroy(nvhost_master->nvhost_class,
1412 pdata->ctrl_cdev.dev);
1413 cdev_del(&pdata->ctrl_cdev);
1416 unregister_chrdev_region(pdata->cdev_region, NVHOST_NUM_CDEV);
1419 int nvhost_client_device_init(struct platform_device *dev)
1422 struct nvhost_master *nvhost_master = nvhost_get_host(dev);
1423 struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1425 mutex_init(&pdata->userctx_list_lock);
1426 INIT_LIST_HEAD(&pdata->userctx_list);
1428 /* Create debugfs directory for the device */
1429 nvhost_device_debug_init(dev);
1431 err = nvhost_client_user_init(dev);
1435 err = nvhost_device_list_add(dev);
1439 if (pdata->scaling_init)
1440 pdata->scaling_init(dev);
1442 /* reset syncpoint values for this unit */
1443 err = nvhost_module_busy(nvhost_master->dev);
1447 nvhost_module_idle(nvhost_master->dev);
1449 /* Initialize dma parameters */
1450 dev->dev.dma_parms = &pdata->dma_parms;
1451 dma_set_max_seg_size(&dev->dev, UINT_MAX);
1453 dev_info(&dev->dev, "initialized\n");
1455 if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
1456 nvhost_master->info.channel_policy = MAP_CHANNEL_ON_SUBMIT;
1457 nvhost_update_characteristics(dev);
1461 return pdata->hw_init(dev);
1466 /* Remove from nvhost device list */
1467 nvhost_device_list_remove(dev);
1470 dev_err(&dev->dev, "failed to init client device\n");
1471 nvhost_client_user_deinit(dev);
1472 nvhost_device_debug_deinit(dev);
1475 EXPORT_SYMBOL(nvhost_client_device_init);
/* Undo nvhost_client_device_init: acm, device list, nodes and debugfs. */
int nvhost_client_device_release(struct platform_device *dev)
{
	/* Release nvhost module resources */
	nvhost_module_deinit(dev);

	/* Remove from nvhost device list */
	nvhost_device_list_remove(dev);

	/* Release chardev and device node for user space */
	nvhost_client_user_deinit(dev);

	/* Remove debugFS */
	nvhost_device_debug_deinit(dev);

	return 0;
}
EXPORT_SYMBOL(nvhost_client_device_release);
1495 int nvhost_device_get_resources(struct platform_device *dev)
1498 void __iomem *regs = NULL;
1499 struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1501 for (i = 0; i < dev->num_resources; i++) {
1502 struct resource *r = NULL;
1504 r = platform_get_resource(dev, IORESOURCE_MEM, i);
1505 /* We've run out of mem resources */
1509 regs = devm_request_and_ioremap(&dev->dev, r);
1513 pdata->aperture[i] = regs;
1519 dev_err(&dev->dev, "failed to get register memory\n");
/* Exported thin wrapper around nvhost_device_get_resources(). */
int nvhost_client_device_get_resources(struct platform_device *dev)
{
	return nvhost_device_get_resources(dev);
}
EXPORT_SYMBOL(nvhost_client_device_get_resources);
1530 /* This is a simple wrapper around request_firmware that takes
1531 * 'fw_name' and if available applies a SOC relative path prefix to it.
1532 * The caller is responsible for calling release_firmware later.
1534 const struct firmware *
1535 nvhost_client_request_firmware(struct platform_device *dev, const char *fw_name)
1537 struct nvhost_chip_support *op = nvhost_get_chip_ops();
1538 const struct firmware *fw;
1539 char *fw_path = NULL;
1542 /* This field is NULL when calling from SYS_EXIT.
1543 Add a check here to prevent crash in request_firmware */
1553 path_len = strlen(fw_name) + strlen(op->soc_name);
1554 path_len += 2; /* for the path separator and zero terminator*/
1556 fw_path = kzalloc(sizeof(*fw_path) * path_len,
1561 sprintf(fw_path, "%s/%s", op->soc_name, fw_name);
1565 err = request_firmware(&fw, fw_name, &dev->dev);
1568 dev_err(&dev->dev, "failed to get firmware\n");
1572 /* note: caller must release_firmware */
1575 EXPORT_SYMBOL(nvhost_client_request_firmware);
1577 struct nvhost_channel *nvhost_find_chan_by_clientid(
1578 struct platform_device *pdev,
1581 struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
1582 struct nvhost_channel_userctx *ctx;
1583 struct nvhost_channel *ch = NULL;
1585 mutex_lock(&pdata->userctx_list_lock);
1586 list_for_each_entry(ctx, &pdata->userctx_list, node) {
1587 if (ctx->clientid == clientid) {
1592 mutex_unlock(&pdata->userctx_list_lock);