]> rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/blob - drivers/video/tegra/host/bus_client.c
video: tegra: host: validate error notifier offset
[sojka/nv-tegra/linux-3.10.git] / drivers / video / tegra / host / bus_client.c
1 /*
2  * Tegra Graphics Host Client Module
3  *
4  * Copyright (c) 2010-2016, NVIDIA Corporation. All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #include <linux/slab.h>
20 #include <linux/string.h>
21 #include <linux/spinlock.h>
22 #include <linux/fs.h>
23 #include <linux/cdev.h>
24 #include <linux/uaccess.h>
25 #include <linux/file.h>
26 #include <linux/clk.h>
27 #include <linux/hrtimer.h>
28 #include <linux/export.h>
29 #include <linux/firmware.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/tegra-soc.h>
32 #include <linux/anon_inodes.h>
33
34 #include <trace/events/nvhost.h>
35
36 #include <linux/io.h>
37 #include <linux/string.h>
38
39 #include <linux/nvhost.h>
40 #include <linux/nvhost_ioctl.h>
41
42 #include <mach/gpufuse.h>
43
44 #include "debug.h"
45 #include "bus_client.h"
46 #include "dev.h"
47 #include "class_ids.h"
48 #include "chip_support.h"
49 #include "nvhost_acm.h"
50
51 #include "nvhost_syncpt.h"
52 #include "nvhost_channel.h"
53 #include "nvhost_job.h"
54 #include "nvhost_sync.h"
55 #include "vhost/vhost.h"
56
/*
 * Check whether hardware module @id is bonded out (fused away).
 * Only performs a real check on non-silicon platforms when
 * CONFIG_NVHOST_BONDOUT_CHECK is enabled; otherwise reports 0
 * (module present).
 */
int nvhost_check_bondout(unsigned int id)
{
#ifdef CONFIG_NVHOST_BONDOUT_CHECK
	if (!tegra_platform_is_silicon())
		return tegra_bonded_out_dev(id);
#endif
	return 0;
}
EXPORT_SYMBOL(nvhost_check_bondout);
66
67 static int validate_reg(struct platform_device *ndev, u32 offset, int count)
68 {
69         int err = 0;
70         struct resource *r;
71
72         r = platform_get_resource(ndev, IORESOURCE_MEM, 0);
73         if (!r) {
74                 dev_err(&ndev->dev, "failed to get memory resource\n");
75                 return -ENODEV;
76         }
77
78         if (offset + 4 * count > resource_size(r)
79                         || (offset + 4 * count < offset))
80                 err = -EPERM;
81
82         return err;
83 }
84
/*
 * Return the mapped register aperture @index of @pdev, as stored in the
 * device's platform data. Note: @index is not bounds-checked here.
 */
void __iomem *get_aperture(struct platform_device *pdev, int index)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);

	return pdata->aperture[index];
}
91
92 void host1x_writel(struct platform_device *pdev, u32 r, u32 v)
93 {
94         void __iomem *addr = get_aperture(pdev, 0) + r;
95         nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", pdev->name, r, v);
96         writel(v, addr);
97 }
98 EXPORT_SYMBOL_GPL(host1x_writel);
99
100 u32 host1x_readl(struct platform_device *pdev, u32 r)
101 {
102         void __iomem *addr = get_aperture(pdev, 0) + r;
103         u32 v;
104
105         nvhost_dbg(dbg_reg, " d=%s r=0x%x", pdev->name, r);
106         v = readl(addr);
107         nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", pdev->name, r, v);
108
109         return v;
110 }
111 EXPORT_SYMBOL_GPL(host1x_readl);
112
113 void host1x_channel_writel(struct nvhost_channel *ch, u32 r, u32 v)
114 {
115         void __iomem *addr = ch->aperture + r;
116         nvhost_dbg(dbg_reg, " chid=%d r=0x%x v=0x%x", ch->chid, r, v);
117         writel(v, addr);
118 }
119 EXPORT_SYMBOL_GPL(host1x_channel_writel);
120
121 u32 host1x_channel_readl(struct nvhost_channel *ch, u32 r)
122 {
123         void __iomem *addr = ch->aperture + r;
124         u32 v;
125
126         nvhost_dbg(dbg_reg, " chid=%d r=0x%x", ch->chid, r);
127         v = readl(addr);
128         nvhost_dbg(dbg_reg, " chid=%d r=0x%x v=0x%x", ch->chid, r, v);
129
130         return v;
131 }
132 EXPORT_SYMBOL_GPL(host1x_channel_readl);
133
134 void host1x_sync_writel(struct nvhost_master *dev, u32 r, u32 v)
135 {
136         void __iomem *addr = dev->sync_aperture + r;
137         nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", dev->dev->name, r, v);
138         writel(v, addr);
139 }
140 EXPORT_SYMBOL_GPL(host1x_sync_writel);
141
142 u32 host1x_sync_readl(struct nvhost_master *dev, u32 r)
143 {
144         void __iomem *addr = dev->sync_aperture + r;
145         u32 v;
146
147         nvhost_dbg(dbg_reg, " d=%s r=0x%x", dev->dev->name, r);
148         v = readl(addr);
149         nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", dev->dev->name, r, v);
150
151         return v;
152 }
153 EXPORT_SYMBOL_GPL(host1x_sync_readl);
154
155 int nvhost_read_module_regs(struct platform_device *ndev,
156                         u32 offset, int count, u32 *values)
157 {
158         int err;
159
160         /* verify offset */
161         err = validate_reg(ndev, offset, count);
162         if (err)
163                 return err;
164
165         err = nvhost_module_busy(ndev);
166         if (err)
167                 return err;
168
169         while (count--) {
170                 *(values++) = host1x_readl(ndev, offset);
171                 offset += 4;
172         }
173         rmb();
174         nvhost_module_idle(ndev);
175
176         return 0;
177 }
178
179 int nvhost_write_module_regs(struct platform_device *ndev,
180                         u32 offset, int count, const u32 *values)
181 {
182         int err;
183
184         /* verify offset */
185         err = validate_reg(ndev, offset, count);
186         if (err)
187                 return err;
188
189         err = nvhost_module_busy(ndev);
190         if (err)
191                 return err;
192
193         while (count--) {
194                 host1x_writel(ndev, offset, *(values++));
195                 offset += 4;
196         }
197         wmb();
198         nvhost_module_idle(ndev);
199
200         return 0;
201 }
202
/*
 * Per-open-file channel state, allocated in __nvhost_channelopen() and
 * released in nvhost_channelrelease().
 */
struct nvhost_channel_userctx {
	struct nvhost_channel *ch;	/* mapped channel (per-device policy) */
	u32 timeout;			/* job timeout; forced to 0 off-silicon */
	u32 priority;			/* job priority, default MEDIUM */
	int clientid;			/* non-zero id tagging this client's jobs */
	bool timeout_debug_dump;	/* dump state on job timeout */
	struct platform_device *pdev;
	u32 syncpts[NVHOST_MODULE_MAX_SYNCPTS];	/* per-instance syncpoints */
	u32 client_managed_syncpt;

	/* error notifier used on channel submit timeout */
	struct dma_buf *error_notifier_ref;
	u64 error_notifier_offset;

	/* lock to protect this structure from concurrent ioctl usage */
	struct mutex ioctl_lock;

	/* used for attaching to ctx list in device pdata */
	struct list_head node;
};
223
/*
 * Release a channel file descriptor: detach the context from the device's
 * context list, drop every reference the context holds (error notifier,
 * channel or per-instance syncpoints) and free the private data.
 */
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;
	struct nvhost_device_data *pdata = platform_get_drvdata(priv->pdev);
	struct nvhost_master *host = nvhost_get_host(pdata->pdev);
	void *identifier;
	int i = 0;

	trace_nvhost_channel_release(dev_name(&priv->pdev->dev));

	mutex_lock(&pdata->userctx_list_lock);
	list_del(&priv->node);
	mutex_unlock(&pdata->userctx_list_lock);

	/* remove this client from acm */
	nvhost_module_remove_client(priv->pdev, priv);

	/* drop error notifier reference */
	if (priv->error_notifier_ref)
		dma_buf_put(priv->error_notifier_ref);

	/* Clear the identifier; must mirror the identifier chosen at map
	 * time (pdata for shared per-device channels, priv otherwise) */
	if (pdata->resource_policy == RESOURCE_PER_DEVICE &&
	    !pdata->exclusive)
		identifier = (void *)pdata;
	else
		identifier = (void *)priv;
	nvhost_channel_remove_identifier(pdata, identifier);

	/* If the device is in exclusive mode, drop the reservation taken
	 * at open time */
	if (pdata->exclusive)
		pdata->num_mapped_chs--;

	/* drop channel reference if we took one at open time */
	if (pdata->resource_policy == RESOURCE_PER_DEVICE) {
		nvhost_putchannel(priv->ch, 1);
	} else {
		/* drop instance syncpoints reference */
		for (i = 0; i < NVHOST_MODULE_MAX_SYNCPTS; ++i) {
			if (priv->syncpts[i]) {
				nvhost_syncpt_put_ref(&host->syncpt,
						priv->syncpts[i]);
				priv->syncpts[i] = 0;
			}
		}

		if (priv->client_managed_syncpt) {
			nvhost_syncpt_put_ref(&host->syncpt,
					priv->client_managed_syncpt);
			priv->client_managed_syncpt = 0;
		}
	}

	/* re-allow the power-off that open disabled for keepalive devices */
	if (pdata->keepalive)
		nvhost_module_enable_poweroff(priv->pdev);

	kfree(priv);
	return 0;
}
283
284 static int __nvhost_channelopen(struct inode *inode,
285                 struct platform_device *pdev,
286                 struct file *filp)
287 {
288         struct nvhost_channel_userctx *priv;
289         struct nvhost_device_data *pdata, *host1x_pdata;
290         struct nvhost_master *host;
291         int ret;
292
293         /* grab pdev and pdata based on inputs */
294         if (pdev) {
295                 pdata = platform_get_drvdata(pdev);
296         } else if (inode) {
297                 pdata = container_of(inode->i_cdev,
298                                 struct nvhost_device_data, cdev);
299                 pdev = pdata->pdev;
300         } else
301                 return -EINVAL;
302
303         /* ..and host1x specific data */
304         host1x_pdata = dev_get_drvdata(pdev->dev.parent);
305         host = nvhost_get_host(pdev);
306
307         trace_nvhost_channel_open(dev_name(&pdev->dev));
308
309         /* If the device is in exclusive mode, make channel reservation here */
310         if (pdata->exclusive) {
311                 if (pdata->num_mapped_chs == pdata->num_channels)
312                         goto fail_mark_used;
313                 pdata->num_mapped_chs++;
314         }
315
316         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
317         if (!priv)
318                 goto fail_allocate_priv;
319         filp->private_data = priv;
320
321         /* Register this client to acm */
322         if (nvhost_module_add_client(pdev, priv))
323                 goto fail_add_client;
324
325         /* Keep devices with keepalive flag powered */
326         if (pdata->keepalive)
327                 nvhost_module_disable_poweroff(pdev);
328
329         /* Check that the device can be powered */
330         ret = nvhost_module_busy(pdev);
331         if (ret)
332                 goto fail_power_on;
333         nvhost_module_idle(pdev);
334
335         if (nvhost_dev_is_virtual(pdev)) {
336                 /* If virtual, allocate a client id on the server side. This is
337                  * needed for channel recovery, to distinguish which clients
338                  * own which gathers.
339                  */
340
341                 int virt_moduleid = vhost_virt_moduleid(pdata->moduleid);
342                 struct nvhost_virt_ctx *virt_ctx =
343                                         nvhost_get_virt_data(pdev);
344
345                 if (virt_moduleid < 0) {
346                         ret = -EINVAL;
347                         goto fail_virt_clientid;
348                 }
349
350                 priv->clientid =
351                         vhost_channel_alloc_clientid(virt_ctx->handle,
352                                                         virt_moduleid);
353                 if (priv->clientid == 0) {
354                         dev_err(&pdev->dev,
355                                 "vhost_channel_alloc_clientid failed\n");
356                         ret = -ENOMEM;
357                         goto fail_virt_clientid;
358                 }
359         } else {
360                 /* Get client id */
361                 priv->clientid = atomic_add_return(1, &host->clientid);
362                 if (!priv->clientid)
363                         priv->clientid = atomic_add_return(1, &host->clientid);
364         }
365
366         /* Initialize private structure */
367         priv->timeout = host1x_pdata->nvhost_timeout_default;
368         priv->priority = NVHOST_PRIORITY_MEDIUM;
369         priv->timeout_debug_dump = true;
370         mutex_init(&priv->ioctl_lock);
371         priv->pdev = pdev;
372
373         if (!tegra_platform_is_silicon())
374                 priv->timeout = 0;
375
376         /* if we run in map-at-submit mode but device has override
377          * flag set, respect the override flag */
378         if (pdata->resource_policy == RESOURCE_PER_DEVICE) {
379                 if (pdata->exclusive)
380                         ret = nvhost_channel_map(pdata, &priv->ch, priv);
381                 else
382                         ret = nvhost_channel_map(pdata, &priv->ch, pdata);
383                 if (ret) {
384                         pr_err("%s: failed to map channel, error: %d\n",
385                                __func__, ret);
386                         goto fail_get_channel;
387                 }
388         }
389
390         INIT_LIST_HEAD(&priv->node);
391         mutex_lock(&pdata->userctx_list_lock);
392         list_add_tail(&priv->node, &pdata->userctx_list);
393         mutex_unlock(&pdata->userctx_list_lock);
394
395         return 0;
396
397 fail_get_channel:
398 fail_virt_clientid:
399 fail_power_on:
400         if (pdata->keepalive)
401                 nvhost_module_enable_poweroff(pdev);
402         nvhost_module_remove_client(pdev, priv);
403 fail_add_client:
404         kfree(priv);
405 fail_allocate_priv:
406         if  (pdata->exclusive)
407                 pdata->num_mapped_chs--;
408 fail_mark_used:
409         return -ENOMEM;
410 }
411
/* file_operations .open: resolve the device from the inode's cdev
 * (pdev == NULL) and delegate to __nvhost_channelopen() */
static int nvhost_channelopen(struct inode *inode, struct file *filp)
{
	return __nvhost_channelopen(inode, NULL, filp);
}
416
/*
 * Attach (or detach, when args->mem == 0) a dma-buf backed error notifier
 * to the channel context. The notifier offset is validated against the
 * dma-buf size before the notifier region is zeroed and stored; the
 * stored buffer/offset are later copied into each submitted job.
 */
static int nvhost_init_error_notifier(struct nvhost_channel_userctx *ctx,
				      struct nvhost_set_error_notifier *args)
{
	struct dma_buf *dmabuf;
	void *va;
	/* end of the notifier record inside the buffer, in u64 to survive
	 * a maliciously large args->offset */
	u64 end = args->offset + sizeof(struct nvhost_notification);

	/* are we releasing old reference? */
	if (!args->mem) {
		if (ctx->error_notifier_ref)
			dma_buf_put(ctx->error_notifier_ref);
		ctx->error_notifier_ref = NULL;
		return 0;
	}

	/* take reference for the userctx */
	dmabuf = dma_buf_get(args->mem);
	if (IS_ERR(dmabuf)) {
		pr_err("%s: Invalid handle: %d\n", __func__, args->mem);
		return -EINVAL;
	}

	/* reject records past the end of the buffer; the second test
	 * catches wrap-around of offset + sizeof(notification) */
	if (end > dmabuf->size || end < sizeof(struct nvhost_notification)) {
		dma_buf_put(dmabuf);
		pr_err("%s: invalid offset\n", __func__);
		return -EINVAL;
	}

	/* map handle and clear error notifier struct */
	va = dma_buf_vmap(dmabuf);
	if (!va) {
		dma_buf_put(dmabuf);
		pr_err("%s: Cannot map notifier handle\n", __func__);
		return -ENOMEM;
	}

	memset(va + args->offset, 0, sizeof(struct nvhost_notification));
	dma_buf_vunmap(dmabuf, va);

	/* release old reference */
	if (ctx->error_notifier_ref)
		dma_buf_put(ctx->error_notifier_ref);

	/* finally, store error notifier data */
	ctx->error_notifier_ref = dmabuf;
	ctx->error_notifier_offset = args->offset;

	return 0;
}
466
467 static inline u32 get_job_fence(struct nvhost_job *job, u32 id)
468 {
469         struct nvhost_channel *ch = job->ch;
470         struct nvhost_device_data *pdata = platform_get_drvdata(ch->dev);
471         u32 fence = job->sp[id].fence;
472
473         /* take into account work done increment */
474         if (pdata->push_work_done && id == 0)
475                 return fence - 1;
476
477         /* otherwise the fence is valid "as is" */
478         return fence;
479 }
480
481 static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
482                 struct nvhost_submit_args *args)
483 {
484         struct nvhost_job *job;
485         int num_cmdbufs = args->num_cmdbufs;
486         int num_relocs = args->num_relocs;
487         int num_waitchks = args->num_waitchks;
488         int num_syncpt_incrs = args->num_syncpt_incrs;
489         struct nvhost_cmdbuf __user *cmdbufs =
490                 (struct nvhost_cmdbuf __user *)(uintptr_t)args->cmdbufs;
491         struct nvhost_cmdbuf __user *cmdbuf_exts =
492                 (struct nvhost_cmdbuf __user *)(uintptr_t)args->cmdbuf_exts;
493         struct nvhost_reloc __user *relocs =
494                 (struct nvhost_reloc __user *)(uintptr_t)args->relocs;
495         struct nvhost_reloc_shift __user *reloc_shifts =
496                 (struct nvhost_reloc_shift __user *)
497                                 (uintptr_t)args->reloc_shifts;
498         struct nvhost_waitchk __user *waitchks =
499                 (struct nvhost_waitchk __user *)(uintptr_t)args->waitchks;
500         struct nvhost_syncpt_incr __user *syncpt_incrs =
501                 (struct nvhost_syncpt_incr __user *)
502                                 (uintptr_t)args->syncpt_incrs;
503         u32 __user *fences = (u32 __user *)(uintptr_t)args->fences;
504         u32 __user *class_ids = (u32 __user *)(uintptr_t)args->class_ids;
505         struct nvhost_device_data *pdata = platform_get_drvdata(ctx->pdev);
506
507         const u32 *syncpt_array =
508                 (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) ?
509                 ctx->syncpts :
510                 ctx->ch->syncpts;
511         u32 *local_class_ids = NULL;
512         int err, i;
513
514         job = nvhost_job_alloc(ctx->ch,
515                         num_cmdbufs,
516                         num_relocs,
517                         num_waitchks,
518                         num_syncpt_incrs);
519         if (!job)
520                 return -ENOMEM;
521
522         job->num_relocs = args->num_relocs;
523         job->num_waitchk = args->num_waitchks;
524         job->num_syncpts = args->num_syncpt_incrs;
525         job->priority = ctx->priority;
526         job->clientid = ctx->clientid;
527         job->client_managed_syncpt =
528                 (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) ?
529                 ctx->client_managed_syncpt : ctx->ch->client_managed_syncpt;
530
531         /* copy error notifier settings for this job */
532         if (ctx->error_notifier_ref) {
533                 get_dma_buf(ctx->error_notifier_ref);
534                 job->error_notifier_ref = ctx->error_notifier_ref;
535                 job->error_notifier_offset = ctx->error_notifier_offset;
536         }
537
538         /* mass copy class_ids */
539         if (args->class_ids) {
540                 local_class_ids = kzalloc(sizeof(u32) * num_cmdbufs,
541                         GFP_KERNEL);
542                 if (!local_class_ids) {
543                         err = -ENOMEM;
544                         goto fail;
545                 }
546                 err = copy_from_user(local_class_ids, class_ids,
547                         sizeof(u32) * num_cmdbufs);
548                 if (err) {
549                         err = -EINVAL;
550                         goto fail;
551                 }
552         }
553
554         for (i = 0; i < num_cmdbufs; ++i) {
555                 struct nvhost_cmdbuf cmdbuf;
556                 struct nvhost_cmdbuf_ext cmdbuf_ext;
557                 u32 class_id = class_ids ? local_class_ids[i] : 0;
558
559                 err = copy_from_user(&cmdbuf, cmdbufs + i, sizeof(cmdbuf));
560                 if (err)
561                         goto fail;
562
563                 cmdbuf_ext.pre_fence = -1;
564                 if (cmdbuf_exts)
565                         err = copy_from_user(&cmdbuf_ext,
566                                         cmdbuf_exts + i, sizeof(cmdbuf_ext));
567                 if (err)
568                         cmdbuf_ext.pre_fence = -1;
569
570                 /* verify that the given class id is valid for this engine */
571                 if (class_id &&
572                     class_id != pdata->class &&
573                     class_id != NV_HOST1X_CLASS_ID) {
574                         err = -EINVAL;
575                         goto fail;
576                 }
577
578                 nvhost_job_add_gather(job, cmdbuf.mem, cmdbuf.words,
579                                       cmdbuf.offset, class_id,
580                                       cmdbuf_ext.pre_fence);
581         }
582
583         kfree(local_class_ids);
584         local_class_ids = NULL;
585
586         err = copy_from_user(job->relocarray,
587                         relocs, sizeof(*relocs) * num_relocs);
588         if (err)
589                 goto fail;
590
591         err = copy_from_user(job->relocshiftarray,
592                         reloc_shifts, sizeof(*reloc_shifts) * num_relocs);
593         if (err)
594                 goto fail;
595
596         err = copy_from_user(job->waitchk,
597                         waitchks, sizeof(*waitchks) * num_waitchks);
598         if (err)
599                 goto fail;
600
601         /*
602          * Go through each syncpoint from userspace. Here we:
603          * - Copy syncpoint information
604          * - Validate each syncpoint
605          * - Determine the index of hwctx syncpoint in the table
606          */
607
608         for (i = 0; i < num_syncpt_incrs; ++i) {
609                 struct nvhost_syncpt_incr sp;
610                 bool found = false;
611                 int j;
612
613                 /* Copy */
614                 err = copy_from_user(&sp, syncpt_incrs + i, sizeof(sp));
615                 if (err)
616                         goto fail;
617
618                 /* Validate the trivial case */
619                 if (sp.syncpt_id == 0) {
620                         err = -EINVAL;
621                         goto fail;
622                 }
623
624                 /* ..and then ensure that the syncpoints have been reserved
625                  * for this client */
626                 for (j = 0; j < NVHOST_MODULE_MAX_SYNCPTS; j++) {
627                         if (syncpt_array[j] == sp.syncpt_id) {
628                                 found = true;
629                                 break;
630                         }
631                 }
632
633                 if (!found) {
634                         err = -EINVAL;
635                         goto fail;
636                 }
637
638                 /* Store and get a reference */
639                 job->sp[i].id = sp.syncpt_id;
640                 job->sp[i].incrs = sp.syncpt_incrs;
641         }
642
643         trace_nvhost_channel_submit(ctx->pdev->name,
644                 job->num_gathers, job->num_relocs, job->num_waitchk,
645                 job->sp[0].id,
646                 job->sp[0].incrs);
647
648         err = nvhost_module_busy(ctx->pdev);
649         if (err)
650                 goto fail;
651
652         err = nvhost_job_pin(job, &nvhost_get_host(ctx->pdev)->syncpt);
653         nvhost_module_idle(ctx->pdev);
654         if (err)
655                 goto fail;
656
657         if (args->timeout)
658                 job->timeout = min(ctx->timeout, args->timeout);
659         else
660                 job->timeout = ctx->timeout;
661         job->timeout_debug_dump = ctx->timeout_debug_dump;
662
663         err = nvhost_channel_submit(job);
664         if (err)
665                 goto fail_submit;
666
667         /* Deliver multiple fences back to the userspace */
668         if (fences)
669                 for (i = 0; i < num_syncpt_incrs; ++i) {
670                         u32 fence = get_job_fence(job, i);
671                         err = copy_to_user(fences, &fence, sizeof(u32));
672                         if (err)
673                                 break;
674                         fences++;
675                 }
676
677         /* Deliver the fence using the old mechanism _only_ if a single
678          * syncpoint is used. */
679
680         if (args->flags & BIT(NVHOST_SUBMIT_FLAG_SYNC_FENCE_FD)) {
681                 struct nvhost_ctrl_sync_fence_info pts[num_syncpt_incrs];
682
683                 for (i = 0; i < num_syncpt_incrs; i++) {
684                         pts[i].id = job->sp[i].id;
685                         pts[i].thresh = get_job_fence(job, i);
686                 }
687
688                 err = nvhost_sync_create_fence_fd(ctx->pdev,
689                                 pts, num_syncpt_incrs, "fence", &args->fence);
690                 if (err)
691                         goto fail;
692         } else if (num_syncpt_incrs == 1)
693                 args->fence =  get_job_fence(job, 0);
694         else
695                 args->fence = 0;
696
697         nvhost_job_put(job);
698
699         return 0;
700
701 fail_submit:
702         nvhost_job_unpin(job);
703 fail:
704         nvhost_job_put(job);
705         kfree(local_class_ids);
706
707         nvhost_err(&pdata->pdev->dev, "failed with err %d\n", err);
708
709         return err;
710 }
711
712 static int moduleid_to_index(struct platform_device *dev, u32 moduleid)
713 {
714         int i;
715         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
716
717         for (i = 0; i < NVHOST_MODULE_MAX_CLOCKS; i++) {
718                 if (pdata->clocks[i].moduleid == moduleid)
719                         return i;
720         }
721
722         /* Old user space is sending a random number in args. Return clock
723          * zero in these cases. */
724         return 0;
725 }
726
727 static int nvhost_ioctl_channel_set_rate(struct nvhost_channel_userctx *ctx,
728         struct nvhost_clk_rate_args *arg)
729 {
730         u32 moduleid = (arg->moduleid >> NVHOST_MODULE_ID_BIT_POS)
731                         & ((1 << NVHOST_MODULE_ID_BIT_WIDTH) - 1);
732         u32 attr = (arg->moduleid >> NVHOST_CLOCK_ATTR_BIT_POS)
733                         & ((1 << NVHOST_CLOCK_ATTR_BIT_WIDTH) - 1);
734         int index = moduleid ?
735                         moduleid_to_index(ctx->pdev, moduleid) : 0;
736         int err;
737
738         err = nvhost_module_set_rate(ctx->pdev, ctx, arg->rate, index, attr);
739         if (!tegra_platform_is_silicon() && err) {
740                 nvhost_dbg(dbg_clk, "ignoring error: module=%u, attr=%u, index=%d, err=%d",
741                            moduleid, attr, index, err);
742                 err = 0;
743         }
744
745         return err;
746 }
747
748 static int nvhost_ioctl_channel_get_rate(struct nvhost_channel_userctx *ctx,
749         u32 moduleid, u32 *rate)
750 {
751         int index = moduleid ? moduleid_to_index(ctx->pdev, moduleid) : 0;
752         int err;
753
754         err = nvhost_module_get_rate(ctx->pdev, (unsigned long *)rate, index);
755         if (!tegra_platform_is_silicon() && err) {
756                 nvhost_dbg(dbg_clk, "ignoring error: module=%u, rate=%u, error=%d",
757                            moduleid, *rate, err);
758                 err = 0;
759                 /* fake the return value */
760                 *rate = 32 * 1024;
761         }
762
763         return err;
764 }
765
/*
 * NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR handler: read or write num_offsets
 * register blocks of block_size bytes each. Transfers are staged through
 * a 64-word stack buffer in batches; virtualized devices delegate the
 * whole transaction to the host.
 */
static int nvhost_ioctl_channel_module_regrdwr(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_ctrl_module_regrdwr_args *args)
{
	u32 num_offsets = args->num_offsets;
	u32 __user *offsets = (u32 __user *)(uintptr_t)args->offsets;
	u32 __user *values = (u32 __user *)(uintptr_t)args->values;
	u32 vals[64];	/* staging buffer: one batch of register words */
	struct platform_device *ndev;

	trace_nvhost_ioctl_channel_module_regrdwr(args->id,
		args->num_offsets, args->write);

	/* Check that there is something to read and that block size is
	 * u32 aligned */
	if (num_offsets == 0 || args->block_size & 3)
		return -EINVAL;

	ndev = ctx->pdev;

	/* virtualized devices forward the access to the server side */
	if (nvhost_dev_is_virtual(ndev))
		return vhost_rdwr_module_regs(ndev, num_offsets,
				args->block_size, offsets, values, args->write);

	while (num_offsets--) {
		int err;
		u32 offs;
		int remaining = args->block_size >> 2;

		if (get_user(offs, offsets))
			return -EFAULT;

		offsets++;
		/* transfer this block in batches of at most 64 words,
		 * bounded by the validated register range */
		while (remaining) {
			int batch = min(remaining, 64);
			if (args->write) {
				if (copy_from_user(vals, values,
						batch * sizeof(u32)))
					return -EFAULT;

				err = nvhost_write_module_regs(ndev,
					offs, batch, vals);
				if (err)
					return err;
			} else {
				err = nvhost_read_module_regs(ndev,
						offs, batch, vals);
				if (err)
					return err;

				if (copy_to_user(values, vals,
						batch * sizeof(u32)))
					return -EFAULT;
			}

			remaining -= batch;
			offs += batch * sizeof(u32);
			values += batch;
		}
	}

	return 0;
}
829
830 static u32 create_mask(u32 *words, int num)
831 {
832         int i;
833         u32 word = 0;
834         for (i = 0; i < num; i++) {
835                 if (!words[i] || words[i] > 31)
836                         continue;
837                 word |= BIT(words[i]);
838         }
839
840         return word;
841 }
842
843 static u32 nvhost_ioctl_channel_get_syncpt_mask(
844                 struct nvhost_channel_userctx *priv)
845 {
846         struct nvhost_device_data *pdata = platform_get_drvdata(priv->pdev);
847         u32 mask;
848
849         if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE)
850                 mask = create_mask(priv->syncpts, NVHOST_MODULE_MAX_SYNCPTS);
851         else
852                 mask = create_mask(priv->ch->syncpts,
853                                                 NVHOST_MODULE_MAX_SYNCPTS);
854
855         return mask;
856 }
857
/*
 * Return the channel-wide syncpoint at @index, allocating a host-managed
 * one on first use. Serialized with ch->syncpts_lock so concurrent
 * openers cannot double-allocate the same slot. Returns 0 if allocation
 * fails.
 */
static u32 nvhost_ioctl_channel_get_syncpt_channel(struct nvhost_channel *ch,
		struct nvhost_device_data *pdata, u32 index)
{
	u32 id;

	mutex_lock(&ch->syncpts_lock);

	/* if we already have required syncpt then return it ... */
	id = ch->syncpts[index];
	if (id)
		goto exit_unlock;

	/* ... otherwise get a new syncpt dynamically */
	id = nvhost_get_syncpt_host_managed(pdata->pdev, index, NULL);
	if (!id)
		goto exit_unlock;

	/* ... and store it for further references */
	ch->syncpts[index] = id;

exit_unlock:
	mutex_unlock(&ch->syncpts_lock);
	return id;
}
882
883 static u32 nvhost_ioctl_channel_get_syncpt_instance(
884                 struct nvhost_channel_userctx *ctx,
885                 struct nvhost_device_data *pdata, u32 index)
886 {
887         u32 id;
888
889         /* if we already have required syncpt then return it ... */
890         if (ctx->syncpts[index]) {
891                 id = ctx->syncpts[index];
892                 return id;
893         }
894
895         /* ... otherwise get a new syncpt dynamically */
896         id = nvhost_get_syncpt_host_managed(pdata->pdev, index, NULL);
897         if (!id)
898                 return 0;
899
900         /* ... and store it for further references */
901         ctx->syncpts[index] = id;
902
903         return id;
904 }
905
906 static int nvhost_ioctl_channel_get_client_syncpt(
907                 struct nvhost_channel_userctx *ctx,
908                 struct nvhost_get_client_managed_syncpt_arg *args)
909 {
910         struct nvhost_device_data *pdata = platform_get_drvdata(ctx->pdev);
911         const char __user *args_name =
912                 (const char __user *)(uintptr_t)args->name;
913         char name[32];
914         char set_name[32];
915
916         /* prepare syncpoint name (in case it is needed) */
917         if (args_name) {
918                 if (strncpy_from_user(name, args_name, sizeof(name)) < 0)
919                         return -EFAULT;
920                 name[sizeof(name) - 1] = '\0';
921         } else {
922                 name[0] = '\0';
923         }
924
925         snprintf(set_name, sizeof(set_name),
926                 "%s_%s", dev_name(&ctx->pdev->dev), name);
927
928         if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
929                 if (!ctx->client_managed_syncpt)
930                         ctx->client_managed_syncpt =
931                                 nvhost_get_syncpt_client_managed(pdata->pdev,
932                                                                 set_name);
933                 args->value = ctx->client_managed_syncpt;
934         } else {
935                 struct nvhost_channel *ch = ctx->ch;
936                 mutex_lock(&ch->syncpts_lock);
937                 if (!ch->client_managed_syncpt)
938                         ch->client_managed_syncpt =
939                                 nvhost_get_syncpt_client_managed(pdata->pdev,
940                                                                 set_name);
941                 mutex_unlock(&ch->syncpts_lock);
942                 args->value = ch->client_managed_syncpt;
943         }
944
945         if (!args->value)
946                 return -EAGAIN;
947
948         return 0;
949 }
950
951 static long nvhost_channelctl(struct file *filp,
952         unsigned int cmd, unsigned long arg)
953 {
954         struct nvhost_channel_userctx *priv = filp->private_data;
955         struct nvhost_master *host;
956         struct device *dev;
957         u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
958         int err = 0;
959
960         if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
961                 (_IOC_NR(cmd) == 0) ||
962                 (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
963                 (_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
964                 return -EFAULT;
965
966         if (_IOC_DIR(cmd) & _IOC_WRITE) {
967                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
968                         return -EFAULT;
969         }
970
971         /* serialize calls from this fd */
972         mutex_lock(&priv->ioctl_lock);
973         if (!priv->pdev) {
974                 pr_warn("Channel already unmapped\n");
975                 mutex_unlock(&priv->ioctl_lock);
976                 return -EFAULT;
977         }
978
979         host = nvhost_get_host(priv->pdev);
980         dev = &priv->pdev->dev;
981         switch (cmd) {
982         case NVHOST_IOCTL_CHANNEL_OPEN:
983         {
984                 int fd;
985                 struct file *file;
986                 char *name;
987
988                 err = get_unused_fd_flags(O_RDWR);
989                 if (err < 0)
990                         break;
991                 fd = err;
992
993                 name = kasprintf(GFP_KERNEL, "nvhost-%s-fd%d",
994                                 dev_name(dev), fd);
995                 if (!name) {
996                         err = -ENOMEM;
997                         put_unused_fd(fd);
998                         break;
999                 }
1000
1001                 file = anon_inode_getfile(name, filp->f_op, NULL, O_RDWR);
1002                 kfree(name);
1003                 if (IS_ERR(file)) {
1004                         err = PTR_ERR(file);
1005                         put_unused_fd(fd);
1006                         break;
1007                 }
1008                 fd_install(fd, file);
1009
1010                 err = __nvhost_channelopen(NULL, priv->pdev, file);
1011                 if (err) {
1012                         put_unused_fd(fd);
1013                         fput(file);
1014                         break;
1015                 }
1016
1017                 ((struct nvhost_channel_open_args *)buf)->channel_fd = fd;
1018                 break;
1019         }
1020         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
1021         {
1022                 ((struct nvhost_get_param_args *)buf)->value =
1023                         nvhost_ioctl_channel_get_syncpt_mask(priv);
1024                 break;
1025         }
1026         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINT:
1027         {
1028                 struct nvhost_device_data *pdata =
1029                         platform_get_drvdata(priv->pdev);
1030                 struct nvhost_get_param_arg *arg =
1031                         (struct nvhost_get_param_arg *)buf;
1032
1033                 if (arg->param >= NVHOST_MODULE_MAX_SYNCPTS) {
1034                         err = -EINVAL;
1035                         break;
1036                 }
1037
1038                 if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE)
1039                         arg->value = nvhost_ioctl_channel_get_syncpt_instance(
1040                                                 priv, pdata, arg->param);
1041                 else
1042                         arg->value = nvhost_ioctl_channel_get_syncpt_channel(
1043                                                 priv->ch, pdata, arg->param);
1044                 if (!arg->value) {
1045                         err = -EAGAIN;
1046                         break;
1047                 }
1048                 break;
1049         }
1050         case NVHOST_IOCTL_CHANNEL_GET_CLIENT_MANAGED_SYNCPOINT:
1051         {
1052                 err = nvhost_ioctl_channel_get_client_syncpt(priv,
1053                         (struct nvhost_get_client_managed_syncpt_arg *)buf);
1054                 break;
1055         }
1056         case NVHOST_IOCTL_CHANNEL_FREE_CLIENT_MANAGED_SYNCPOINT:
1057                 break;
1058         case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
1059         {
1060                 ((struct nvhost_get_param_args *)buf)->value = 0;
1061                 break;
1062         }
1063         case NVHOST_IOCTL_CHANNEL_GET_WAITBASE:
1064         {
1065                 err = -EINVAL;
1066                 break;
1067         }
1068         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
1069         {
1070                 struct nvhost_device_data *pdata = \
1071                         platform_get_drvdata(priv->pdev);
1072                 ((struct nvhost_get_param_args *)buf)->value =
1073                         create_mask(pdata->modulemutexes,
1074                                         NVHOST_MODULE_MAX_MODMUTEXES);
1075                 break;
1076         }
1077         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEX:
1078         {
1079                 struct nvhost_device_data *pdata = \
1080                         platform_get_drvdata(priv->pdev);
1081                 struct nvhost_get_param_arg *arg =
1082                         (struct nvhost_get_param_arg *)buf;
1083
1084                 if (arg->param >= NVHOST_MODULE_MAX_MODMUTEXES ||
1085                     !pdata->modulemutexes[arg->param]) {
1086                         err = -EINVAL;
1087                         break;
1088                 }
1089
1090                 arg->value = pdata->modulemutexes[arg->param];
1091                 break;
1092         }
1093         case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
1094                 break;
1095         case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
1096         {
1097                 struct nvhost_clk_rate_args *arg =
1098                                 (struct nvhost_clk_rate_args *)buf;
1099
1100                 err = nvhost_ioctl_channel_get_rate(priv,
1101                                 arg->moduleid, &arg->rate);
1102                 break;
1103         }
1104         case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
1105         {
1106                 struct nvhost_clk_rate_args *arg =
1107                                 (struct nvhost_clk_rate_args *)buf;
1108
1109                 /* if virtualized, client requests to change clock rate
1110                  * are ignored
1111                  */
1112                 if (nvhost_dev_is_virtual(priv->pdev))
1113                         break;
1114
1115                 err = nvhost_ioctl_channel_set_rate(priv, arg);
1116                 break;
1117         }
1118         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
1119         {
1120                 u32 timeout =
1121                         (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
1122
1123                 priv->timeout = timeout;
1124                 dev_dbg(&priv->pdev->dev,
1125                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
1126                         __func__, priv->timeout, priv);
1127                 break;
1128         }
1129         case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
1130                 ((struct nvhost_get_param_args *)buf)->value = false;
1131                 break;
1132         case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
1133                 priv->priority =
1134                         (u32)((struct nvhost_set_priority_args *)buf)->priority;
1135                 break;
1136         case NVHOST32_IOCTL_CHANNEL_MODULE_REGRDWR:
1137         {
1138                 struct nvhost32_ctrl_module_regrdwr_args *args32 =
1139                         (struct nvhost32_ctrl_module_regrdwr_args *)buf;
1140                 struct nvhost_ctrl_module_regrdwr_args args;
1141                 args.id = args32->id;
1142                 args.num_offsets = args32->num_offsets;
1143                 args.block_size = args32->block_size;
1144                 args.offsets = args32->offsets;
1145                 args.values = args32->values;
1146                 args.write = args32->write;
1147                 err = nvhost_ioctl_channel_module_regrdwr(priv, &args);
1148                 break;
1149         }
1150         case NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR:
1151                 err = nvhost_ioctl_channel_module_regrdwr(priv, (void *)buf);
1152                 break;
1153         case NVHOST32_IOCTL_CHANNEL_SUBMIT:
1154         {
1155                 struct nvhost_device_data *pdata =
1156                         platform_get_drvdata(priv->pdev);
1157                 struct nvhost32_submit_args *args32 = (void *)buf;
1158                 struct nvhost_submit_args args;
1159                 void *identifier;
1160
1161                 if (pdata->resource_policy == RESOURCE_PER_DEVICE &&
1162                     !pdata->exclusive)
1163                         identifier = (void *)pdata;
1164                 else
1165                         identifier = (void *)priv;
1166
1167                 memset(&args, 0, sizeof(args));
1168                 args.submit_version = args32->submit_version;
1169                 args.num_syncpt_incrs = args32->num_syncpt_incrs;
1170                 args.num_cmdbufs = args32->num_cmdbufs;
1171                 args.num_relocs = args32->num_relocs;
1172                 args.num_waitchks = args32->num_waitchks;
1173                 args.timeout = args32->timeout;
1174                 args.syncpt_incrs = args32->syncpt_incrs;
1175                 args.fence = args32->fence;
1176
1177                 args.cmdbufs = args32->cmdbufs;
1178                 args.relocs = args32->relocs;
1179                 args.reloc_shifts = args32->reloc_shifts;
1180                 args.waitchks = args32->waitchks;
1181                 args.class_ids = args32->class_ids;
1182                 args.fences = args32->fences;
1183
1184                 /* first, get a channel */
1185                 err = nvhost_channel_map(pdata, &priv->ch, identifier);
1186                 if (err)
1187                         break;
1188
1189                 /* ..then, synchronize syncpoint information.
1190                  *
1191                  * This information is updated only in this ioctl and
1192                  * channel destruction. We already hold channel
1193                  * reference and this ioctl is serialized => no-one is
1194                  * modifying the syncpoint field concurrently.
1195                  *
1196                  * Synchronization is not destructing anything
1197                  * in the structure; We can only allocate new
1198                  * syncpoints, and hence old ones cannot be released
1199                  * by following operation. If some syncpoint is stored
1200                  * into the channel structure, it remains there. */
1201
1202                 if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
1203                         memcpy(priv->ch->syncpts, priv->syncpts,
1204                                sizeof(priv->syncpts));
1205                         priv->ch->client_managed_syncpt =
1206                                 priv->client_managed_syncpt;
1207                 }
1208
1209                 /* submit work */
1210                 err = nvhost_ioctl_channel_submit(priv, &args);
1211
1212                 /* ..and drop the local reference */
1213                 nvhost_putchannel(priv->ch, 1);
1214
1215                 args32->fence = args.fence;
1216
1217                 break;
1218         }
1219         case NVHOST_IOCTL_CHANNEL_SUBMIT:
1220         {
1221                 struct nvhost_device_data *pdata =
1222                         platform_get_drvdata(priv->pdev);
1223                 void *identifier;
1224
1225                 if (pdata->resource_policy == RESOURCE_PER_DEVICE &&
1226                     !pdata->exclusive)
1227                         identifier = (void *)pdata;
1228                 else
1229                         identifier = (void *)priv;
1230
1231                 /* first, get a channel */
1232                 err = nvhost_channel_map(pdata, &priv->ch, identifier);
1233                 if (err)
1234                         break;
1235
1236                 /* ..then, synchronize syncpoint information.
1237                  *
1238                  * This information is updated only in this ioctl and
1239                  * channel destruction. We already hold channel
1240                  * reference and this ioctl is serialized => no-one is
1241                  * modifying the syncpoint field concurrently.
1242                  *
1243                  * Synchronization is not destructing anything
1244                  * in the structure; We can only allocate new
1245                  * syncpoints, and hence old ones cannot be released
1246                  * by following operation. If some syncpoint is stored
1247                  * into the channel structure, it remains there. */
1248
1249                 if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
1250                         memcpy(priv->ch->syncpts, priv->syncpts,
1251                                sizeof(priv->syncpts));
1252                         priv->ch->client_managed_syncpt =
1253                                 priv->client_managed_syncpt;
1254                 }
1255
1256                 /* submit work */
1257                 err = nvhost_ioctl_channel_submit(priv, (void *)buf);
1258
1259                 /* ..and drop the local reference */
1260                 nvhost_putchannel(priv->ch, 1);
1261
1262                 break;
1263         }
1264         case NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
1265                 err = nvhost_init_error_notifier(priv,
1266                         (struct nvhost_set_error_notifier *)buf);
1267                 break;
1268         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
1269         {
1270                 u32 timeout =
1271                         (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
1272                 bool timeout_debug_dump = !((u32)
1273                         ((struct nvhost_set_timeout_ex_args *)buf)->flags &
1274                         (1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
1275                 priv->timeout = timeout;
1276                 priv->timeout_debug_dump = timeout_debug_dump;
1277                 dev_dbg(&priv->pdev->dev,
1278                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
1279                         __func__, priv->timeout, priv);
1280                 break;
1281         }
1282         default:
1283                 nvhost_dbg_info("unrecognized ioctl cmd: 0x%x", cmd);
1284                 err = -ENOTTY;
1285                 break;
1286         }
1287
1288         mutex_unlock(&priv->ioctl_lock);
1289
1290         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
1291                 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
1292
1293         return err;
1294 }
1295
/* File operations for the per-channel device node.
 * NOTE(review): compat_ioctl reuses nvhost_channelctl directly; this
 * presumably relies on the NVHOST32_* ioctl variants handling every
 * layout difference — verify for any newly added ioctls. */
static const struct file_operations nvhost_channelops = {
        .owner = THIS_MODULE,
        .release = nvhost_channelrelease,
        .open = nvhost_channelopen,
#ifdef CONFIG_COMPAT
        .compat_ioctl = nvhost_channelctl,
#endif
        .unlocked_ioctl = nvhost_channelctl
};
1305
1306 static const char *get_device_name_for_dev(struct platform_device *dev)
1307 {
1308         struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1309
1310         if (pdata->devfs_name)
1311                 return pdata->devfs_name;
1312
1313         return dev->name;
1314 }
1315
1316 static struct device *nvhost_client_device_create(
1317         struct platform_device *pdev, struct cdev *cdev,
1318         const char *cdev_name, dev_t devno,
1319         const struct file_operations *ops)
1320 {
1321         struct nvhost_master *host = nvhost_get_host(pdev);
1322         const char *use_dev_name;
1323         struct device *dev;
1324         int err;
1325
1326         nvhost_dbg_fn("");
1327
1328         BUG_ON(!host);
1329
1330         cdev_init(cdev, ops);
1331         cdev->owner = THIS_MODULE;
1332
1333         err = cdev_add(cdev, devno, 1);
1334         if (err < 0) {
1335                 dev_err(&pdev->dev,
1336                         "failed to add cdev\n");
1337                 return NULL;
1338         }
1339         use_dev_name = get_device_name_for_dev(pdev);
1340
1341         dev = device_create(host->nvhost_class,
1342                         NULL, devno, NULL,
1343                         (pdev->id <= 0) ?
1344                         IFACE_NAME "-%s%s" :
1345                         IFACE_NAME "-%s%s.%d",
1346                         cdev_name, use_dev_name, pdev->id);
1347
1348         if (IS_ERR(dev)) {
1349                 err = PTR_ERR(dev);
1350                 dev_err(&pdev->dev,
1351                         "failed to create %s %s device for %s\n",
1352                         use_dev_name, cdev_name, pdev->name);
1353                 return NULL;
1354         }
1355
1356         return dev;
1357 }
1358
1359 #define NVHOST_NUM_CDEV 4
1360 int nvhost_client_user_init(struct platform_device *dev)
1361 {
1362         dev_t devno;
1363         int err;
1364         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1365
1366         /* reserve 3 minor #s for <dev>, and ctrl-<dev> */
1367
1368         err = alloc_chrdev_region(&devno, 0, NVHOST_NUM_CDEV, IFACE_NAME);
1369         if (err < 0) {
1370                 dev_err(&dev->dev, "failed to allocate devno\n");
1371                 goto fail;
1372         }
1373         pdata->cdev_region = devno;
1374
1375         pdata->node = nvhost_client_device_create(dev, &pdata->cdev,
1376                                 "", devno, &nvhost_channelops);
1377         if (pdata->node == NULL)
1378                 goto fail;
1379
1380         /* module control (npn-channel based, global) interface */
1381         if (pdata->ctrl_ops) {
1382                 ++devno;
1383                 pdata->ctrl_node = nvhost_client_device_create(dev,
1384                                         &pdata->ctrl_cdev, "ctrl-",
1385                                         devno, pdata->ctrl_ops);
1386                 if (pdata->ctrl_node == NULL)
1387                         goto fail;
1388         }
1389
1390         return 0;
1391 fail:
1392         return err;
1393 }
1394
1395 static void nvhost_client_user_deinit(struct platform_device *dev)
1396 {
1397         struct nvhost_master *nvhost_master = nvhost_get_host(dev);
1398         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1399
1400         if (pdata->node) {
1401                 device_destroy(nvhost_master->nvhost_class, pdata->cdev.dev);
1402                 cdev_del(&pdata->cdev);
1403         }
1404
1405         if (pdata->as_node) {
1406                 device_destroy(nvhost_master->nvhost_class, pdata->as_cdev.dev);
1407                 cdev_del(&pdata->as_cdev);
1408         }
1409
1410         if (pdata->ctrl_node) {
1411                 device_destroy(nvhost_master->nvhost_class,
1412                                pdata->ctrl_cdev.dev);
1413                 cdev_del(&pdata->ctrl_cdev);
1414         }
1415
1416         unregister_chrdev_region(pdata->cdev_region, NVHOST_NUM_CDEV);
1417 }
1418
/*
 * Register a client device with nvhost: initialize the userctx list,
 * debugfs, chardev nodes and device-list entry, set DMA parameters,
 * and run the optional scaling/hw init hooks.
 *
 * Returns 0 on success or a negative errno; on failure the userspace
 * nodes and debugfs entries created here are torn down again.
 */
int nvhost_client_device_init(struct platform_device *dev)
{
        int err;
        struct nvhost_master *nvhost_master = nvhost_get_host(dev);
        struct nvhost_device_data *pdata = platform_get_drvdata(dev);

        mutex_init(&pdata->userctx_list_lock);
        INIT_LIST_HEAD(&pdata->userctx_list);

        /* Create debugfs directory for the device */
        nvhost_device_debug_init(dev);

        err = nvhost_client_user_init(dev);
        if (err)
                goto fail;

        err = nvhost_device_list_add(dev);
        if (err)
                goto fail;

        if (pdata->scaling_init)
                pdata->scaling_init(dev);

        /* reset syncpoint values for this unit */
        err = nvhost_module_busy(nvhost_master->dev);
        if (err)
                goto fail_busy;

        nvhost_module_idle(nvhost_master->dev);

        /* Initialize dma parameters */
        dev->dev.dma_parms = &pdata->dma_parms;
        dma_set_max_seg_size(&dev->dev, UINT_MAX);

        dev_info(&dev->dev, "initialized\n");

        /* per-channel-instance devices map their channel at submit time */
        if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
                nvhost_master->info.channel_policy = MAP_CHANNEL_ON_SUBMIT;
                nvhost_update_characteristics(dev);
        }

        /* hw_init's return value becomes ours when the hook exists */
        if (pdata->hw_init)
                return pdata->hw_init(dev);

        return 0;

fail_busy:
        /* Remove from nvhost device list */
        nvhost_device_list_remove(dev);
fail:
        /* Add clean-up */
        dev_err(&dev->dev, "failed to init client device\n");
        nvhost_client_user_deinit(dev);
        nvhost_device_debug_deinit(dev);
        return err;
}
EXPORT_SYMBOL(nvhost_client_device_init);
1476
/*
 * Tear down a client device in the reverse order of initialization:
 * module resources, device-list entry, userspace nodes, debugfs.
 * Always returns 0.
 */
int nvhost_client_device_release(struct platform_device *dev)
{
        nvhost_module_deinit(dev);              /* nvhost module resources */

        nvhost_device_list_remove(dev);         /* nvhost device list */

        nvhost_client_user_deinit(dev);         /* chardev + device nodes */

        nvhost_device_debug_deinit(dev);        /* debugfs entries */

        return 0;
}
EXPORT_SYMBOL(nvhost_client_device_release);
1494
1495 int nvhost_device_get_resources(struct platform_device *dev)
1496 {
1497         int i;
1498         void __iomem *regs = NULL;
1499         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1500
1501         for (i = 0; i < dev->num_resources; i++) {
1502                 struct resource *r = NULL;
1503
1504                 r = platform_get_resource(dev, IORESOURCE_MEM, i);
1505                 /* We've run out of mem resources */
1506                 if (!r)
1507                         break;
1508
1509                 regs = devm_request_and_ioremap(&dev->dev, r);
1510                 if (!regs)
1511                         goto fail;
1512
1513                 pdata->aperture[i] = regs;
1514         }
1515
1516         return 0;
1517
1518 fail:
1519         dev_err(&dev->dev, "failed to get register memory\n");
1520
1521         return -ENXIO;
1522 }
1523
/* Exported thin wrapper so client drivers can map their MMIO apertures
 * through the common nvhost_device_get_resources() helper. */
int nvhost_client_device_get_resources(struct platform_device *dev)
{
        return nvhost_device_get_resources(dev);
}
EXPORT_SYMBOL(nvhost_client_device_get_resources);
1529
1530 /* This is a simple wrapper around request_firmware that takes
1531  * 'fw_name' and if available applies a SOC relative path prefix to it.
1532  * The caller is responsible for calling release_firmware later.
1533  */
1534 const struct firmware *
1535 nvhost_client_request_firmware(struct platform_device *dev, const char *fw_name)
1536 {
1537         struct nvhost_chip_support *op = nvhost_get_chip_ops();
1538         const struct firmware *fw;
1539         char *fw_path = NULL;
1540         int path_len, err;
1541
1542         /* This field is NULL when calling from SYS_EXIT.
1543            Add a check here to prevent crash in request_firmware */
1544         if (!current->fs) {
1545                 BUG();
1546                 return NULL;
1547         }
1548
1549         if (!fw_name)
1550                 return NULL;
1551
1552         if (op->soc_name) {
1553                 path_len = strlen(fw_name) + strlen(op->soc_name);
1554                 path_len += 2; /* for the path separator and zero terminator*/
1555
1556                 fw_path = kzalloc(sizeof(*fw_path) * path_len,
1557                                      GFP_KERNEL);
1558                 if (!fw_path)
1559                         return NULL;
1560
1561                 sprintf(fw_path, "%s/%s", op->soc_name, fw_name);
1562                 fw_name = fw_path;
1563         }
1564
1565         err = request_firmware(&fw, fw_name, &dev->dev);
1566         kfree(fw_path);
1567         if (err) {
1568                 dev_err(&dev->dev, "failed to get firmware\n");
1569                 return NULL;
1570         }
1571
1572         /* note: caller must release_firmware */
1573         return fw;
1574 }
1575 EXPORT_SYMBOL(nvhost_client_request_firmware);
1576
1577 struct nvhost_channel *nvhost_find_chan_by_clientid(
1578                                 struct platform_device *pdev,
1579                                 int clientid)
1580 {
1581         struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
1582         struct nvhost_channel_userctx *ctx;
1583         struct nvhost_channel *ch = NULL;
1584
1585         mutex_lock(&pdata->userctx_list_lock);
1586         list_for_each_entry(ctx, &pdata->userctx_list, node) {
1587                 if (ctx->clientid == clientid) {
1588                         ch = ctx->ch;
1589                         break;
1590                 }
1591         }
1592         mutex_unlock(&pdata->userctx_list_lock);
1593
1594         return ch;
1595 }