rtime.felk.cvut.cz Git - sojka/nv-tegra/linux-3.10.git/blob - drivers/video/tegra/host/bus_client.c
video: tegra: host: check if offset is u32 aligned
[sojka/nv-tegra/linux-3.10.git] / drivers / video / tegra / host / bus_client.c
1 /*
2  * Tegra Graphics Host Client Module
3  *
4  * Copyright (c) 2010-2016, NVIDIA Corporation. All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #include <linux/slab.h>
20 #include <linux/string.h>
21 #include <linux/spinlock.h>
22 #include <linux/fs.h>
23 #include <linux/cdev.h>
24 #include <linux/uaccess.h>
25 #include <linux/file.h>
26 #include <linux/clk.h>
27 #include <linux/hrtimer.h>
28 #include <linux/export.h>
29 #include <linux/firmware.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/tegra-soc.h>
32 #include <linux/anon_inodes.h>
33
34 #include <trace/events/nvhost.h>
35
36 #include <linux/io.h>
37 #include <linux/string.h>
38
39 #include <linux/nvhost.h>
40 #include <linux/nvhost_ioctl.h>
41
42 #include <mach/gpufuse.h>
43
44 #include "debug.h"
45 #include "bus_client.h"
46 #include "dev.h"
47 #include "class_ids.h"
48 #include "chip_support.h"
49 #include "nvhost_acm.h"
50
51 #include "nvhost_syncpt.h"
52 #include "nvhost_channel.h"
53 #include "nvhost_job.h"
54 #include "nvhost_sync.h"
55 #include "vhost/vhost.h"
56
/*
 * Check whether hardware unit @id is bonded out (absent) on the platform.
 *
 * Only performs the check on non-silicon (simulation/emulation) platforms
 * and only when CONFIG_NVHOST_BONDOUT_CHECK is enabled; otherwise returns
 * 0 ("not bonded out").  Non-zero return means the unit is unavailable.
 */
int nvhost_check_bondout(unsigned int id)
{
#ifdef CONFIG_NVHOST_BONDOUT_CHECK
	if (!tegra_platform_is_silicon())
		return tegra_bonded_out_dev(id);
#endif
	return 0;
}
EXPORT_SYMBOL(nvhost_check_bondout);
66
67 static int validate_reg(struct platform_device *ndev, u32 offset, int count)
68 {
69         int err = 0;
70         struct resource *r;
71
72         /* check if offset is u32 aligned */
73         if (offset & 3)
74                 return -EINVAL;
75
76         r = platform_get_resource(ndev, IORESOURCE_MEM, 0);
77         if (!r) {
78                 dev_err(&ndev->dev, "failed to get memory resource\n");
79                 return -ENODEV;
80         }
81
82         if (offset + 4 * count > resource_size(r)
83                         || (offset + 4 * count < offset))
84                 err = -EPERM;
85
86         return err;
87 }
88
/*
 * Return the mapped IO aperture @index of @pdev.
 * No bounds checking is performed on @index; callers pass a valid slot.
 */
void __iomem *get_aperture(struct platform_device *pdev, int index)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);

	return pdata->aperture[index];
}
95
96 void host1x_writel(struct platform_device *pdev, u32 r, u32 v)
97 {
98         void __iomem *addr = get_aperture(pdev, 0) + r;
99         nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", pdev->name, r, v);
100         writel(v, addr);
101 }
102 EXPORT_SYMBOL_GPL(host1x_writel);
103
104 u32 host1x_readl(struct platform_device *pdev, u32 r)
105 {
106         void __iomem *addr = get_aperture(pdev, 0) + r;
107         u32 v;
108
109         nvhost_dbg(dbg_reg, " d=%s r=0x%x", pdev->name, r);
110         v = readl(addr);
111         nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", pdev->name, r, v);
112
113         return v;
114 }
115 EXPORT_SYMBOL_GPL(host1x_readl);
116
117 void host1x_channel_writel(struct nvhost_channel *ch, u32 r, u32 v)
118 {
119         void __iomem *addr = ch->aperture + r;
120         nvhost_dbg(dbg_reg, " chid=%d r=0x%x v=0x%x", ch->chid, r, v);
121         writel(v, addr);
122 }
123 EXPORT_SYMBOL_GPL(host1x_channel_writel);
124
125 u32 host1x_channel_readl(struct nvhost_channel *ch, u32 r)
126 {
127         void __iomem *addr = ch->aperture + r;
128         u32 v;
129
130         nvhost_dbg(dbg_reg, " chid=%d r=0x%x", ch->chid, r);
131         v = readl(addr);
132         nvhost_dbg(dbg_reg, " chid=%d r=0x%x v=0x%x", ch->chid, r, v);
133
134         return v;
135 }
136 EXPORT_SYMBOL_GPL(host1x_channel_readl);
137
138 void host1x_sync_writel(struct nvhost_master *dev, u32 r, u32 v)
139 {
140         void __iomem *addr = dev->sync_aperture + r;
141         nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", dev->dev->name, r, v);
142         writel(v, addr);
143 }
144 EXPORT_SYMBOL_GPL(host1x_sync_writel);
145
146 u32 host1x_sync_readl(struct nvhost_master *dev, u32 r)
147 {
148         void __iomem *addr = dev->sync_aperture + r;
149         u32 v;
150
151         nvhost_dbg(dbg_reg, " d=%s r=0x%x", dev->dev->name, r);
152         v = readl(addr);
153         nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", dev->dev->name, r, v);
154
155         return v;
156 }
157 EXPORT_SYMBOL_GPL(host1x_sync_readl);
158
159 int nvhost_read_module_regs(struct platform_device *ndev,
160                         u32 offset, int count, u32 *values)
161 {
162         int err;
163
164         /* verify offset */
165         err = validate_reg(ndev, offset, count);
166         if (err)
167                 return err;
168
169         err = nvhost_module_busy(ndev);
170         if (err)
171                 return err;
172
173         while (count--) {
174                 *(values++) = host1x_readl(ndev, offset);
175                 offset += 4;
176         }
177         rmb();
178         nvhost_module_idle(ndev);
179
180         return 0;
181 }
182
183 int nvhost_write_module_regs(struct platform_device *ndev,
184                         u32 offset, int count, const u32 *values)
185 {
186         int err;
187
188         /* verify offset */
189         err = validate_reg(ndev, offset, count);
190         if (err)
191                 return err;
192
193         err = nvhost_module_busy(ndev);
194         if (err)
195                 return err;
196
197         while (count--) {
198                 host1x_writel(ndev, offset, *(values++));
199                 offset += 4;
200         }
201         wmb();
202         nvhost_module_idle(ndev);
203
204         return 0;
205 }
206
/* Per-open-file context for a channel device node. */
struct nvhost_channel_userctx {
	struct nvhost_channel *ch;	/* channel this context submits to */
	u32 timeout;		/* submit timeout; forced to 0 on pre-silicon */
	u32 priority;		/* job priority (NVHOST_PRIORITY_*) */
	int clientid;		/* non-zero id identifying this client */
	bool timeout_debug_dump;	/* dump debug state on submit timeout */
	struct platform_device *pdev;	/* device this context was opened on */
	u32 syncpts[NVHOST_MODULE_MAX_SYNCPTS];	/* per-instance syncpoint ids */
	u32 client_managed_syncpt;	/* client-managed syncpoint id, if any */

	/* error notifiers used for channel submit timeout */
	struct dma_buf *error_notifier_ref;
	u64 error_notifier_offset;

	/* lock to protect this structure from concurrent ioctl usage */
	struct mutex ioctl_lock;

	/* used for attaching to ctx list in device pdata */
	struct list_head node;
};
227
/*
 * Release handler for the channel device node: undo everything done at
 * open time (list membership, acm registration, error notifier ref,
 * channel identifier, exclusive-mode reservation, channel/syncpoint
 * references) and free the per-fd context.  Always returns 0.
 */
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;
	struct nvhost_device_data *pdata = platform_get_drvdata(priv->pdev);
	struct nvhost_master *host = nvhost_get_host(pdata->pdev);
	void *identifier;
	int i = 0;

	trace_nvhost_channel_release(dev_name(&priv->pdev->dev));

	mutex_lock(&pdata->userctx_list_lock);
	list_del(&priv->node);
	mutex_unlock(&pdata->userctx_list_lock);

	/* remove this client from acm */
	nvhost_module_remove_client(priv->pdev, priv);

	/* drop error notifier reference */
	if (priv->error_notifier_ref)
		dma_buf_put(priv->error_notifier_ref);

	/* Clear the identifier: per-device non-exclusive channels were
	 * mapped with pdata as identifier, everything else with priv */
	if (pdata->resource_policy == RESOURCE_PER_DEVICE &&
	    !pdata->exclusive)
		identifier = (void *)pdata;
	else
		identifier = (void *)priv;
	nvhost_channel_remove_identifier(pdata, identifier);

	/* If the device is in exclusive mode, drop the reference */
	if (pdata->exclusive)
		pdata->num_mapped_chs--;

	/* drop channel reference if we took one at open time */
	if (pdata->resource_policy == RESOURCE_PER_DEVICE) {
		nvhost_putchannel(priv->ch, 1);
	} else {
		/* drop instance syncpoints reference */
		for (i = 0; i < NVHOST_MODULE_MAX_SYNCPTS; ++i) {
			if (priv->syncpts[i]) {
				nvhost_syncpt_put_ref(&host->syncpt,
						priv->syncpts[i]);
				priv->syncpts[i] = 0;
			}
		}

		if (priv->client_managed_syncpt) {
			nvhost_syncpt_put_ref(&host->syncpt,
					priv->client_managed_syncpt);
			priv->client_managed_syncpt = 0;
		}
	}

	/* balance the disable_poweroff taken at open for keepalive devices */
	if (pdata->keepalive)
		nvhost_module_enable_poweroff(priv->pdev);

	kfree(priv);
	return 0;
}
287
288 static int __nvhost_channelopen(struct inode *inode,
289                 struct platform_device *pdev,
290                 struct file *filp)
291 {
292         struct nvhost_channel_userctx *priv;
293         struct nvhost_device_data *pdata, *host1x_pdata;
294         struct nvhost_master *host;
295         int ret;
296
297         /* grab pdev and pdata based on inputs */
298         if (pdev) {
299                 pdata = platform_get_drvdata(pdev);
300         } else if (inode) {
301                 pdata = container_of(inode->i_cdev,
302                                 struct nvhost_device_data, cdev);
303                 pdev = pdata->pdev;
304         } else
305                 return -EINVAL;
306
307         /* ..and host1x specific data */
308         host1x_pdata = dev_get_drvdata(pdev->dev.parent);
309         host = nvhost_get_host(pdev);
310
311         trace_nvhost_channel_open(dev_name(&pdev->dev));
312
313         /* If the device is in exclusive mode, make channel reservation here */
314         if (pdata->exclusive) {
315                 if (pdata->num_mapped_chs == pdata->num_channels)
316                         goto fail_mark_used;
317                 pdata->num_mapped_chs++;
318         }
319
320         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
321         if (!priv)
322                 goto fail_allocate_priv;
323         filp->private_data = priv;
324
325         /* Register this client to acm */
326         if (nvhost_module_add_client(pdev, priv))
327                 goto fail_add_client;
328
329         /* Keep devices with keepalive flag powered */
330         if (pdata->keepalive)
331                 nvhost_module_disable_poweroff(pdev);
332
333         /* Check that the device can be powered */
334         ret = nvhost_module_busy(pdev);
335         if (ret)
336                 goto fail_power_on;
337         nvhost_module_idle(pdev);
338
339         if (nvhost_dev_is_virtual(pdev)) {
340                 /* If virtual, allocate a client id on the server side. This is
341                  * needed for channel recovery, to distinguish which clients
342                  * own which gathers.
343                  */
344
345                 int virt_moduleid = vhost_virt_moduleid(pdata->moduleid);
346                 struct nvhost_virt_ctx *virt_ctx =
347                                         nvhost_get_virt_data(pdev);
348
349                 if (virt_moduleid < 0) {
350                         ret = -EINVAL;
351                         goto fail_virt_clientid;
352                 }
353
354                 priv->clientid =
355                         vhost_channel_alloc_clientid(virt_ctx->handle,
356                                                         virt_moduleid);
357                 if (priv->clientid == 0) {
358                         dev_err(&pdev->dev,
359                                 "vhost_channel_alloc_clientid failed\n");
360                         ret = -ENOMEM;
361                         goto fail_virt_clientid;
362                 }
363         } else {
364                 /* Get client id */
365                 priv->clientid = atomic_add_return(1, &host->clientid);
366                 if (!priv->clientid)
367                         priv->clientid = atomic_add_return(1, &host->clientid);
368         }
369
370         /* Initialize private structure */
371         priv->timeout = host1x_pdata->nvhost_timeout_default;
372         priv->priority = NVHOST_PRIORITY_MEDIUM;
373         priv->timeout_debug_dump = true;
374         mutex_init(&priv->ioctl_lock);
375         priv->pdev = pdev;
376
377         if (!tegra_platform_is_silicon())
378                 priv->timeout = 0;
379
380         /* if we run in map-at-submit mode but device has override
381          * flag set, respect the override flag */
382         if (pdata->resource_policy == RESOURCE_PER_DEVICE) {
383                 if (pdata->exclusive)
384                         ret = nvhost_channel_map(pdata, &priv->ch, priv);
385                 else
386                         ret = nvhost_channel_map(pdata, &priv->ch, pdata);
387                 if (ret) {
388                         pr_err("%s: failed to map channel, error: %d\n",
389                                __func__, ret);
390                         goto fail_get_channel;
391                 }
392         }
393
394         INIT_LIST_HEAD(&priv->node);
395         mutex_lock(&pdata->userctx_list_lock);
396         list_add_tail(&priv->node, &pdata->userctx_list);
397         mutex_unlock(&pdata->userctx_list_lock);
398
399         return 0;
400
401 fail_get_channel:
402 fail_virt_clientid:
403 fail_power_on:
404         if (pdata->keepalive)
405                 nvhost_module_enable_poweroff(pdev);
406         nvhost_module_remove_client(pdev, priv);
407 fail_add_client:
408         kfree(priv);
409 fail_allocate_priv:
410         if  (pdata->exclusive)
411                 pdata->num_mapped_chs--;
412 fail_mark_used:
413         return -ENOMEM;
414 }
415
/* File-operations open handler: resolve the device from @inode. */
static int nvhost_channelopen(struct inode *inode, struct file *filp)
{
	return __nvhost_channelopen(inode, NULL, filp);
}
420
/*
 * Attach (or detach, when args->mem == 0) a userspace error notifier
 * buffer to this channel context.  The notifier region at args->offset
 * inside the dma-buf is validated, zeroed, and remembered so submit
 * timeouts can report errors into it.
 *
 * Returns 0 on success, -EINVAL for a bad handle or out-of-range offset,
 * -ENOMEM when the buffer cannot be mapped.
 */
static int nvhost_init_error_notifier(struct nvhost_channel_userctx *ctx,
				      struct nvhost_set_error_notifier *args)
{
	struct dma_buf *dmabuf;
	void *va;
	u64 end = args->offset + sizeof(struct nvhost_notification);

	/* are we releasing old reference? */
	if (!args->mem) {
		if (ctx->error_notifier_ref)
			dma_buf_put(ctx->error_notifier_ref);
		ctx->error_notifier_ref = NULL;
		return 0;
	}

	/* take reference for the userctx */
	dmabuf = dma_buf_get(args->mem);
	if (IS_ERR(dmabuf)) {
		pr_err("%s: Invalid handle: %d\n", __func__, args->mem);
		return -EINVAL;
	}

	/* the second clause rejects u64 wrap-around of offset + size */
	if (end > dmabuf->size || end < sizeof(struct nvhost_notification)) {
		dma_buf_put(dmabuf);
		pr_err("%s: invalid offset\n", __func__);
		return -EINVAL;
	}

	/* map handle and clear error notifier struct */
	va = dma_buf_vmap(dmabuf);
	if (!va) {
		dma_buf_put(dmabuf);
		pr_err("%s: Cannot map notifier handle\n", __func__);
		return -ENOMEM;
	}

	memset(va + args->offset, 0, sizeof(struct nvhost_notification));
	dma_buf_vunmap(dmabuf, va);

	/* release old reference only after the new buffer is validated */
	if (ctx->error_notifier_ref)
		dma_buf_put(ctx->error_notifier_ref);

	/* finally, store error notifier data */
	ctx->error_notifier_ref = dmabuf;
	ctx->error_notifier_offset = args->offset;

	return 0;
}
470
471 static inline u32 get_job_fence(struct nvhost_job *job, u32 id)
472 {
473         struct nvhost_channel *ch = job->ch;
474         struct nvhost_device_data *pdata = platform_get_drvdata(ch->dev);
475         u32 fence = job->sp[id].fence;
476
477         /* take into account work done increment */
478         if (pdata->push_work_done && id == 0)
479                 return fence - 1;
480
481         /* otherwise the fence is valid "as is" */
482         return fence;
483 }
484
485 static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
486                 struct nvhost_submit_args *args)
487 {
488         struct nvhost_job *job;
489         int num_cmdbufs = args->num_cmdbufs;
490         int num_relocs = args->num_relocs;
491         int num_waitchks = args->num_waitchks;
492         int num_syncpt_incrs = args->num_syncpt_incrs;
493         struct nvhost_cmdbuf __user *cmdbufs =
494                 (struct nvhost_cmdbuf __user *)(uintptr_t)args->cmdbufs;
495         struct nvhost_cmdbuf __user *cmdbuf_exts =
496                 (struct nvhost_cmdbuf __user *)(uintptr_t)args->cmdbuf_exts;
497         struct nvhost_reloc __user *relocs =
498                 (struct nvhost_reloc __user *)(uintptr_t)args->relocs;
499         struct nvhost_reloc_shift __user *reloc_shifts =
500                 (struct nvhost_reloc_shift __user *)
501                                 (uintptr_t)args->reloc_shifts;
502         struct nvhost_waitchk __user *waitchks =
503                 (struct nvhost_waitchk __user *)(uintptr_t)args->waitchks;
504         struct nvhost_syncpt_incr __user *syncpt_incrs =
505                 (struct nvhost_syncpt_incr __user *)
506                                 (uintptr_t)args->syncpt_incrs;
507         u32 __user *fences = (u32 __user *)(uintptr_t)args->fences;
508         u32 __user *class_ids = (u32 __user *)(uintptr_t)args->class_ids;
509         struct nvhost_device_data *pdata = platform_get_drvdata(ctx->pdev);
510
511         const u32 *syncpt_array =
512                 (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) ?
513                 ctx->syncpts :
514                 ctx->ch->syncpts;
515         u32 *local_class_ids = NULL;
516         int err, i;
517
518         job = nvhost_job_alloc(ctx->ch,
519                         num_cmdbufs,
520                         num_relocs,
521                         num_waitchks,
522                         num_syncpt_incrs);
523         if (!job)
524                 return -ENOMEM;
525
526         job->num_relocs = args->num_relocs;
527         job->num_waitchk = args->num_waitchks;
528         job->num_syncpts = args->num_syncpt_incrs;
529         job->priority = ctx->priority;
530         job->clientid = ctx->clientid;
531         job->client_managed_syncpt =
532                 (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) ?
533                 ctx->client_managed_syncpt : ctx->ch->client_managed_syncpt;
534
535         /* copy error notifier settings for this job */
536         if (ctx->error_notifier_ref) {
537                 get_dma_buf(ctx->error_notifier_ref);
538                 job->error_notifier_ref = ctx->error_notifier_ref;
539                 job->error_notifier_offset = ctx->error_notifier_offset;
540         }
541
542         /* mass copy class_ids */
543         if (args->class_ids) {
544                 local_class_ids = kzalloc(sizeof(u32) * num_cmdbufs,
545                         GFP_KERNEL);
546                 if (!local_class_ids) {
547                         err = -ENOMEM;
548                         goto fail;
549                 }
550                 err = copy_from_user(local_class_ids, class_ids,
551                         sizeof(u32) * num_cmdbufs);
552                 if (err) {
553                         err = -EINVAL;
554                         goto fail;
555                 }
556         }
557
558         for (i = 0; i < num_cmdbufs; ++i) {
559                 struct nvhost_cmdbuf cmdbuf;
560                 struct nvhost_cmdbuf_ext cmdbuf_ext;
561                 u32 class_id = class_ids ? local_class_ids[i] : 0;
562
563                 err = copy_from_user(&cmdbuf, cmdbufs + i, sizeof(cmdbuf));
564                 if (err)
565                         goto fail;
566
567                 cmdbuf_ext.pre_fence = -1;
568                 if (cmdbuf_exts)
569                         err = copy_from_user(&cmdbuf_ext,
570                                         cmdbuf_exts + i, sizeof(cmdbuf_ext));
571                 if (err)
572                         cmdbuf_ext.pre_fence = -1;
573
574                 /* verify that the given class id is valid for this engine */
575                 if (class_id &&
576                     class_id != pdata->class &&
577                     class_id != NV_HOST1X_CLASS_ID) {
578                         err = -EINVAL;
579                         goto fail;
580                 }
581
582                 nvhost_job_add_gather(job, cmdbuf.mem, cmdbuf.words,
583                                       cmdbuf.offset, class_id,
584                                       cmdbuf_ext.pre_fence);
585         }
586
587         kfree(local_class_ids);
588         local_class_ids = NULL;
589
590         err = copy_from_user(job->relocarray,
591                         relocs, sizeof(*relocs) * num_relocs);
592         if (err)
593                 goto fail;
594
595         err = copy_from_user(job->relocshiftarray,
596                         reloc_shifts, sizeof(*reloc_shifts) * num_relocs);
597         if (err)
598                 goto fail;
599
600         err = copy_from_user(job->waitchk,
601                         waitchks, sizeof(*waitchks) * num_waitchks);
602         if (err)
603                 goto fail;
604
605         /*
606          * Go through each syncpoint from userspace. Here we:
607          * - Copy syncpoint information
608          * - Validate each syncpoint
609          * - Determine the index of hwctx syncpoint in the table
610          */
611
612         for (i = 0; i < num_syncpt_incrs; ++i) {
613                 struct nvhost_syncpt_incr sp;
614                 bool found = false;
615                 int j;
616
617                 /* Copy */
618                 err = copy_from_user(&sp, syncpt_incrs + i, sizeof(sp));
619                 if (err)
620                         goto fail;
621
622                 /* Validate the trivial case */
623                 if (sp.syncpt_id == 0) {
624                         err = -EINVAL;
625                         goto fail;
626                 }
627
628                 /* ..and then ensure that the syncpoints have been reserved
629                  * for this client */
630                 for (j = 0; j < NVHOST_MODULE_MAX_SYNCPTS; j++) {
631                         if (syncpt_array[j] == sp.syncpt_id) {
632                                 found = true;
633                                 break;
634                         }
635                 }
636
637                 if (!found) {
638                         err = -EINVAL;
639                         goto fail;
640                 }
641
642                 /* Store and get a reference */
643                 job->sp[i].id = sp.syncpt_id;
644                 job->sp[i].incrs = sp.syncpt_incrs;
645         }
646
647         trace_nvhost_channel_submit(ctx->pdev->name,
648                 job->num_gathers, job->num_relocs, job->num_waitchk,
649                 job->sp[0].id,
650                 job->sp[0].incrs);
651
652         err = nvhost_module_busy(ctx->pdev);
653         if (err)
654                 goto fail;
655
656         err = nvhost_job_pin(job, &nvhost_get_host(ctx->pdev)->syncpt);
657         nvhost_module_idle(ctx->pdev);
658         if (err)
659                 goto fail;
660
661         if (args->timeout)
662                 job->timeout = min(ctx->timeout, args->timeout);
663         else
664                 job->timeout = ctx->timeout;
665         job->timeout_debug_dump = ctx->timeout_debug_dump;
666
667         err = nvhost_channel_submit(job);
668         if (err)
669                 goto fail_submit;
670
671         /* Deliver multiple fences back to the userspace */
672         if (fences)
673                 for (i = 0; i < num_syncpt_incrs; ++i) {
674                         u32 fence = get_job_fence(job, i);
675                         err = copy_to_user(fences, &fence, sizeof(u32));
676                         if (err)
677                                 break;
678                         fences++;
679                 }
680
681         /* Deliver the fence using the old mechanism _only_ if a single
682          * syncpoint is used. */
683
684         if (args->flags & BIT(NVHOST_SUBMIT_FLAG_SYNC_FENCE_FD)) {
685                 struct nvhost_ctrl_sync_fence_info pts[num_syncpt_incrs];
686
687                 for (i = 0; i < num_syncpt_incrs; i++) {
688                         pts[i].id = job->sp[i].id;
689                         pts[i].thresh = get_job_fence(job, i);
690                 }
691
692                 err = nvhost_sync_create_fence_fd(ctx->pdev,
693                                 pts, num_syncpt_incrs, "fence", &args->fence);
694                 if (err)
695                         goto fail;
696         } else if (num_syncpt_incrs == 1)
697                 args->fence =  get_job_fence(job, 0);
698         else
699                 args->fence = 0;
700
701         nvhost_job_put(job);
702
703         return 0;
704
705 fail_submit:
706         nvhost_job_unpin(job);
707 fail:
708         nvhost_job_put(job);
709         kfree(local_class_ids);
710
711         nvhost_err(&pdata->pdev->dev, "failed with err %d\n", err);
712
713         return err;
714 }
715
716 static int moduleid_to_index(struct platform_device *dev, u32 moduleid)
717 {
718         int i;
719         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
720
721         for (i = 0; i < NVHOST_MODULE_MAX_CLOCKS; i++) {
722                 if (pdata->clocks[i].moduleid == moduleid)
723                         return i;
724         }
725
726         /* Old user space is sending a random number in args. Return clock
727          * zero in these cases. */
728         return 0;
729 }
730
731 static int nvhost_ioctl_channel_set_rate(struct nvhost_channel_userctx *ctx,
732         struct nvhost_clk_rate_args *arg)
733 {
734         u32 moduleid = (arg->moduleid >> NVHOST_MODULE_ID_BIT_POS)
735                         & ((1 << NVHOST_MODULE_ID_BIT_WIDTH) - 1);
736         u32 attr = (arg->moduleid >> NVHOST_CLOCK_ATTR_BIT_POS)
737                         & ((1 << NVHOST_CLOCK_ATTR_BIT_WIDTH) - 1);
738         int index = moduleid ?
739                         moduleid_to_index(ctx->pdev, moduleid) : 0;
740         int err;
741
742         err = nvhost_module_set_rate(ctx->pdev, ctx, arg->rate, index, attr);
743         if (!tegra_platform_is_silicon() && err) {
744                 nvhost_dbg(dbg_clk, "ignoring error: module=%u, attr=%u, index=%d, err=%d",
745                            moduleid, attr, index, err);
746                 err = 0;
747         }
748
749         return err;
750 }
751
752 static int nvhost_ioctl_channel_get_rate(struct nvhost_channel_userctx *ctx,
753         u32 moduleid, u32 *rate)
754 {
755         int index = moduleid ? moduleid_to_index(ctx->pdev, moduleid) : 0;
756         int err;
757
758         err = nvhost_module_get_rate(ctx->pdev, (unsigned long *)rate, index);
759         if (!tegra_platform_is_silicon() && err) {
760                 nvhost_dbg(dbg_clk, "ignoring error: module=%u, rate=%u, error=%d",
761                            moduleid, *rate, err);
762                 err = 0;
763                 /* fake the return value */
764                 *rate = 32 * 1024;
765         }
766
767         return err;
768 }
769
/*
 * NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR handler: for each of
 * args->num_offsets register offsets, read or write args->block_size
 * bytes of registers, batching transfers through a 64-word bounce
 * buffer.  Virtualized devices delegate to the vhost server.
 *
 * Returns 0 on success, -EINVAL for an empty or unaligned request,
 * -EFAULT on user-copy failure, or an error from the register access
 * helpers (which also validate each offset range).
 */
static int nvhost_ioctl_channel_module_regrdwr(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_ctrl_module_regrdwr_args *args)
{
	u32 num_offsets = args->num_offsets;
	u32 __user *offsets = (u32 __user *)(uintptr_t)args->offsets;
	u32 __user *values = (u32 __user *)(uintptr_t)args->values;
	u32 vals[64];	/* bounce buffer: one batch of register values */
	struct platform_device *ndev;

	trace_nvhost_ioctl_channel_module_regrdwr(args->id,
		args->num_offsets, args->write);

	/* Check that there is something to read and that block size is
	 * u32 aligned */
	if (num_offsets == 0 || args->block_size & 3)
		return -EINVAL;

	ndev = ctx->pdev;

	if (nvhost_dev_is_virtual(ndev))
		return vhost_rdwr_module_regs(ndev, num_offsets,
				args->block_size, offsets, values, args->write);

	while (num_offsets--) {
		int err;
		u32 offs;
		int remaining = args->block_size >> 2;	/* words left */

		if (get_user(offs, offsets))
			return -EFAULT;

		offsets++;
		/* move at most 64 words per iteration through vals[] */
		while (remaining) {
			int batch = min(remaining, 64);
			if (args->write) {
				if (copy_from_user(vals, values,
						batch * sizeof(u32)))
					return -EFAULT;

				err = nvhost_write_module_regs(ndev,
					offs, batch, vals);
				if (err)
					return err;
			} else {
				err = nvhost_read_module_regs(ndev,
						offs, batch, vals);
				if (err)
					return err;

				if (copy_to_user(values, vals,
						batch * sizeof(u32)))
					return -EFAULT;
			}

			remaining -= batch;
			offs += batch * sizeof(u32);
			values += batch;
		}
	}

	return 0;
}
833
834 static u32 create_mask(u32 *words, int num)
835 {
836         int i;
837         u32 word = 0;
838         for (i = 0; i < num; i++) {
839                 if (!words[i] || words[i] > 31)
840                         continue;
841                 word |= BIT(words[i]);
842         }
843
844         return word;
845 }
846
847 static u32 nvhost_ioctl_channel_get_syncpt_mask(
848                 struct nvhost_channel_userctx *priv)
849 {
850         struct nvhost_device_data *pdata = platform_get_drvdata(priv->pdev);
851         u32 mask;
852
853         if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE)
854                 mask = create_mask(priv->syncpts, NVHOST_MODULE_MAX_SYNCPTS);
855         else
856                 mask = create_mask(priv->ch->syncpts,
857                                                 NVHOST_MODULE_MAX_SYNCPTS);
858
859         return mask;
860 }
861
862 static u32 nvhost_ioctl_channel_get_syncpt_channel(struct nvhost_channel *ch,
863                 struct nvhost_device_data *pdata, u32 index)
864 {
865         u32 id;
866
867         mutex_lock(&ch->syncpts_lock);
868
869         /* if we already have required syncpt then return it ... */
870         id = ch->syncpts[index];
871         if (id)
872                 goto exit_unlock;
873
874         /* ... otherwise get a new syncpt dynamically */
875         id = nvhost_get_syncpt_host_managed(pdata->pdev, index, NULL);
876         if (!id)
877                 goto exit_unlock;
878
879         /* ... and store it for further references */
880         ch->syncpts[index] = id;
881
882 exit_unlock:
883         mutex_unlock(&ch->syncpts_lock);
884         return id;
885 }
886
887 static u32 nvhost_ioctl_channel_get_syncpt_instance(
888                 struct nvhost_channel_userctx *ctx,
889                 struct nvhost_device_data *pdata, u32 index)
890 {
891         u32 id;
892
893         /* if we already have required syncpt then return it ... */
894         if (ctx->syncpts[index]) {
895                 id = ctx->syncpts[index];
896                 return id;
897         }
898
899         /* ... otherwise get a new syncpt dynamically */
900         id = nvhost_get_syncpt_host_managed(pdata->pdev, index, NULL);
901         if (!id)
902                 return 0;
903
904         /* ... and store it for further references */
905         ctx->syncpts[index] = id;
906
907         return id;
908 }
909
910 static int nvhost_ioctl_channel_get_client_syncpt(
911                 struct nvhost_channel_userctx *ctx,
912                 struct nvhost_get_client_managed_syncpt_arg *args)
913 {
914         struct nvhost_device_data *pdata = platform_get_drvdata(ctx->pdev);
915         const char __user *args_name =
916                 (const char __user *)(uintptr_t)args->name;
917         char name[32];
918         char set_name[32];
919
920         /* prepare syncpoint name (in case it is needed) */
921         if (args_name) {
922                 if (strncpy_from_user(name, args_name, sizeof(name)) < 0)
923                         return -EFAULT;
924                 name[sizeof(name) - 1] = '\0';
925         } else {
926                 name[0] = '\0';
927         }
928
929         snprintf(set_name, sizeof(set_name),
930                 "%s_%s", dev_name(&ctx->pdev->dev), name);
931
932         if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
933                 if (!ctx->client_managed_syncpt)
934                         ctx->client_managed_syncpt =
935                                 nvhost_get_syncpt_client_managed(pdata->pdev,
936                                                                 set_name);
937                 args->value = ctx->client_managed_syncpt;
938         } else {
939                 struct nvhost_channel *ch = ctx->ch;
940                 mutex_lock(&ch->syncpts_lock);
941                 if (!ch->client_managed_syncpt)
942                         ch->client_managed_syncpt =
943                                 nvhost_get_syncpt_client_managed(pdata->pdev,
944                                                                 set_name);
945                 mutex_unlock(&ch->syncpts_lock);
946                 args->value = ch->client_managed_syncpt;
947         }
948
949         if (!args->value)
950                 return -EAGAIN;
951
952         return 0;
953 }
954
955 static long nvhost_channelctl(struct file *filp,
956         unsigned int cmd, unsigned long arg)
957 {
958         struct nvhost_channel_userctx *priv = filp->private_data;
959         struct nvhost_master *host;
960         struct device *dev;
961         u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
962         int err = 0;
963
964         if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
965                 (_IOC_NR(cmd) == 0) ||
966                 (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
967                 (_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
968                 return -EFAULT;
969
970         if (_IOC_DIR(cmd) & _IOC_WRITE) {
971                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
972                         return -EFAULT;
973         }
974
975         /* serialize calls from this fd */
976         mutex_lock(&priv->ioctl_lock);
977         if (!priv->pdev) {
978                 pr_warn("Channel already unmapped\n");
979                 mutex_unlock(&priv->ioctl_lock);
980                 return -EFAULT;
981         }
982
983         host = nvhost_get_host(priv->pdev);
984         dev = &priv->pdev->dev;
985         switch (cmd) {
986         case NVHOST_IOCTL_CHANNEL_OPEN:
987         {
988                 int fd;
989                 struct file *file;
990                 char *name;
991
992                 err = get_unused_fd_flags(O_RDWR);
993                 if (err < 0)
994                         break;
995                 fd = err;
996
997                 name = kasprintf(GFP_KERNEL, "nvhost-%s-fd%d",
998                                 dev_name(dev), fd);
999                 if (!name) {
1000                         err = -ENOMEM;
1001                         put_unused_fd(fd);
1002                         break;
1003                 }
1004
1005                 file = anon_inode_getfile(name, filp->f_op, NULL, O_RDWR);
1006                 kfree(name);
1007                 if (IS_ERR(file)) {
1008                         err = PTR_ERR(file);
1009                         put_unused_fd(fd);
1010                         break;
1011                 }
1012                 fd_install(fd, file);
1013
1014                 err = __nvhost_channelopen(NULL, priv->pdev, file);
1015                 if (err) {
1016                         put_unused_fd(fd);
1017                         fput(file);
1018                         break;
1019                 }
1020
1021                 ((struct nvhost_channel_open_args *)buf)->channel_fd = fd;
1022                 break;
1023         }
1024         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
1025         {
1026                 ((struct nvhost_get_param_args *)buf)->value =
1027                         nvhost_ioctl_channel_get_syncpt_mask(priv);
1028                 break;
1029         }
1030         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINT:
1031         {
1032                 struct nvhost_device_data *pdata =
1033                         platform_get_drvdata(priv->pdev);
1034                 struct nvhost_get_param_arg *arg =
1035                         (struct nvhost_get_param_arg *)buf;
1036
1037                 if (arg->param >= NVHOST_MODULE_MAX_SYNCPTS) {
1038                         err = -EINVAL;
1039                         break;
1040                 }
1041
1042                 if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE)
1043                         arg->value = nvhost_ioctl_channel_get_syncpt_instance(
1044                                                 priv, pdata, arg->param);
1045                 else
1046                         arg->value = nvhost_ioctl_channel_get_syncpt_channel(
1047                                                 priv->ch, pdata, arg->param);
1048                 if (!arg->value) {
1049                         err = -EAGAIN;
1050                         break;
1051                 }
1052                 break;
1053         }
1054         case NVHOST_IOCTL_CHANNEL_GET_CLIENT_MANAGED_SYNCPOINT:
1055         {
1056                 err = nvhost_ioctl_channel_get_client_syncpt(priv,
1057                         (struct nvhost_get_client_managed_syncpt_arg *)buf);
1058                 break;
1059         }
1060         case NVHOST_IOCTL_CHANNEL_FREE_CLIENT_MANAGED_SYNCPOINT:
1061                 break;
1062         case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
1063         {
1064                 ((struct nvhost_get_param_args *)buf)->value = 0;
1065                 break;
1066         }
1067         case NVHOST_IOCTL_CHANNEL_GET_WAITBASE:
1068         {
1069                 err = -EINVAL;
1070                 break;
1071         }
1072         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
1073         {
1074                 struct nvhost_device_data *pdata = \
1075                         platform_get_drvdata(priv->pdev);
1076                 ((struct nvhost_get_param_args *)buf)->value =
1077                         create_mask(pdata->modulemutexes,
1078                                         NVHOST_MODULE_MAX_MODMUTEXES);
1079                 break;
1080         }
1081         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEX:
1082         {
1083                 struct nvhost_device_data *pdata = \
1084                         platform_get_drvdata(priv->pdev);
1085                 struct nvhost_get_param_arg *arg =
1086                         (struct nvhost_get_param_arg *)buf;
1087
1088                 if (arg->param >= NVHOST_MODULE_MAX_MODMUTEXES ||
1089                     !pdata->modulemutexes[arg->param]) {
1090                         err = -EINVAL;
1091                         break;
1092                 }
1093
1094                 arg->value = pdata->modulemutexes[arg->param];
1095                 break;
1096         }
1097         case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
1098                 break;
1099         case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
1100         {
1101                 struct nvhost_clk_rate_args *arg =
1102                                 (struct nvhost_clk_rate_args *)buf;
1103
1104                 err = nvhost_ioctl_channel_get_rate(priv,
1105                                 arg->moduleid, &arg->rate);
1106                 break;
1107         }
1108         case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
1109         {
1110                 struct nvhost_clk_rate_args *arg =
1111                                 (struct nvhost_clk_rate_args *)buf;
1112
1113                 /* if virtualized, client requests to change clock rate
1114                  * are ignored
1115                  */
1116                 if (nvhost_dev_is_virtual(priv->pdev))
1117                         break;
1118
1119                 err = nvhost_ioctl_channel_set_rate(priv, arg);
1120                 break;
1121         }
1122         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
1123         {
1124                 u32 timeout =
1125                         (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
1126
1127                 priv->timeout = timeout;
1128                 dev_dbg(&priv->pdev->dev,
1129                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
1130                         __func__, priv->timeout, priv);
1131                 break;
1132         }
1133         case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
1134                 ((struct nvhost_get_param_args *)buf)->value = false;
1135                 break;
1136         case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
1137                 priv->priority =
1138                         (u32)((struct nvhost_set_priority_args *)buf)->priority;
1139                 break;
1140         case NVHOST32_IOCTL_CHANNEL_MODULE_REGRDWR:
1141         {
1142                 struct nvhost32_ctrl_module_regrdwr_args *args32 =
1143                         (struct nvhost32_ctrl_module_regrdwr_args *)buf;
1144                 struct nvhost_ctrl_module_regrdwr_args args;
1145                 args.id = args32->id;
1146                 args.num_offsets = args32->num_offsets;
1147                 args.block_size = args32->block_size;
1148                 args.offsets = args32->offsets;
1149                 args.values = args32->values;
1150                 args.write = args32->write;
1151                 err = nvhost_ioctl_channel_module_regrdwr(priv, &args);
1152                 break;
1153         }
1154         case NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR:
1155                 err = nvhost_ioctl_channel_module_regrdwr(priv, (void *)buf);
1156                 break;
1157         case NVHOST32_IOCTL_CHANNEL_SUBMIT:
1158         {
1159                 struct nvhost_device_data *pdata =
1160                         platform_get_drvdata(priv->pdev);
1161                 struct nvhost32_submit_args *args32 = (void *)buf;
1162                 struct nvhost_submit_args args;
1163                 void *identifier;
1164
1165                 if (pdata->resource_policy == RESOURCE_PER_DEVICE &&
1166                     !pdata->exclusive)
1167                         identifier = (void *)pdata;
1168                 else
1169                         identifier = (void *)priv;
1170
1171                 memset(&args, 0, sizeof(args));
1172                 args.submit_version = args32->submit_version;
1173                 args.num_syncpt_incrs = args32->num_syncpt_incrs;
1174                 args.num_cmdbufs = args32->num_cmdbufs;
1175                 args.num_relocs = args32->num_relocs;
1176                 args.num_waitchks = args32->num_waitchks;
1177                 args.timeout = args32->timeout;
1178                 args.syncpt_incrs = args32->syncpt_incrs;
1179                 args.fence = args32->fence;
1180
1181                 args.cmdbufs = args32->cmdbufs;
1182                 args.relocs = args32->relocs;
1183                 args.reloc_shifts = args32->reloc_shifts;
1184                 args.waitchks = args32->waitchks;
1185                 args.class_ids = args32->class_ids;
1186                 args.fences = args32->fences;
1187
1188                 /* first, get a channel */
1189                 err = nvhost_channel_map(pdata, &priv->ch, identifier);
1190                 if (err)
1191                         break;
1192
1193                 /* ..then, synchronize syncpoint information.
1194                  *
1195                  * This information is updated only in this ioctl and
1196                  * channel destruction. We already hold channel
1197                  * reference and this ioctl is serialized => no-one is
1198                  * modifying the syncpoint field concurrently.
1199                  *
1200                  * Synchronization is not destructing anything
1201                  * in the structure; We can only allocate new
1202                  * syncpoints, and hence old ones cannot be released
1203                  * by following operation. If some syncpoint is stored
1204                  * into the channel structure, it remains there. */
1205
1206                 if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
1207                         memcpy(priv->ch->syncpts, priv->syncpts,
1208                                sizeof(priv->syncpts));
1209                         priv->ch->client_managed_syncpt =
1210                                 priv->client_managed_syncpt;
1211                 }
1212
1213                 /* submit work */
1214                 err = nvhost_ioctl_channel_submit(priv, &args);
1215
1216                 /* ..and drop the local reference */
1217                 nvhost_putchannel(priv->ch, 1);
1218
1219                 args32->fence = args.fence;
1220
1221                 break;
1222         }
1223         case NVHOST_IOCTL_CHANNEL_SUBMIT:
1224         {
1225                 struct nvhost_device_data *pdata =
1226                         platform_get_drvdata(priv->pdev);
1227                 void *identifier;
1228
1229                 if (pdata->resource_policy == RESOURCE_PER_DEVICE &&
1230                     !pdata->exclusive)
1231                         identifier = (void *)pdata;
1232                 else
1233                         identifier = (void *)priv;
1234
1235                 /* first, get a channel */
1236                 err = nvhost_channel_map(pdata, &priv->ch, identifier);
1237                 if (err)
1238                         break;
1239
1240                 /* ..then, synchronize syncpoint information.
1241                  *
1242                  * This information is updated only in this ioctl and
1243                  * channel destruction. We already hold channel
1244                  * reference and this ioctl is serialized => no-one is
1245                  * modifying the syncpoint field concurrently.
1246                  *
1247                  * Synchronization is not destructing anything
1248                  * in the structure; We can only allocate new
1249                  * syncpoints, and hence old ones cannot be released
1250                  * by following operation. If some syncpoint is stored
1251                  * into the channel structure, it remains there. */
1252
1253                 if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
1254                         memcpy(priv->ch->syncpts, priv->syncpts,
1255                                sizeof(priv->syncpts));
1256                         priv->ch->client_managed_syncpt =
1257                                 priv->client_managed_syncpt;
1258                 }
1259
1260                 /* submit work */
1261                 err = nvhost_ioctl_channel_submit(priv, (void *)buf);
1262
1263                 /* ..and drop the local reference */
1264                 nvhost_putchannel(priv->ch, 1);
1265
1266                 break;
1267         }
1268         case NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
1269                 err = nvhost_init_error_notifier(priv,
1270                         (struct nvhost_set_error_notifier *)buf);
1271                 break;
1272         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
1273         {
1274                 u32 timeout =
1275                         (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
1276                 bool timeout_debug_dump = !((u32)
1277                         ((struct nvhost_set_timeout_ex_args *)buf)->flags &
1278                         (1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
1279                 priv->timeout = timeout;
1280                 priv->timeout_debug_dump = timeout_debug_dump;
1281                 dev_dbg(&priv->pdev->dev,
1282                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
1283                         __func__, priv->timeout, priv);
1284                 break;
1285         }
1286         default:
1287                 nvhost_dbg_info("unrecognized ioctl cmd: 0x%x", cmd);
1288                 err = -ENOTTY;
1289                 break;
1290         }
1291
1292         mutex_unlock(&priv->ioctl_lock);
1293
1294         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
1295                 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
1296
1297         return err;
1298 }
1299
/* File operations for the per-channel device node. */
static const struct file_operations nvhost_channelops = {
        .owner = THIS_MODULE,
        .release = nvhost_channelrelease,
        .open = nvhost_channelopen,
#ifdef CONFIG_COMPAT
        /* NOTE(review): the native ioctl handler also serves 32-bit
         * userspace — assumes every ioctl argument struct is
         * layout-compatible in compat mode; verify when adding ioctls */
        .compat_ioctl = nvhost_channelctl,
#endif
        .unlocked_ioctl = nvhost_channelctl
};
1309
1310 static const char *get_device_name_for_dev(struct platform_device *dev)
1311 {
1312         struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1313
1314         if (pdata->devfs_name)
1315                 return pdata->devfs_name;
1316
1317         return dev->name;
1318 }
1319
1320 static struct device *nvhost_client_device_create(
1321         struct platform_device *pdev, struct cdev *cdev,
1322         const char *cdev_name, dev_t devno,
1323         const struct file_operations *ops)
1324 {
1325         struct nvhost_master *host = nvhost_get_host(pdev);
1326         const char *use_dev_name;
1327         struct device *dev;
1328         int err;
1329
1330         nvhost_dbg_fn("");
1331
1332         BUG_ON(!host);
1333
1334         cdev_init(cdev, ops);
1335         cdev->owner = THIS_MODULE;
1336
1337         err = cdev_add(cdev, devno, 1);
1338         if (err < 0) {
1339                 dev_err(&pdev->dev,
1340                         "failed to add cdev\n");
1341                 return NULL;
1342         }
1343         use_dev_name = get_device_name_for_dev(pdev);
1344
1345         dev = device_create(host->nvhost_class,
1346                         NULL, devno, NULL,
1347                         (pdev->id <= 0) ?
1348                         IFACE_NAME "-%s%s" :
1349                         IFACE_NAME "-%s%s.%d",
1350                         cdev_name, use_dev_name, pdev->id);
1351
1352         if (IS_ERR(dev)) {
1353                 err = PTR_ERR(dev);
1354                 dev_err(&pdev->dev,
1355                         "failed to create %s %s device for %s\n",
1356                         use_dev_name, cdev_name, pdev->name);
1357                 return NULL;
1358         }
1359
1360         return dev;
1361 }
1362
1363 #define NVHOST_NUM_CDEV 4
1364 int nvhost_client_user_init(struct platform_device *dev)
1365 {
1366         dev_t devno;
1367         int err;
1368         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1369
1370         /* reserve 3 minor #s for <dev>, and ctrl-<dev> */
1371
1372         err = alloc_chrdev_region(&devno, 0, NVHOST_NUM_CDEV, IFACE_NAME);
1373         if (err < 0) {
1374                 dev_err(&dev->dev, "failed to allocate devno\n");
1375                 goto fail;
1376         }
1377         pdata->cdev_region = devno;
1378
1379         pdata->node = nvhost_client_device_create(dev, &pdata->cdev,
1380                                 "", devno, &nvhost_channelops);
1381         if (pdata->node == NULL)
1382                 goto fail;
1383
1384         /* module control (npn-channel based, global) interface */
1385         if (pdata->ctrl_ops) {
1386                 ++devno;
1387                 pdata->ctrl_node = nvhost_client_device_create(dev,
1388                                         &pdata->ctrl_cdev, "ctrl-",
1389                                         devno, pdata->ctrl_ops);
1390                 if (pdata->ctrl_node == NULL)
1391                         goto fail;
1392         }
1393
1394         return 0;
1395 fail:
1396         return err;
1397 }
1398
1399 static void nvhost_client_user_deinit(struct platform_device *dev)
1400 {
1401         struct nvhost_master *nvhost_master = nvhost_get_host(dev);
1402         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1403
1404         if (pdata->node) {
1405                 device_destroy(nvhost_master->nvhost_class, pdata->cdev.dev);
1406                 cdev_del(&pdata->cdev);
1407         }
1408
1409         if (pdata->as_node) {
1410                 device_destroy(nvhost_master->nvhost_class, pdata->as_cdev.dev);
1411                 cdev_del(&pdata->as_cdev);
1412         }
1413
1414         if (pdata->ctrl_node) {
1415                 device_destroy(nvhost_master->nvhost_class,
1416                                pdata->ctrl_cdev.dev);
1417                 cdev_del(&pdata->ctrl_cdev);
1418         }
1419
1420         unregister_chrdev_region(pdata->cdev_region, NVHOST_NUM_CDEV);
1421 }
1422
/*
 * One-time initialization of an nvhost client device: userspace device
 * nodes, registration in the global device list, optional scaling
 * setup, a power busy/idle cycle, DMA parameters, and the optional
 * hw_init hook.
 *
 * Returns 0 on success or a negative errno; on failure the partially
 * created user nodes and debugfs entries are torn down again.
 */
int nvhost_client_device_init(struct platform_device *dev)
{
        int err;
        struct nvhost_master *nvhost_master = nvhost_get_host(dev);
        struct nvhost_device_data *pdata = platform_get_drvdata(dev);

        mutex_init(&pdata->userctx_list_lock);
        INIT_LIST_HEAD(&pdata->userctx_list);

        /* Create debugfs directory for the device */
        nvhost_device_debug_init(dev);

        err = nvhost_client_user_init(dev);
        if (err)
                goto fail;

        err = nvhost_device_list_add(dev);
        if (err)
                goto fail;

        if (pdata->scaling_init)
                pdata->scaling_init(dev);

        /* reset syncpoint values for this unit */
        /* NOTE(review): only busy/idle is performed here — presumably
         * the reset happens as a side effect of powering the unit on;
         * confirm against nvhost_module_busy() */
        err = nvhost_module_busy(nvhost_master->dev);
        if (err)
                goto fail_busy;

        nvhost_module_idle(nvhost_master->dev);

        /* Initialize dma parameters */
        dev->dev.dma_parms = &pdata->dma_parms;
        dma_set_max_seg_size(&dev->dev, UINT_MAX);

        dev_info(&dev->dev, "initialized\n");

        /* per-instance channel resources imply map-on-submit policy */
        if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
                nvhost_master->info.channel_policy = MAP_CHANNEL_ON_SUBMIT;
                nvhost_update_characteristics(dev);
        }

        /* hw_init, when present, determines the final return value */
        if (pdata->hw_init)
                return pdata->hw_init(dev);

        return 0;

fail_busy:
        /* Remove from nvhost device list */
        nvhost_device_list_remove(dev);
fail:
        /* Add clean-up */
        dev_err(&dev->dev, "failed to init client device\n");
        nvhost_client_user_deinit(dev);
        nvhost_device_debug_deinit(dev);
        return err;
}
EXPORT_SYMBOL(nvhost_client_device_init);
1480
/*
 * Release everything nvhost_client_device_init() set up for @dev.
 * Always returns 0.
 */
int nvhost_client_device_release(struct platform_device *dev)
{
        nvhost_module_deinit(dev);      /* nvhost module resources */
        nvhost_device_list_remove(dev); /* global device list entry */
        nvhost_client_user_deinit(dev); /* chardev + device nodes */
        nvhost_device_debug_deinit(dev);        /* debugfs directory */

        return 0;
}
EXPORT_SYMBOL(nvhost_client_device_release);
1498
1499 int nvhost_device_get_resources(struct platform_device *dev)
1500 {
1501         int i;
1502         void __iomem *regs = NULL;
1503         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1504
1505         for (i = 0; i < dev->num_resources; i++) {
1506                 struct resource *r = NULL;
1507
1508                 r = platform_get_resource(dev, IORESOURCE_MEM, i);
1509                 /* We've run out of mem resources */
1510                 if (!r)
1511                         break;
1512
1513                 regs = devm_request_and_ioremap(&dev->dev, r);
1514                 if (!regs)
1515                         goto fail;
1516
1517                 pdata->aperture[i] = regs;
1518         }
1519
1520         return 0;
1521
1522 fail:
1523         dev_err(&dev->dev, "failed to get register memory\n");
1524
1525         return -ENXIO;
1526 }
1527
/* Exported wrapper around nvhost_device_get_resources() so client
 * drivers can map all memory resources of @dev. */
int nvhost_client_device_get_resources(struct platform_device *dev)
{
        return nvhost_device_get_resources(dev);
}
EXPORT_SYMBOL(nvhost_client_device_get_resources);
1533
1534 /* This is a simple wrapper around request_firmware that takes
1535  * 'fw_name' and if available applies a SOC relative path prefix to it.
1536  * The caller is responsible for calling release_firmware later.
1537  */
1538 const struct firmware *
1539 nvhost_client_request_firmware(struct platform_device *dev, const char *fw_name)
1540 {
1541         struct nvhost_chip_support *op = nvhost_get_chip_ops();
1542         const struct firmware *fw;
1543         char *fw_path = NULL;
1544         int path_len, err;
1545
1546         /* This field is NULL when calling from SYS_EXIT.
1547            Add a check here to prevent crash in request_firmware */
1548         if (!current->fs) {
1549                 BUG();
1550                 return NULL;
1551         }
1552
1553         if (!fw_name)
1554                 return NULL;
1555
1556         if (op->soc_name) {
1557                 path_len = strlen(fw_name) + strlen(op->soc_name);
1558                 path_len += 2; /* for the path separator and zero terminator*/
1559
1560                 fw_path = kzalloc(sizeof(*fw_path) * path_len,
1561                                      GFP_KERNEL);
1562                 if (!fw_path)
1563                         return NULL;
1564
1565                 sprintf(fw_path, "%s/%s", op->soc_name, fw_name);
1566                 fw_name = fw_path;
1567         }
1568
1569         err = request_firmware(&fw, fw_name, &dev->dev);
1570         kfree(fw_path);
1571         if (err) {
1572                 dev_err(&dev->dev, "failed to get firmware\n");
1573                 return NULL;
1574         }
1575
1576         /* note: caller must release_firmware */
1577         return fw;
1578 }
1579 EXPORT_SYMBOL(nvhost_client_request_firmware);
1580
1581 struct nvhost_channel *nvhost_find_chan_by_clientid(
1582                                 struct platform_device *pdev,
1583                                 int clientid)
1584 {
1585         struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
1586         struct nvhost_channel_userctx *ctx;
1587         struct nvhost_channel *ch = NULL;
1588
1589         mutex_lock(&pdata->userctx_list_lock);
1590         list_for_each_entry(ctx, &pdata->userctx_list, node) {
1591                 if (ctx->clientid == clientid) {
1592                         ch = ctx->ch;
1593                         break;
1594                 }
1595         }
1596         mutex_unlock(&pdata->userctx_list_lock);
1597
1598         return ch;
1599 }