/*
 * Source: drivers/video/tegra/host/nvhost_channel.c
 * (git blob from commit "video: tegra: host: simplify channel map usage")
 */
1 /*
2  * drivers/video/tegra/host/nvhost_channel.c
3  *
4  * Tegra Graphics Host Channel
5  *
6  * Copyright (c) 2010-2014, NVIDIA Corporation.  All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  */
20
21 #include "nvhost_channel.h"
22 #include "dev.h"
23 #include "nvhost_acm.h"
24 #include "nvhost_job.h"
25 #include "nvhost_hwctx.h"
26 #include "chip_support.h"
27
28 #include <trace/events/nvhost.h>
29 #include <linux/nvhost_ioctl.h>
30 #include <linux/nvhost.h>
31 #include <linux/slab.h>
32
33 #define NVHOST_CHANNEL_LOW_PRIO_MAX_WAIT 50
34
35 /* Constructor for the host1x device list */
36 int nvhost_channel_list_init(struct nvhost_master *host)
37 {
38         if (host->info.nb_channels > BITS_PER_LONG) {
39                 WARN(1, "host1x hardware has more channels than supported\n");
40                 return -ENOSYS;
41         }
42
43         host->chlist = kzalloc(host->info.nb_channels *
44                                 sizeof(struct nvhost_channel *), GFP_KERNEL);
45         if (host->chlist == NULL)
46                         return -ENOMEM;
47
48         mutex_init(&host->chlist_mutex);
49
50         return 0;
51 }
52
53 /* Memory allocation for all supported channels */
54 int nvhost_alloc_channels(struct nvhost_master *host)
55 {
56         int max_channels = host->info.nb_channels;
57         int i, err = 0;
58         struct nvhost_channel *ch;
59
60         err = nvhost_channel_list_init(host);
61         if (err) {
62                 dev_err(&host->dev->dev, "failed to init channel list\n");
63                 return err;
64         }
65
66         mutex_lock(&host->chlist_mutex);
67         for (i = 0; i < max_channels; i++) {
68                 ch = nvhost_alloc_channel_internal(i, max_channels);
69                 if (!ch) {
70                         dev_err(&host->dev->dev, "failed to alloc channels\n");
71                         mutex_unlock(&host->chlist_mutex);
72                         return -ENOMEM;
73                 }
74                 host->chlist[i] = ch;
75                 ch->dev = NULL;
76                 ch->chid = NVHOST_INVALID_CHANNEL;
77         }
78         mutex_unlock(&host->chlist_mutex);
79
80         return 0;
81 }
82
83 /* return any one of assigned channel from device
84  * This API can be used to check if any channel assigned to device
85  */
86 struct nvhost_channel *nvhost_check_channel(struct nvhost_device_data *pdata)
87 {
88         int i;
89         struct nvhost_channel *ch;
90
91         for (i = 0; i < pdata->num_channels; i++) {
92                 ch = pdata->channels[i];
93                 if (ch && ch->chid != NVHOST_INVALID_CHANNEL)
94                         return ch;
95         }
96
97         return NULL;
98 }
99
100 /* Check if more than channel needed for device and assign */
101 int nvhost_channel_assign(struct nvhost_device_data *pdata,
102                           struct nvhost_channel *ch)
103 {
104         int i;
105
106         for (i = 0; i < pdata->num_channels; i++) {
107                 if (!pdata->channels[i]) {
108                         pdata->channels[i] = ch;
109                         pdata->num_mapped_chs++;
110                         ch->dev_chid = i;
111                         return 0;
112                 }
113         }
114         dev_err(&pdata->pdev->dev, "%s: All channels assigned\n", __func__);
115
116         return -EINVAL;
117 }
118
119 /* Releases all channels assigned with device */
120 int nvhost_channel_release(struct nvhost_device_data *pdata)
121 {
122         struct nvhost_channel *ch;
123         int i;
124
125         for (i = 0; i < pdata->num_channels; i++) {
126                 ch = pdata->channels[i];
127                 if (ch && ch->dev)
128                         nvhost_putchannel(ch);
129         }
130         return 0;
131 }
/* Unmap channel from device and free all resources, deinit device.
 *
 * Called under no lock; takes host->chlist_mutex itself. Safe to call on
 * an already-unmapped channel (logs and returns 0). When the last channel
 * of a device is unmapped, also stops/deinits CDMA, re-enables poweroff
 * for keep-alive modules, runs the device's deinit hook and frees all
 * host-managed syncpoints of that device. Always returns 0.
 */
int nvhost_channel_unmap(struct nvhost_channel *ch)
{
	struct nvhost_device_data *pdata;
	struct nvhost_master *host;
	int max_channels;	/* NOTE(review): assigned below but never used */
	int i = 0;

	/* ch->dev == NULL means the channel was never (or no longer) mapped */
	if (!ch->dev) {
		pr_err("%s: freeing unmapped channel\n", __func__);
		return 0;
	}

	pdata = platform_get_drvdata(ch->dev);
	host = nvhost_get_host(pdata->pdev);

	mutex_lock(&host->chlist_mutex);
	max_channels = host->info.nb_channels;

	/* second unmapped check, now under the lock */
	if (ch->chid == NVHOST_INVALID_CHANNEL) {
		dev_err(&host->dev->dev, "Freeing un-mapped channel\n");
		mutex_unlock(&host->chlist_mutex);
		return 0;
	}
	/* release error-notifier resources before tearing the channel down */
	if (ch->error_notifier_ref)
		nvhost_free_error_notifiers(ch);

	dev_dbg(&ch->dev->dev, "channel %d un-mapped\n", ch->chid);

	pdata->num_mapped_chs--;

	/* Allow keep-alive'd module to be turned off
	 * make sure that all channels are unmapped before calling
	 * nvhost_module_enable_poweroff
	 */
	if (!pdata->num_mapped_chs) {
		channel_cdma_op().stop(&ch->cdma);
		nvhost_cdma_deinit(&ch->cdma);

		if (pdata->keepalive)
			nvhost_module_enable_poweroff(pdata->pdev);

		if (pdata->deinit)
			pdata->deinit(ch->dev);

		/*
		 * when ALL of the channels are unmapped from device,
		 * we can free all the host managed syncpts assigned
		 * to that device
		 */
		for (i = 0; i < NVHOST_MODULE_MAX_SYNCPTS; ++i) {
			if (pdata->syncpts[i]) {
				nvhost_free_syncpt(pdata->syncpts[i]);
				pdata->syncpts[i] = 0;
			}
		}
	}

	/* return the channel id to the host-wide allocation bitmap */
	clear_bit(ch->chid, &host->allocated_channels);

	/* reset the channel to the "unmapped" state set in alloc_channels */
	ch->chid = NVHOST_INVALID_CHANNEL;
	ch->dev = NULL;
	kfree(ch->ctxhandler);
	ch->ctxhandler = NULL;
	ch->cur_ctx = NULL;
	ch->aperture = NULL;
	/* clear the device-side slot recorded by nvhost_channel_assign() */
	pdata->channels[ch->dev_chid] = NULL;

	mutex_unlock(&host->chlist_mutex);

	return 0;
}
204
/* Maps free channel with device.
 *
 * Looks up a free hardware channel (round-robin via host->next_free_ch),
 * assigns it to the device described by pdata, initializes it and returns
 * it through *channel with one reference held. If the device already has
 * all of its channels mapped, a reference on an existing channel is
 * returned instead (unless the device is exclusive, which yields -EBUSY).
 * Returns 0 on success or a negative error code.
 */
int nvhost_channel_map(struct nvhost_device_data *pdata,
			struct nvhost_channel **channel)
{
	struct nvhost_master *host = NULL;
	struct nvhost_channel *ch = NULL;
	int max_channels = 0;
	int index = 0;
	int err = 0;

	if (!pdata) {
		pr_err("%s: NULL device data\n", __func__);
		return -EINVAL;
	}

	host = nvhost_get_host(pdata->pdev);

	mutex_lock(&host->chlist_mutex);
	max_channels = host->info.nb_channels;

	/* Check if already channel(s) assigned for device */
	if (pdata->num_channels == pdata->num_mapped_chs) {
		if (pdata->exclusive) {
			mutex_unlock(&host->chlist_mutex);
			return -EBUSY;
		}
		/* share an already-mapped channel of this device */
		ch = nvhost_check_channel(pdata);
		if (ch)
			nvhost_getchannel(ch);
		mutex_unlock(&host->chlist_mutex);
		*channel = ch;
		return 0;
	}

	/* round-robin search of the free-channel bitmap */
	index = find_next_zero_bit(&host->allocated_channels,
					max_channels, host->next_free_ch);

	if (index >= max_channels) {
		/* Reset next pointer and try */
		host->next_free_ch = 0;
		index = find_next_zero_bit(&host->allocated_channels,
					max_channels, host->next_free_ch);
		if (index >= max_channels) {
			pr_err("All host1x channels are mapped, BITMAP: %lu\n",
					host->allocated_channels);
			mutex_unlock(&host->chlist_mutex);
			return -ENOMEM;
		}
	}

	/* Get channel from list and map to device */
	ch = host->chlist[index];
	if (!ch) {
		dev_err(&host->dev->dev, "%s: No channel is free\n", __func__);
		mutex_unlock(&host->chlist_mutex);
		return -EBUSY;
	}
	if (ch->chid == NVHOST_INVALID_CHANNEL) {
		ch->dev = pdata->pdev;
		ch->chid = index;
		nvhost_channel_assign(pdata, ch);
		nvhost_set_chanops(ch);
	} else {
		/* bitmap said free but the channel is already mapped */
		dev_err(&host->dev->dev, "%s: wrong channel map\n", __func__);
		mutex_unlock(&host->chlist_mutex);
		return -EINVAL;
	}

	/* Initialize channel */
	err = nvhost_channel_init(ch, host);
	if (err) {
		dev_err(&ch->dev->dev, "%s: channel init failed\n", __func__);
		/* unlock first: nvhost_channel_unmap retakes chlist_mutex */
		mutex_unlock(&host->chlist_mutex);
		nvhost_channel_unmap(ch);
		return err;
	}
	nvhost_getchannel(ch);
	set_bit(ch->chid, &host->allocated_channels);

	/* set next free channel */
	if (index >= (max_channels - 1))
		host->next_free_ch = 0;
	else
		host->next_free_ch = index + 1;

	/* run the device init hook only for the first mapped channel */
	if (pdata->init && pdata->num_mapped_chs == 1) {
		err = pdata->init(ch->dev);
		if (err) {
			dev_err(&ch->dev->dev, "device init failed\n");
			/* unlock first: nvhost_channel_unmap retakes chlist_mutex */
			mutex_unlock(&host->chlist_mutex);
			nvhost_channel_unmap(ch);
			return err;
		}
	}

	/* Keep alive modules that needs to be when a channel is open */
	if (pdata->keepalive && pdata->num_mapped_chs)
		nvhost_module_disable_poweroff(pdata->pdev);

	dev_dbg(&ch->dev->dev, "channel %d mapped\n", ch->chid);
	mutex_unlock(&host->chlist_mutex);

	*channel = ch;
	return 0;
}
310
311 /* Free channel memory and list */
312 int nvhost_channel_list_free(struct nvhost_master *host)
313 {
314         int i;
315
316         for (i = 0; i < host->info.nb_channels; i++)
317                 kfree(host->chlist[i]);
318
319         dev_info(&host->dev->dev, "channel list free'd\n");
320
321         return 0;
322 }
323
324 int nvhost_channel_init(struct nvhost_channel *ch,
325                 struct nvhost_master *dev)
326 {
327         int err;
328
329         /* Link platform_device to nvhost_channel */
330         err = channel_op(ch).init(ch, dev);
331         if (err < 0) {
332                 dev_err(&dev->dev->dev, "failed to init channel %d\n",
333                                 ch->chid);
334                 return err;
335         }
336
337         return nvhost_cdma_init(&ch->cdma);
338 }
339
340 void nvhost_channel_init_gather_filter(struct nvhost_channel *ch)
341 {
342         struct nvhost_device_data *pdata = platform_get_drvdata(ch->dev);
343         if (channel_op(ch).init_gather_filter && pdata->gather_filter_enabled)
344                 channel_op(ch).init_gather_filter(ch);
345 }
346
347 int nvhost_channel_submit(struct nvhost_job *job)
348 {
349         /*
350          * Check if queue has higher priority jobs running. If so, wait until
351          * queue is empty. Ignores result from nvhost_cdma_flush, as we submit
352          * either when push buffer is empty or when we reach the timeout.
353          */
354         int higher_count = 0;
355
356         switch (job->priority) {
357         case NVHOST_PRIORITY_HIGH:
358                 higher_count = 0;
359                 break;
360         case NVHOST_PRIORITY_MEDIUM:
361                 higher_count = job->ch->cdma.high_prio_count;
362                 break;
363         case NVHOST_PRIORITY_LOW:
364                 higher_count = job->ch->cdma.high_prio_count
365                         + job->ch->cdma.med_prio_count;
366                 break;
367         }
368         if (higher_count > 0)
369                 (void)nvhost_cdma_flush(&job->ch->cdma,
370                                 NVHOST_CHANNEL_LOW_PRIO_MAX_WAIT);
371
372         return channel_op(job->ch).submit(job);
373 }
374
375
/* Take a reference on the channel (paired with nvhost_putchannel()) */
void nvhost_getchannel(struct nvhost_channel *ch)
{
	atomic_inc(&ch->refcount);
}
380
381 void nvhost_putchannel(struct nvhost_channel *ch)
382 {
383         if (!atomic_dec_if_positive(&ch->refcount))
384                 nvhost_channel_unmap(ch);
385 }
386
387
/* Drop cnt references on the channel */
void nvhost_putchannel_mult(struct nvhost_channel *ch, int cnt)
{
	while (cnt-- > 0)
		nvhost_putchannel(ch);
}
395
396 int nvhost_channel_suspend(struct nvhost_channel *ch)
397 {
398         int ret = 0;
399
400         if (channel_cdma_op().stop && ch->dev)
401                 channel_cdma_op().stop(&ch->cdma);
402
403         return ret;
404 }
405
406 struct nvhost_channel *nvhost_alloc_channel_internal(int chindex,
407                         int max_channels)
408 {
409         struct nvhost_channel *ch = NULL;
410
411         ch = kzalloc(sizeof(*ch), GFP_KERNEL);
412         if (ch)
413                 ch->chid = chindex;
414
415         return ch;
416 }
417
418 int nvhost_channel_save_context(struct nvhost_channel *ch)
419 {
420         int err = 0;
421
422         if (ch && ch->cur_ctx)
423                 err = channel_op(ch).save_context(ch);
424
425         return err;
426
427 }
428
429 static struct nvhost_hwctx *alloc_hwctx(struct nvhost_hwctx_handler *h,
430                 struct nvhost_channel *ch)
431 {
432         struct nvhost_hwctx *ctx;
433
434         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
435         if (!ctx)
436                 return NULL;
437
438         kref_init(&ctx->ref);
439         ctx->h = h;
440         ctx->channel = ch;
441         ctx->valid = true;
442
443         return ctx;
444 }
445
446 static void free_hwctx(struct kref *ref)
447 {
448         struct nvhost_hwctx *ctx = container_of(ref, struct nvhost_hwctx, ref);
449
450         kfree(ctx);
451 }
452
/* Take a reference on the hardware context */
static void get_hwctx(struct nvhost_hwctx *ctx)
{
	kref_get(&ctx->ref);
}
457
/* Drop a reference; the last put frees the context via free_hwctx() */
static void put_hwctx(struct nvhost_hwctx *ctx)
{
	kref_put(&ctx->ref, free_hwctx);
}
462
/*
 * Allocate a generic hwctx handler wired to the alloc/get/put helpers
 * above. Returns NULL on allocation failure.
 *
 * NOTE(review): the `syncpt` argument is ignored — p->syncpt is forced
 * to NVSYNCPT_INVALID — and `ch` is unused; presumably a syncpoint is
 * assigned later, but confirm this is intentional at the call sites.
 */
struct nvhost_hwctx_handler *nvhost_alloc_hwctx_handler(u32 syncpt,
	u32 waitbase, struct nvhost_channel *ch)
{
	struct nvhost_hwctx_handler *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;

	p->syncpt = NVSYNCPT_INVALID;
	p->waitbase = waitbase;

	p->alloc = alloc_hwctx;
	p->get   = get_hwctx;
	p->put   = put_hwctx;

	return p;
}