/*
 * drivers/video/tegra/host/nvhost_channel.c
 *
 * Tegra Graphics Host Channel
 *
 * Copyright (c) 2010-2014, NVIDIA Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
21 #include "nvhost_channel.h"
23 #include "nvhost_acm.h"
24 #include "nvhost_job.h"
25 #include "nvhost_hwctx.h"
26 #include "chip_support.h"
28 #include <trace/events/nvhost.h>
29 #include <linux/nvhost_ioctl.h>
30 #include <linux/nvhost.h>
31 #include <linux/slab.h>
33 #define NVHOST_CHANNEL_LOW_PRIO_MAX_WAIT 50
35 /* Constructor for the host1x device list */
36 int nvhost_channel_list_init(struct nvhost_master *host)
38 if (host->info.nb_channels > BITS_PER_LONG) {
39 WARN(1, "host1x hardware has more channels than supported\n");
43 host->chlist = kzalloc(host->info.nb_channels *
44 sizeof(struct nvhost_channel *), GFP_KERNEL);
45 if (host->chlist == NULL)
48 mutex_init(&host->chlist_mutex);
53 /* Memory allocation for all supported channels */
54 int nvhost_alloc_channels(struct nvhost_master *host)
56 int max_channels = host->info.nb_channels;
58 struct nvhost_channel *ch;
60 err = nvhost_channel_list_init(host);
62 dev_err(&host->dev->dev, "failed to init channel list\n");
66 mutex_lock(&host->chlist_mutex);
67 for (i = 0; i < max_channels; i++) {
68 ch = nvhost_alloc_channel_internal(i, max_channels);
70 dev_err(&host->dev->dev, "failed to alloc channels\n");
71 mutex_unlock(&host->chlist_mutex);
76 ch->chid = NVHOST_INVALID_CHANNEL;
78 mutex_unlock(&host->chlist_mutex);
83 /* return any one of assigned channel from device
84 * This API can be used to check if any channel assigned to device
86 struct nvhost_channel *nvhost_check_channel(struct nvhost_device_data *pdata)
89 struct nvhost_channel *ch;
91 for (i = 0; i < pdata->num_channels; i++) {
92 ch = pdata->channels[i];
93 if (ch && ch->chid != NVHOST_INVALID_CHANNEL)
100 /* Check if more than channel needed for device and assign */
101 int nvhost_channel_assign(struct nvhost_device_data *pdata,
102 struct nvhost_channel *ch)
106 for (i = 0; i < pdata->num_channels; i++) {
107 if (!pdata->channels[i]) {
108 pdata->channels[i] = ch;
109 pdata->num_mapped_chs++;
114 dev_err(&pdata->pdev->dev, "%s: All channels assigned\n", __func__);
119 /* Releases all channels assigned with device */
120 int nvhost_channel_release(struct nvhost_device_data *pdata)
122 struct nvhost_channel *ch;
125 for (i = 0; i < pdata->num_channels; i++) {
126 ch = pdata->channels[i];
128 nvhost_putchannel(ch);
132 /* Unmap channel from device and free all resources, deinit device */
133 int nvhost_channel_unmap(struct nvhost_channel *ch)
135 struct nvhost_device_data *pdata;
136 struct nvhost_master *host;
141 pr_err("%s: freeing unmapped channel\n", __func__);
145 pdata = platform_get_drvdata(ch->dev);
146 host = nvhost_get_host(pdata->pdev);
148 mutex_lock(&host->chlist_mutex);
149 max_channels = host->info.nb_channels;
151 if (ch->chid == NVHOST_INVALID_CHANNEL) {
152 dev_err(&host->dev->dev, "Freeing un-mapped channel\n");
153 mutex_unlock(&host->chlist_mutex);
156 if (ch->error_notifier_ref)
157 nvhost_free_error_notifiers(ch);
159 dev_dbg(&ch->dev->dev, "channel %d un-mapped\n", ch->chid);
161 pdata->num_mapped_chs--;
163 /* Allow keep-alive'd module to be turned off
164 * make sure that all channels are unmapped before calling
165 * nvhost_module_enable_poweroff
167 if (!pdata->num_mapped_chs) {
168 channel_cdma_op().stop(&ch->cdma);
169 nvhost_cdma_deinit(&ch->cdma);
171 if (pdata->keepalive)
172 nvhost_module_enable_poweroff(pdata->pdev);
175 pdata->deinit(ch->dev);
178 * when ALL of the channels are unmapped from device,
179 * we can free all the host managed syncpts assigned
182 for (i = 0; i < NVHOST_MODULE_MAX_SYNCPTS; ++i) {
183 if (pdata->syncpts[i]) {
184 nvhost_free_syncpt(pdata->syncpts[i]);
185 pdata->syncpts[i] = 0;
190 clear_bit(ch->chid, &host->allocated_channels);
192 ch->chid = NVHOST_INVALID_CHANNEL;
194 kfree(ch->ctxhandler);
195 ch->ctxhandler = NULL;
198 pdata->channels[ch->dev_chid] = NULL;
200 mutex_unlock(&host->chlist_mutex);
205 /* Maps free channel with device */
206 int nvhost_channel_map(struct nvhost_device_data *pdata,
207 struct nvhost_channel **channel)
209 struct nvhost_master *host = NULL;
210 struct nvhost_channel *ch = NULL;
211 int max_channels = 0;
216 pr_err("%s: NULL device data\n", __func__);
220 host = nvhost_get_host(pdata->pdev);
222 mutex_lock(&host->chlist_mutex);
223 max_channels = host->info.nb_channels;
225 /* Check if already channel(s) assigned for device */
226 if (pdata->num_channels == pdata->num_mapped_chs) {
227 if (pdata->exclusive) {
228 mutex_unlock(&host->chlist_mutex);
231 ch = nvhost_check_channel(pdata);
233 nvhost_getchannel(ch);
234 mutex_unlock(&host->chlist_mutex);
239 index = find_next_zero_bit(&host->allocated_channels,
240 max_channels, host->next_free_ch);
242 if (index >= max_channels) {
243 /* Reset next pointer and try */
244 host->next_free_ch = 0;
245 index = find_next_zero_bit(&host->allocated_channels,
246 max_channels, host->next_free_ch);
247 if (index >= max_channels) {
248 pr_err("All host1x channels are mapped, BITMAP: %lu\n",
249 host->allocated_channels);
250 mutex_unlock(&host->chlist_mutex);
255 /* Get channel from list and map to device */
256 ch = host->chlist[index];
258 dev_err(&host->dev->dev, "%s: No channel is free\n", __func__);
259 mutex_unlock(&host->chlist_mutex);
262 if (ch->chid == NVHOST_INVALID_CHANNEL) {
263 ch->dev = pdata->pdev;
265 nvhost_channel_assign(pdata, ch);
266 nvhost_set_chanops(ch);
268 dev_err(&host->dev->dev, "%s: wrong channel map\n", __func__);
269 mutex_unlock(&host->chlist_mutex);
273 /* Initialize channel */
274 err = nvhost_channel_init(ch, host);
276 dev_err(&ch->dev->dev, "%s: channel init failed\n", __func__);
277 mutex_unlock(&host->chlist_mutex);
278 nvhost_channel_unmap(ch);
281 nvhost_getchannel(ch);
282 set_bit(ch->chid, &host->allocated_channels);
284 /* set next free channel */
285 if (index >= (max_channels - 1))
286 host->next_free_ch = 0;
288 host->next_free_ch = index + 1;
290 if (pdata->init && pdata->num_mapped_chs == 1) {
291 err = pdata->init(ch->dev);
293 dev_err(&ch->dev->dev, "device init failed\n");
294 mutex_unlock(&host->chlist_mutex);
295 nvhost_channel_unmap(ch);
300 /* Keep alive modules that needs to be when a channel is open */
301 if (pdata->keepalive && pdata->num_mapped_chs)
302 nvhost_module_disable_poweroff(pdata->pdev);
304 dev_dbg(&ch->dev->dev, "channel %d mapped\n", ch->chid);
305 mutex_unlock(&host->chlist_mutex);
311 /* Free channel memory and list */
312 int nvhost_channel_list_free(struct nvhost_master *host)
316 for (i = 0; i < host->info.nb_channels; i++)
317 kfree(host->chlist[i]);
319 dev_info(&host->dev->dev, "channel list free'd\n");
324 int nvhost_channel_init(struct nvhost_channel *ch,
325 struct nvhost_master *dev)
329 /* Link platform_device to nvhost_channel */
330 err = channel_op(ch).init(ch, dev);
332 dev_err(&dev->dev->dev, "failed to init channel %d\n",
337 return nvhost_cdma_init(&ch->cdma);
340 void nvhost_channel_init_gather_filter(struct nvhost_channel *ch)
342 struct nvhost_device_data *pdata = platform_get_drvdata(ch->dev);
343 if (channel_op(ch).init_gather_filter && pdata->gather_filter_enabled)
344 channel_op(ch).init_gather_filter(ch);
347 int nvhost_channel_submit(struct nvhost_job *job)
350 * Check if queue has higher priority jobs running. If so, wait until
351 * queue is empty. Ignores result from nvhost_cdma_flush, as we submit
352 * either when push buffer is empty or when we reach the timeout.
354 int higher_count = 0;
356 switch (job->priority) {
357 case NVHOST_PRIORITY_HIGH:
360 case NVHOST_PRIORITY_MEDIUM:
361 higher_count = job->ch->cdma.high_prio_count;
363 case NVHOST_PRIORITY_LOW:
364 higher_count = job->ch->cdma.high_prio_count
365 + job->ch->cdma.med_prio_count;
368 if (higher_count > 0)
369 (void)nvhost_cdma_flush(&job->ch->cdma,
370 NVHOST_CHANNEL_LOW_PRIO_MAX_WAIT);
372 return channel_op(job->ch).submit(job);
376 void nvhost_getchannel(struct nvhost_channel *ch)
378 atomic_inc(&ch->refcount);
381 void nvhost_putchannel(struct nvhost_channel *ch)
383 if (!atomic_dec_if_positive(&ch->refcount))
384 nvhost_channel_unmap(ch);
/* Drop @cnt references on the channel in one call. */
void nvhost_putchannel_mult(struct nvhost_channel *ch, int cnt)
{
	int i;

	for (i = 0; i < cnt; i++)
		nvhost_putchannel(ch);
}
396 int nvhost_channel_suspend(struct nvhost_channel *ch)
400 if (channel_cdma_op().stop && ch->dev)
401 channel_cdma_op().stop(&ch->cdma);
406 struct nvhost_channel *nvhost_alloc_channel_internal(int chindex,
409 struct nvhost_channel *ch = NULL;
411 ch = kzalloc(sizeof(*ch), GFP_KERNEL);
418 int nvhost_channel_save_context(struct nvhost_channel *ch)
422 if (ch && ch->cur_ctx)
423 err = channel_op(ch).save_context(ch);
429 static struct nvhost_hwctx *alloc_hwctx(struct nvhost_hwctx_handler *h,
430 struct nvhost_channel *ch)
432 struct nvhost_hwctx *ctx;
434 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
438 kref_init(&ctx->ref);
446 static void free_hwctx(struct kref *ref)
448 struct nvhost_hwctx *ctx = container_of(ref, struct nvhost_hwctx, ref);
453 static void get_hwctx(struct nvhost_hwctx *ctx)
458 static void put_hwctx(struct nvhost_hwctx *ctx)
460 kref_put(&ctx->ref, free_hwctx);
463 struct nvhost_hwctx_handler *nvhost_alloc_hwctx_handler(u32 syncpt,
464 u32 waitbase, struct nvhost_channel *ch)
466 struct nvhost_hwctx_handler *p;
468 p = kzalloc(sizeof(*p), GFP_KERNEL);
472 p->syncpt = NVSYNCPT_INVALID;
473 p->waitbase = waitbase;
475 p->alloc = alloc_hwctx;