2 * drivers/video/tegra/host/gk20a/channel_sync_gk20a.c
4 * GK20A Channel Synchronization Abstraction
6 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 #include <linux/gk20a.h>
20 #include "channel_sync_gk20a.h"
24 #include "../../../staging/android/sync.h"
27 #ifdef CONFIG_TEGRA_GK20A
28 #include <linux/nvhost.h>
31 #ifdef CONFIG_TEGRA_GK20A
/*
 * Per-channel syncpoint-backed implementation of the gk20a_channel_sync
 * abstraction.  The embedded `ops` vtable is handed to callers; every
 * method below recovers this container via container_of(s, ..., ops).
 * NOTE(review): this chunk is truncated — the closing brace and the
 * syncpoint `id` member (referenced as sp->id throughout) are not
 * visible here; confirm against the full file.
 */
33 struct gk20a_channel_syncpt {
/* function table exposed to the rest of the driver */
34 struct gk20a_channel_sync ops;
/* owning GPU channel */
35 struct channel_gk20a *c;
/* host1x device used for all nvhost_*_ext() syncpoint calls */
36 struct platform_device *host1x_pdev;
/*
 * Emit a 4-word host1x "wait" method sequence into @ptr that stalls the
 * channel until syncpoint @id reaches @thresh.
 * NOTE(review): words ptr[0]..ptr[2] are not visible in this chunk —
 * presumably the setclass/load-payload opcodes and @thresh; verify
 * against the full source.
 */
40 static void add_wait_cmd(u32 *ptr, u32 id, u32 thresh)
48 /* syncpt_id, switch_en, wait */
49 ptr[3] = (id << 8) | 0x10;
/*
 * CPU-side blocking wait until @fence (syncpoint sp->id at
 * fence->thresh) is reached, delegated to the host1x driver.
 * Returns the nvhost_syncpt_wait_timeout_ext() result.
 * NOTE(review): the timeout parameter (and any trailing arguments) are
 * truncated from this chunk.
 */
52 int gk20a_channel_syncpt_wait_cpu(struct gk20a_channel_sync *s,
53 struct gk20a_channel_fence *fence,
/* recover the containing syncpt object from the ops pointer */
56 struct gk20a_channel_syncpt *sp =
57 container_of(s, struct gk20a_channel_syncpt, ops);
60 return nvhost_syncpt_wait_timeout_ext(
61 sp->host1x_pdev, sp->id, fence->thresh,
/*
 * Non-blocking check: has syncpoint sp->id already passed the fence
 * threshold?  Pure query, no side effects; delegates to host1x.
 */
65 bool gk20a_channel_syncpt_is_expired(struct gk20a_channel_sync *s,
66 struct gk20a_channel_fence *fence)
/* recover the containing syncpt object from the ops pointer */
68 struct gk20a_channel_syncpt *sp =
69 container_of(s, struct gk20a_channel_syncpt, ops);
/* NOTE(review): final argument (fence->thresh) truncated in this chunk */
72 return nvhost_syncpt_is_expired_ext(sp->host1x_pdev, sp->id,
/*
 * Queue a GPU-side wait on syncpoint (@id, @thresh) by allocating a
 * 4-word private command buffer entry and filling it via add_wait_cmd().
 * The resulting entry is returned through *@entry (assignment not
 * visible in this truncated chunk).  Invalid ids are elided with a
 * warning; already-expired waits are skipped entirely.
 */
76 int gk20a_channel_syncpt_wait_syncpt(struct gk20a_channel_sync *s, u32 id,
77 u32 thresh, struct priv_cmd_entry **entry)
79 struct gk20a_channel_syncpt *sp =
80 container_of(s, struct gk20a_channel_syncpt, ops);
81 struct priv_cmd_entry *wait_cmd = NULL;
/* reject syncpoint ids outside the host1x range */
83 if (id >= nvhost_syncpt_nb_pts_ext(sp->host1x_pdev)) {
84 dev_warn(dev_from_gk20a(sp->c->g),
85 "invalid wait id in gpfifo submit, elided");
/* no GPU wait needed if the threshold has already been reached */
89 if (nvhost_syncpt_is_expired_ext(sp->host1x_pdev, id, thresh))
/* one wait command = 4 words of priv cmd buffer space */
92 gk20a_channel_alloc_priv_cmdbuf(sp->c, 4, &wait_cmd);
93 if (wait_cmd == NULL) {
94 gk20a_err(dev_from_gk20a(sp->c->g),
95 "not enough priv cmd buffer space");
99 add_wait_cmd(&wait_cmd->ptr[0], id, thresh);
/*
 * Queue GPU-side waits for every sync point contained in the Android
 * sync fence referred to by file descriptor @fd.  Allocates 4 words of
 * priv cmd buffer per point; points that are already expired get a
 * 4-word no-op (zeros) so the buffer layout stays fixed.  The fence
 * reference taken by nvhost_sync_fdget() is dropped on every visible
 * exit path via sync_fence_put().
 * NOTE(review): the fdget NULL-check, loop counter `i` handling and
 * return statements are truncated from this chunk.
 */
105 int gk20a_channel_syncpt_wait_fd(struct gk20a_channel_sync *s, int fd,
106 struct priv_cmd_entry **entry)
112 struct sync_fence *sync_fence;
113 struct priv_cmd_entry *wait_cmd = NULL;
114 struct gk20a_channel_syncpt *sp =
115 container_of(s, struct gk20a_channel_syncpt, ops);
116 struct channel_gk20a *c = sp->c;
/* take a reference on the fence behind @fd */
118 sync_fence = nvhost_sync_fdget(fd);
122 /* validate syncpt ids */
123 list_for_each_entry(pt, &sync_fence->pt_list_head, pt_list) {
124 u32 wait_id = nvhost_sync_pt_id(pt);
/* bail out (dropping the fence ref) on an out-of-range id */
126 wait_id >= nvhost_syncpt_nb_pts_ext(sp->host1x_pdev)) {
127 sync_fence_put(sync_fence);
/* reserve 4 words per sync point in one allocation */
132 num_wait_cmds = nvhost_sync_num_pts(sync_fence);
133 gk20a_channel_alloc_priv_cmdbuf(c, 4 * num_wait_cmds, &wait_cmd);
134 if (wait_cmd == NULL) {
135 gk20a_err(dev_from_gk20a(c->g),
136 "not enough priv cmd buffer space");
137 sync_fence_put(sync_fence);
/* second pass: emit a wait (or a zero-filled no-op) per point */
142 list_for_each_entry(pt, &sync_fence->pt_list_head, pt_list) {
143 u32 wait_id = nvhost_sync_pt_id(pt);
144 u32 wait_value = nvhost_sync_pt_thresh(pt);
146 if (nvhost_syncpt_is_expired_ext(sp->host1x_pdev,
147 wait_id, wait_value)) {
/* already signalled: pad with zeros to keep the slot layout */
148 wait_cmd->ptr[i * 4 + 0] = 0;
149 wait_cmd->ptr[i * 4 + 1] = 0;
150 wait_cmd->ptr[i * 4 + 2] = 0;
151 wait_cmd->ptr[i * 4 + 3] = 0;
153 add_wait_cmd(&wait_cmd->ptr[i * 4], wait_id,
/* sanity: every reserved slot must have been written */
157 WARN_ON(i != num_wait_cmds);
158 sync_fence_put(sync_fence);
/*
 * host1x completion-interrupt callback: @priv is the channel_gk20a
 * registered in __gk20a_channel_syncpt_incr(); forward the number of
 * completed jobs to the channel update path.
 */
167 static void gk20a_channel_syncpt_update(void *priv, int nr_completed)
169 struct channel_gk20a *ch20a = priv;
170 gk20a_channel_update(ch20a, nr_completed);
/*
 * Common backend for all the incr variants: emits a syncpoint-increment
 * method sequence into a freshly allocated priv cmd buffer (graphics
 * class when @gfx_class, host class otherwise; optional WFI when
 * @wfi_cmd), bumps the syncpoint max on the host1x side, optionally
 * registers a completion interrupt, and fills in @fence with the new
 * threshold.
 * NOTE(review): several locals (j, thresh, err, incr_cmd_size), the
 * gfx-class branch structure and the return statements are truncated
 * from this chunk.
 */
173 static int __gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
174 bool gfx_class, bool wfi_cmd,
176 struct priv_cmd_entry **entry,
177 struct gk20a_channel_fence *fence)
183 struct priv_cmd_entry *incr_cmd = NULL;
184 struct gk20a_channel_syncpt *sp =
185 container_of(s, struct gk20a_channel_syncpt, ops);
186 struct channel_gk20a *c = sp->c;
192 gk20a_channel_alloc_priv_cmdbuf(c, incr_cmd_size, &incr_cmd);
193 if (incr_cmd == NULL) {
194 gk20a_err(dev_from_gk20a(c->g),
195 "not enough priv cmd buffer space");
/* NOTE(review): magic method words below are host1x/KeplerC opcodes;
 * presumably wfi / non-stalling-interrupt / syncpt-incr — confirm
 * against the class headers. */
201 incr_cmd->ptr[j++] = 0x2001001E;
202 /* handle, ignored */
203 incr_cmd->ptr[j++] = 0x00000000;
206 incr_cmd->ptr[j++] = 0x2001001C;
207 /* payload, ignored */
208 incr_cmd->ptr[j++] = 0;
210 incr_cmd->ptr[j++] = 0x2001001D;
211 /* syncpt_id, incr */
212 incr_cmd->ptr[j++] = (sp->id << 8) | 0x1;
/* sanity: exactly the allocated number of words must be written */
213 WARN_ON(j != incr_cmd_size);
/* reserve the new threshold the GPU will signal */
215 thresh = nvhost_syncpt_incr_max_ext(sp->host1x_pdev, sp->id, 1);
218 err = nvhost_intr_register_notifier(sp->host1x_pdev,
220 gk20a_channel_syncpt_update, c);
222 /* Adding interrupt action should never fail. A proper error
223 * handling here would require us to decrement the syncpt max
224 * back to its original value. */
225 WARN(err, "failed to set submit complete interrupt");
/* hand the reserved threshold back to the caller */
228 fence->thresh = thresh;
230 fence->wfi = wfi_cmd;
/*
 * Increment variant: host class, with wait-for-idle, no completion
 * interrupt.  Thin wrapper over __gk20a_channel_syncpt_incr()
 * (trailing arguments truncated from this chunk).
 */
235 int gk20a_channel_syncpt_incr_wfi(struct gk20a_channel_sync *s,
236 struct priv_cmd_entry **entry,
237 struct gk20a_channel_fence *fence)
239 return __gk20a_channel_syncpt_incr(s,
240 false /* use host class */,
242 false /* no irq handler */,
/*
 * Increment variant used for kernel-internal job tracking: graphics
 * class when the channel runs KEPLER_C, no WFI (no user-space fence is
 * produced), completion interrupt registered so the channel update
 * path runs when the job finishes.
 */
246 int gk20a_channel_syncpt_incr(struct gk20a_channel_sync *s,
247 struct priv_cmd_entry **entry,
248 struct gk20a_channel_fence *fence)
250 struct gk20a_channel_syncpt *sp =
251 container_of(s, struct gk20a_channel_syncpt, ops);
252 /* Don't put wfi cmd to this one since we're not returning
253 * a fence to user space. */
254 return __gk20a_channel_syncpt_incr(s,
255 sp->c->obj_class == KEPLER_C /* may use gfx class */,
257 true /* register irq */,
/*
 * Increment variant that hands a (syncpoint id, threshold) pair back to
 * user space.  Uses gfx-class increment on KEPLER_C channels, otherwise
 * host-class increment followed by WFI, so the fence only signals once
 * the GPU work is truly complete.  On success *@id / *@thresh describe
 * the fence (the *id assignment is truncated from this chunk).
 */
261 int gk20a_channel_syncpt_incr_user_syncpt(struct gk20a_channel_sync *s,
262 struct priv_cmd_entry **entry,
263 struct gk20a_channel_fence *fence,
265 u32 *id, u32 *thresh)
267 struct gk20a_channel_syncpt *sp =
268 container_of(s, struct gk20a_channel_syncpt, ops);
269 /* Need to do 'host incr + wfi' or 'gfx incr' since we return the fence
271 int err = __gk20a_channel_syncpt_incr(s,
273 sp->c->obj_class == KEPLER_C /* use gfx class? */,
275 sp->c->obj_class != KEPLER_C /* wfi if host class */,
276 true /* register irq */,
281 *thresh = fence->thresh;
/*
 * Increment variant that wraps the resulting (id, thresh) pair in an
 * Android sync fence and returns it to user space as a file descriptor
 * via nvhost_sync_create_fence_fd().  Builds on
 * gk20a_channel_syncpt_incr_user_syncpt(); the error check between the
 * two calls and the pt initialization are truncated from this chunk.
 */
285 int gk20a_channel_syncpt_incr_user_fd(struct gk20a_channel_sync *s,
286 struct priv_cmd_entry **entry,
287 struct gk20a_channel_fence *fence,
293 struct nvhost_ctrl_sync_fence_info pt;
294 struct gk20a_channel_syncpt *sp =
295 container_of(s, struct gk20a_channel_syncpt, ops);
296 err = gk20a_channel_syncpt_incr_user_syncpt(s, entry, fence, wfi,
300 return nvhost_sync_create_fence_fd(sp->host1x_pdev, &pt, 1,
/*
 * Force the syncpoint's min value up to its max, releasing any waiters
 * without the GPU having to perform the outstanding increments (used on
 * channel teardown/abort paths).
 */
307 void gk20a_channel_syncpt_set_min_eq_max(struct gk20a_channel_sync *s)
309 struct gk20a_channel_syncpt *sp =
310 container_of(s, struct gk20a_channel_syncpt, ops);
311 nvhost_syncpt_set_min_eq_max_ext(sp->host1x_pdev, sp->id);
/*
 * Release the host-managed syncpoint acquired in
 * gk20a_channel_syncpt_create().  NOTE(review): the kfree(sp) that
 * presumably follows is truncated from this chunk.
 */
314 static void gk20a_channel_syncpt_destroy(struct gk20a_channel_sync *s)
316 struct gk20a_channel_syncpt *sp =
317 container_of(s, struct gk20a_channel_syncpt, ops);
318 nvhost_free_syncpt(sp->id);
/*
 * Allocate and initialize the syncpoint-backed sync object for channel
 * @c: acquires a host-managed syncpoint keyed by the hardware channel
 * id and wires up the ops vtable.  Returns &sp->ops (return statement
 * truncated from this chunk), which destroy() later tears down.
 * NOTE(review): the kzalloc NULL-check and the error path for a failed
 * syncpoint allocation are not visible here — confirm they exist.
 */
322 static struct gk20a_channel_sync *
323 gk20a_channel_syncpt_create(struct channel_gk20a *c)
325 struct gk20a_channel_syncpt *sp;
327 sp = kzalloc(sizeof(*sp), GFP_KERNEL);
/* host1x is the parent device of the GPU platform device */
332 sp->host1x_pdev = to_platform_device(c->g->dev->dev.parent);
333 sp->id = nvhost_get_syncpt_host_managed(c->g->dev, c->hw_chid);
335 sp->ops.wait_cpu = gk20a_channel_syncpt_wait_cpu;
336 sp->ops.is_expired = gk20a_channel_syncpt_is_expired;
337 sp->ops.wait_syncpt = gk20a_channel_syncpt_wait_syncpt;
338 sp->ops.wait_fd = gk20a_channel_syncpt_wait_fd;
339 sp->ops.incr = gk20a_channel_syncpt_incr;
340 sp->ops.incr_wfi = gk20a_channel_syncpt_incr_wfi;
341 sp->ops.incr_user_syncpt = gk20a_channel_syncpt_incr_user_syncpt;
342 sp->ops.incr_user_fd = gk20a_channel_syncpt_incr_user_fd;
343 sp->ops.set_min_eq_max = gk20a_channel_syncpt_set_min_eq_max;
344 sp->ops.destroy = gk20a_channel_syncpt_destroy;
/* syncpoints are host-managed; keep them alive until channel close */
346 sp->ops.syncpt_aggressive_destroy = false;
350 #endif /* CONFIG_TEGRA_GK20A */
352 struct gk20a_channel_sync *gk20a_channel_sync_create(struct channel_gk20a *c)
354 #ifdef CONFIG_TEGRA_GK20A
355 if (gk20a_platform_has_syncpoints(c->g->dev))
356 return gk20a_channel_syncpt_create(c);