2 * GK20A graphics channel
4 * Copyright (c) 2011-2016, NVIDIA CORPORATION. All rights reserved.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #ifndef CHANNEL_GK20A_H
19 #define CHANNEL_GK20A_H
21 #include <linux/log2.h>
22 #include <linux/mutex.h>
23 #include <linux/poll.h>
24 #include <linux/semaphore.h>
25 #include <linux/slab.h>
26 #include <linux/spinlock.h>
27 #include <linux/wait.h>
28 #include <uapi/linux/nvgpu.h>
32 struct dbg_session_gk20a;
35 #include "channel_sync_gk20a.h"
39 #include "fence_gk20a.h"
/* contexts associated with a channel */
struct channel_ctx_gk20a {
	struct gr_ctx_desc *gr_ctx;		/* graphics (GR) context */
	struct patch_desc patch_ctx;		/* patch context buffer */
	struct zcull_ctx_desc zcull_ctx;	/* zcull context buffer */
	/* per-channel GPU VAs/sizes of the global context buffers;
	 * valid only while global_ctx_buffer_mapped is true */
	u64 global_ctx_buffer_va[NR_GLOBAL_CTX_BUF_VA];
	u64 global_ctx_buffer_size[NR_GLOBAL_CTX_BUF_VA];
	bool global_ctx_buffer_mapped;
/* bookkeeping for one job submitted on a channel */
struct channel_gk20a_job {
	/* buffers referenced by this job (array of num_mapped_buffers) */
	struct mapped_buffer_node **mapped_buffers;
	int num_mapped_buffers;
	struct gk20a_fence *pre_fence;		/* fence preceding the job */
	struct gk20a_fence *post_fence;		/* fence following the job */
	struct priv_cmd_entry *wait_cmd;	/* priv cmdbuf for the wait */
	struct priv_cmd_entry *incr_cmd;	/* priv cmdbuf for the incr */
	struct list_head list;			/* entry in channel's jobs list */
/* event bookkeeping consumed by gk20a_channel_poll()/gk20a_channel_event();
 * NOTE(review): additional members exist outside this excerpt */
struct channel_gk20a_poll_events {
	int num_pending_events;		/* events posted but not yet consumed */
/* deferred job clean-up state; wq runs on the system workqueue
 * (see gk20a_channel_cancel_job_clean_up()) */
struct channel_gk20a_clean_up {
	struct delayed_work wq;
/* this is the priv element of struct nvhost_channel */
struct channel_gk20a {
	struct gk20a *g; /* set only when channel is active */
	struct list_head free_chs;		/* entry in the free-channel list */
	spinlock_t ref_obtain_lock;		/* guards taking a new reference */
	wait_queue_head_t ref_count_dec_wq;	/* woken as references are dropped */
	/* serializes ioctl handling on this channel */
	struct mutex ioctl_lock;
	struct list_head ch_entry; /* channel's entry in TSG */
	/* list of in-flight jobs, guarded by jobs_lock */
	struct list_head jobs;
	spinlock_t jobs_lock;
	struct gpfifo_desc gpfifo;		/* the channel's GPFIFO */
	struct channel_ctx_gk20a ch_ctx;	/* contexts owned by this channel */
	struct mem_desc inst_block;		/* instance block memory */
	struct mem_desc_sub ramfc;		/* RAMFC sub-range of inst_block */
	u32 obj_class;	/* we support only one obj per channel */
	struct priv_cmd_queue priv_cmd_q;	/* queue for wait/incr commands */
	/* waiters for notifier, semaphore and submit-space availability */
	wait_queue_head_t notifier_wq;
	wait_queue_head_t semaphore_wq;
	wait_queue_head_t submit_wq;
	/* timeout tracking used by gk20a_channel_update_and_check_timeout() */
	u32 timeout_accumulated_ms;
	u32 timeout_gpfifo_get;
	struct channel_gk20a_clean_up clean_up;	/* deferred job clean-up */
	/* fences of the most recent submission, guarded by fence_lock */
	struct gk20a_fence *pre_fence;
	struct gk20a_fence *post_fence;
	struct mutex fence_lock;
	void (*remove_support)(struct channel_gk20a *);
	/* NOTE(review): matching #endif not visible in this excerpt */
#if defined(CONFIG_GK20A_CYCLE_STATS)
	void *cyclestate_buffer;
	u32 cyclestate_buffer_size;
	struct dma_buf *cyclestate_buffer_handler;
	struct mutex cyclestate_buffer_mutex;
	struct mutex cs_client_mutex;		/* presumably guards cs_client */
	struct gk20a_cs_snapshot_client *cs_client;
	struct mutex dbg_s_lock;		/* presumably guards dbg_s_list */
	struct list_head dbg_s_list;		/* attached debugger sessions */
	bool timeout_debug_dump;
	/* error notifier backed by a dma-buf shared with userspace */
	struct dma_buf *error_notifier_ref;
	struct nvgpu_notification *error_notifier;
	void *error_notifier_va;
	struct mutex sync_lock;			/* presumably guards sync */
	struct gk20a_channel_sync *sync;
	/* NOTE(review): matching #endif not visible in this excerpt */
#ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
	struct channel_gk20a_poll_events poll_events;
	/* signal channel owner via a callback, if set, in gk20a_channel_update
	 * via schedule_work */
	void (*update_fn)(struct channel_gk20a *, void *);
	void *update_fn_data;
	spinlock_t update_fn_lock; /* make access to the two above atomic */
	struct work_struct update_fn_work;
/* presumably: has this channel been bound to a GPU address space?
 * NOTE(review): the inline body lies outside this excerpt */
static inline bool gk20a_channel_as_bound(struct channel_gk20a *ch)
int channel_gk20a_commit_va(struct channel_gk20a *c);
int gk20a_init_channel_support(struct gk20a *, u32 chid);
/* must be inside gk20a_busy()..gk20a_idle() */
void gk20a_channel_close(struct channel_gk20a *ch);

/* folds timeout_delta_ms into the channel's accumulated timeout state */
bool gk20a_channel_update_and_check_timeout(struct channel_gk20a *ch,
		u32 timeout_delta_ms);
void gk20a_disable_channel(struct channel_gk20a *ch);
void gk20a_channel_abort(struct channel_gk20a *ch, bool channel_preempt);
/* NOTE(review): timeout units (jiffies vs ms) not visible in this excerpt */
int gk20a_channel_finish(struct channel_gk20a *ch, unsigned long timeout);
void gk20a_set_error_notifier(struct channel_gk20a *ch, __u32 error);
void gk20a_channel_semaphore_wakeup(struct gk20a *g);
/* allocates a size-byte entry from the channel's private command queue */
int gk20a_channel_alloc_priv_cmdbuf(struct channel_gk20a *c, u32 size,
		struct priv_cmd_entry **entry);

/* suspend/resume channel state for device g (power management paths) */
int gk20a_channel_suspend(struct gk20a *g);
int gk20a_channel_resume(struct gk20a *g);
/* Channel file operations */
int gk20a_channel_open(struct inode *inode, struct file *filp);
int gk20a_channel_open_ioctl(struct gk20a *g,
		struct nvgpu_channel_open_args *args);
/* NOTE(review): the cmd/arg parameters of this ioctl handler lie
 * outside the visible excerpt */
long gk20a_channel_ioctl(struct file *filp,
int gk20a_channel_release(struct inode *inode, struct file *filp);
struct channel_gk20a *gk20a_get_channel_from_file(int fd);
void gk20a_channel_update(struct channel_gk20a *c, int nr_completed);
unsigned int gk20a_channel_poll(struct file *filep, poll_table *wait);
void gk20a_channel_event(struct channel_gk20a *ch);

void gk20a_init_channel(struct gpu_ops *gops);
/* returns ch if reference was obtained */
/* NOTE(review): trailing parameter(s) of _gk20a_channel_get() lie
 * outside the visible excerpt; the macro below passes __func__ as the
 * caller tag for ref tracking */
struct channel_gk20a *__must_check _gk20a_channel_get(struct channel_gk20a *ch,
#define gk20a_channel_get(ch) _gk20a_channel_get(ch, __func__)

/* drops a reference taken via gk20a_channel_get() */
void _gk20a_channel_put(struct channel_gk20a *ch, const char *caller);
#define gk20a_channel_put(ch) _gk20a_channel_put(ch, __func__)

int gk20a_wait_channel_idle(struct channel_gk20a *ch);
struct channel_gk20a *gk20a_open_new_channel(struct gk20a *g);
/* like gk20a_open_new_channel() but also installs update_fn/update_fn_data
 * as the channel's completion callback */
struct channel_gk20a *gk20a_open_new_channel_with_cb(struct gk20a *g,
		void (*update_fn)(struct channel_gk20a *, void *),
		void *update_fn_data);
void channel_gk20a_unbind(struct channel_gk20a *ch_gk20a);

void gk20a_channel_cancel_job_clean_up(struct channel_gk20a *c,
		bool wait_for_completion);
/* submits gpfifo entries on channel c, optionally producing *fence_out;
 * NOTE(review): some middle parameter lines of this prototype lie
 * outside the visible excerpt */
int gk20a_submit_channel_gpfifo(struct channel_gk20a *c,
		struct nvgpu_gpfifo *gpfifo,
		struct nvgpu_submit_gpfifo_args *args,
		struct nvgpu_fence *fence,
		struct gk20a_fence **fence_out,
		bool force_need_sync_fence);

/* allocates the channel's GPFIFO per the userspace alloc args */
int gk20a_alloc_channel_gpfifo(struct channel_gk20a *c,
		struct nvgpu_alloc_gpfifo_args *args);
261 void channel_gk20a_unbind(struct channel_gk20a *ch_gk20a);
262 void channel_gk20a_disable(struct channel_gk20a *ch);
263 int channel_gk20a_alloc_inst(struct gk20a *g, struct channel_gk20a *ch);
264 void channel_gk20a_free_inst(struct gk20a *g, struct channel_gk20a *ch);
265 int channel_gk20a_setup_ramfc(struct channel_gk20a *c,
266 u64 gpfifo_base, u32 gpfifo_entries, u32 flags);
267 void channel_gk20a_enable(struct channel_gk20a *ch);
268 #endif /* CHANNEL_GK20A_H */