2 * drivers/video/tegra/host/gk20a/as_gk20a.c
6 * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 #include <linux/slab.h>
20 #include <linux/cdev.h>
21 #include <linux/uaccess.h>
23 #include <trace/events/gk20a.h>
27 /* dumb allocator... */
/*
 * Hand out a new address-space share id by post-incrementing the
 * per-as counter. Matches the "dumb allocator" note above: ids grow
 * monotonically and are apparently never recycled.
 */
28 static int generate_as_share_id(struct gk20a_as *as)
31 return ++as->last_share_id;
34 static void release_as_share_id(struct gk20a_as *as, int id)
/*
 * Allocate and initialize a new gk20a_as_share for @as, returning it
 * through @out. The share starts life holding one reference, and its
 * backing VM is created by gk20a_vm_alloc_share(). Error-handling and
 * cleanup lines are not visible in this chunk.
 */
40 static int gk20a_as_alloc_share(struct gk20a_as *as,
41 struct gk20a_as_share **out)
43 struct gk20a_as_share *as_share;
/* zeroed so all fields start in a known state */
49 as_share = kzalloc(sizeof(*as_share), GFP_KERNEL);
54 as_share->id = generate_as_share_id(as_share->as);
/* initial reference owned by the caller (see gk20a_as_release_share) */
55 as_share->ref_cnt.counter = 1;
57 /* this will set as_share->vm. */
58 err = gk20a_vm_alloc_share(as_share);
/*
 * Drop one reference on @as_share. Bound
71 * channels and the device nodes call this to release.
72 * once the ref_cnt hits zero the share is deleted.
 */
74 int gk20a_as_release_share(struct gk20a_as_share *as_share)
/* still referenced elsewhere: nothing to tear down yet */
80 if (atomic_dec_return(&as_share->ref_cnt) > 0)
/* last reference gone: release the VM, then retire the share id */
83 err = gk20a_vm_release_share(as_share);
84 release_as_share_id(as_share->as, as_share->id);
/*
 * NVHOST_AS_IOCTL_BIND_CHANNEL handler: bind the channel identified by
 * args->channel_fd to this address-space share. Takes a share reference
 * for the bound channel and drops it again if the bind fails. Rejects a
 * bad fd or an already-bound channel (error-path lines not visible here).
 */
89 static int gk20a_as_ioctl_bind_channel(
90 struct gk20a_as_share *as_share,
91 struct nvhost_as_bind_channel_args *args)
94 struct channel_gk20a *ch;
98 ch = gk20a_get_channel_from_file(args->channel_fd);
99 if (!ch || gk20a_channel_as_bound(ch))
/* the channel now pins the share until it unbinds */
102 atomic_inc(&as_share->ref_cnt);
104 /* this will set channel_gk20a->vm */
105 err = gk20a_vm_bind_channel(as_share, ch);
/* bind failed: give back the reference taken above */
107 atomic_dec(&as_share->ref_cnt);
/*
 * NVHOST_AS_IOCTL_ALLOC_SPACE handler: thin wrapper that reserves a
 * region of GPU virtual address space via the VM layer.
 */
114 static int gk20a_as_ioctl_alloc_space(
115 struct gk20a_as_share *as_share,
116 struct nvhost_as_alloc_space_args *args)
119 return gk20a_vm_alloc_space(as_share, args);
/*
 * NVHOST_AS_IOCTL_FREE_SPACE handler: thin wrapper that releases a
 * previously reserved GPU virtual address range.
 */
122 static int gk20a_as_ioctl_free_space(
123 struct gk20a_as_share *as_share,
124 struct nvhost_as_free_space_args *args)
127 return gk20a_vm_free_space(as_share, args);
/*
 * NVHOST_AS_IOCTL_MAP_BUFFER_EX handler: map a dmabuf into this address
 * space with caller-specified flags; args->as_offset carries the target
 * (and resulting) GPU VA. Trailing arguments of the call are not visible
 * in this chunk.
 */
130 static int gk20a_as_ioctl_map_buffer_ex(
131 struct gk20a_as_share *as_share,
132 struct nvhost_as_map_buffer_ex_args *args)
136 return gk20a_vm_map_buffer(as_share, args->dmabuf_fd,
137 &args->as_offset, args->flags,
/*
 * NVHOST_AS_IOCTL_MAP_BUFFER handler: legacy mapping path taking an
 * nvmap handle and defaulting the kind to NV_KIND_DEFAULT. Middle
 * argument line of the call is not visible in this chunk.
 */
144 static int gk20a_as_ioctl_map_buffer(
145 struct gk20a_as_share *as_share,
146 struct nvhost_as_map_buffer_args *args)
149 return gk20a_vm_map_buffer(as_share, args->nvmap_handle,
151 args->flags, NV_KIND_DEFAULT,
153 /* args->o_a.offset will be set if !err */
/*
 * NVHOST_AS_IOCTL_UNMAP_BUFFER handler: unmap the buffer mapped at
 * args->offset from this address space.
 */
156 static int gk20a_as_ioctl_unmap_buffer(
157 struct gk20a_as_share *as_share,
158 struct nvhost_as_unmap_buffer_args *args)
161 return gk20a_vm_unmap_buffer(as_share, args->offset);
/*
 * open() handler for the address-space device node: recover the gk20a
 * instance from the cdev, take a client reference, allocate a fresh
 * share and stash it in filp->private_data for later ioctls/release.
 * Error-return lines are not visible in this chunk.
 */
164 int gk20a_as_dev_open(struct inode *inode, struct file *filp)
166 struct gk20a_as_share *as_share;
/* cdev is embedded in struct gk20a, so container_of recovers g */
172 g = container_of(inode->i_cdev, struct gk20a, as.cdev);
174 err = gk20a_get_client(g);
176 gk20a_dbg_fn("fail to get channel!");
180 err = gk20a_as_alloc_share(&g->as, &as_share);
182 gk20a_dbg_fn("failed to alloc share");
187 filp->private_data = as_share;
/*
 * release() handler for the device node: drop the reference that
 * gk20a_as_dev_open() created on the share stored in private_data.
 */
191 int gk20a_as_dev_release(struct inode *inode, struct file *filp)
193 struct gk20a_as_share *as_share = filp->private_data;
195 struct gk20a *g = gk20a_from_as(as_share->as);
199 ret = gk20a_as_release_share(as_share);
/*
 * unlocked_ioctl entry point for the address-space device node.
 * Validates the ioctl number, copies the argument struct into a
 * stack buffer, powers up the GPU, dispatches to the per-command
 * handler, then copies results back for _IOC_READ commands.
 * NOTE: this function continues past the end of this chunk (the
 * gk20a_idle()/return tail is not visible here).
 */
206 long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
209 struct gk20a_as_share *as_share = filp->private_data;
210 struct gk20a *g = gk20a_from_as(as_share->as);
/* on-stack staging buffer for every supported arg struct */
212 u8 buf[NVHOST_AS_IOCTL_MAX_ARG_SIZE];
/* reject foreign magic / out-of-range command numbers up front */
214 if ((_IOC_TYPE(cmd) != NVHOST_AS_IOCTL_MAGIC) ||
215 (_IOC_NR(cmd) == 0) ||
216 (_IOC_NR(cmd) > NVHOST_AS_IOCTL_LAST))
/* compile-time-sized buffer must fit the largest declared ioctl */
219 BUG_ON(_IOC_SIZE(cmd) > NVHOST_AS_IOCTL_MAX_ARG_SIZE);
221 if (_IOC_DIR(cmd) & _IOC_WRITE) {
222 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
/* power up / busy-reference the GPU for the duration of the ioctl */
226 err = gk20a_busy(g->dev);
231 case NVHOST_AS_IOCTL_BIND_CHANNEL:
232 trace_gk20a_as_ioctl_bind_channel(dev_name(dev_from_gk20a(g)));
233 err = gk20a_as_ioctl_bind_channel(as_share,
234 (struct nvhost_as_bind_channel_args *)buf);
/* 32-bit compat variant: widen args in place, then narrow the result */
237 case NVHOST32_AS_IOCTL_ALLOC_SPACE:
239 struct nvhost32_as_alloc_space_args *args32 =
240 (struct nvhost32_as_alloc_space_args *)buf;
241 struct nvhost_as_alloc_space_args args;
243 args.pages = args32->pages;
244 args.page_size = args32->page_size;
245 args.flags = args32->flags;
246 args.o_a.offset = args32->o_a.offset;
247 trace_gk20a_as_ioctl_alloc_space(dev_name(dev_from_gk20a(g)));
248 err = gk20a_as_ioctl_alloc_space(as_share, &args);
/* propagate the allocated offset back into the user-visible struct */
249 args32->o_a.offset = args.o_a.offset;
252 case NVHOST_AS_IOCTL_ALLOC_SPACE:
253 trace_gk20a_as_ioctl_alloc_space(dev_name(dev_from_gk20a(g)));
254 err = gk20a_as_ioctl_alloc_space(as_share,
255 (struct nvhost_as_alloc_space_args *)buf);
257 case NVHOST_AS_IOCTL_FREE_SPACE:
258 trace_gk20a_as_ioctl_free_space(dev_name(dev_from_gk20a(g)));
259 err = gk20a_as_ioctl_free_space(as_share,
260 (struct nvhost_as_free_space_args *)buf);
262 case NVHOST_AS_IOCTL_MAP_BUFFER:
263 trace_gk20a_as_ioctl_map_buffer(dev_name(dev_from_gk20a(g)));
264 err = gk20a_as_ioctl_map_buffer(as_share,
265 (struct nvhost_as_map_buffer_args *)buf);
267 case NVHOST_AS_IOCTL_MAP_BUFFER_EX:
/* NOTE(review): EX path reuses the plain map_buffer tracepoint */
268 trace_gk20a_as_ioctl_map_buffer(dev_name(dev_from_gk20a(g)));
269 err = gk20a_as_ioctl_map_buffer_ex(as_share,
270 (struct nvhost_as_map_buffer_ex_args *)buf);
272 case NVHOST_AS_IOCTL_UNMAP_BUFFER:
273 trace_gk20a_as_ioctl_unmap_buffer(dev_name(dev_from_gk20a(g)));
274 err = gk20a_as_ioctl_unmap_buffer(as_share,
275 (struct nvhost_as_unmap_buffer_args *)buf);
278 dev_err(dev_from_gk20a(g), "unrecognized as ioctl: 0x%x", cmd);
/* success + read direction: hand the (possibly updated) args back */
285 if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
286 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));