/*
 * drivers/misc/tegra-profiler/comm.c
 *
 * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/mm.h>

#include <asm/uaccess.h>

#include <linux/tegra_profiler.h>

#include "comm.h"
#include "version.h"

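/* Global state shared between the misc character device and the profiler core. */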
struct quadd_comm_ctx comm_ctx;

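/*
 * The sample ring buffer is backed by vmalloc() memory: it only needs to
 * be virtually contiguous, and its size is chosen by the daemon at setup
 * time, so it may be too large for kmalloc().
 */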
static inline void *rb_alloc(unsigned long size)
{
	return vmalloc(size);
}

static inline void rb_free(void *addr)
{
	vfree(addr);
}

static void rb_reset(struct quadd_ring_buffer *rb)
{
	rb->pos_read = 0;
	rb->pos_write = 0;
	rb->fill_count = 0;
	rb->max_fill_count = 0;
}

static int rb_init(struct quadd_ring_buffer *rb, size_t size)
{
	spin_lock_init(&rb->lock);

	rb->size = size;

	rb->buf = rb_alloc(rb->size);
	if (!rb->buf) {
		pr_err("Ring buffer alloc error\n");
		return -ENOMEM;
	}
	pr_info("rb: data buffer size: %u\n", (unsigned int)rb->size);

	rb_reset(rb);

	return 0;
}

static void rb_deinit(struct quadd_ring_buffer *rb)
{
	unsigned long flags;

	spin_lock_irqsave(&rb->lock, flags);
	if (rb->buf) {
		rb_reset(rb);

		rb_free(rb->buf);
		rb->buf = NULL;
	}
	spin_unlock_irqrestore(&rb->lock, flags);
}

static __maybe_unused int rb_is_full(struct quadd_ring_buffer *rb)
{
	return rb->fill_count == rb->size;
}

static int rb_is_empty(struct quadd_ring_buffer *rb)
{
	return rb->fill_count == 0;
}

static int rb_is_empty_lock(struct quadd_ring_buffer *rb)
{
	int res;
	unsigned long flags;

	spin_lock_irqsave(&rb->lock, flags);
	res = rb->fill_count == 0;
	spin_unlock_irqrestore(&rb->lock, flags);

	return res;
}

static size_t
rb_get_free_space(struct quadd_ring_buffer *rb)
{
	return rb->size - rb->fill_count;
}

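/*
 * Copy @length bytes into the ring buffer, splitting the copy in two when
 * it wraps past the end of the buffer.  Returns 0 if there is not enough
 * free space.  The caller must hold rb->lock.
 */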
static size_t
rb_write(struct quadd_ring_buffer *rb, char *data, size_t length)
{
	size_t new_pos_write, chunk1;

	if (length > rb_get_free_space(rb))
		return 0;

	new_pos_write = (rb->pos_write + length) % rb->size;

	if (new_pos_write < rb->pos_write) {
		chunk1 = rb->size - rb->pos_write;
		memcpy(rb->buf + rb->pos_write, data, chunk1);
		if (new_pos_write > 0)
			memcpy(rb->buf, data + chunk1, new_pos_write);
	} else {
		memcpy(rb->buf + rb->pos_write, data, length);
	}

	rb->pos_write = new_pos_write;
	rb->fill_count += length;

	return length;
}

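/*
 * Rewind the read position by @length bytes so that a partially parsed
 * record can be returned to the buffer, e.g. when the remaining user
 * buffer is too small to hold the whole record.
 */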
static ssize_t rb_read_undo(struct quadd_ring_buffer *rb, size_t length)
{
	if (rb_get_free_space(rb) < length)
		return -EIO;

	/* keep pos_read within [0, size) */
	if (rb->pos_read >= length)
		rb->pos_read -= length;
	else
		rb->pos_read += rb->size - length;

	rb->fill_count += length;
	return length;
}

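/*
 * Copy @length bytes out of the ring buffer into a kernel buffer.
 * Returns @length on success or 0 if the buffer holds fewer bytes.
 * The caller must hold rb->lock.
 */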
static size_t rb_read(struct quadd_ring_buffer *rb, char *data, size_t length)
{
	size_t new_pos_read, chunk1;

	if (length > rb->fill_count)
		return 0;

	new_pos_read = (rb->pos_read + length) % rb->size;

	if (new_pos_read < rb->pos_read) {
		chunk1 = rb->size - rb->pos_read;
		memcpy(data, rb->buf + rb->pos_read, chunk1);
		if (new_pos_read > 0)
			memcpy(data + chunk1, rb->buf, new_pos_read);
	} else {
		memcpy(data, rb->buf + rb->pos_read, length);
	}

	rb->pos_read = new_pos_read;
	rb->fill_count -= length;

	return length;
}

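/*
 * Same as rb_read(), but copies directly to a userspace buffer with
 * copy_to_user().  Returns -EFAULT if the user buffer is not writable.
 */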
static ssize_t
rb_read_user(struct quadd_ring_buffer *rb, char __user *data, size_t length)
{
	size_t new_pos_read, chunk1;

	if (length > rb->fill_count)
		return 0;

	new_pos_read = (rb->pos_read + length) % rb->size;

	if (new_pos_read < rb->pos_read) {
		chunk1 = rb->size - rb->pos_read;
		if (copy_to_user(data, rb->buf + rb->pos_read, chunk1))
			return -EFAULT;

		if (new_pos_read > 0) {
			if (copy_to_user(data + chunk1, rb->buf,
					 new_pos_read))
				return -EFAULT;
		}
	} else {
		if (copy_to_user(data, rb->buf + rb->pos_read, length))
			return -EFAULT;
	}

	rb->pos_read = new_pos_read;
	rb->fill_count -= length;

	return length;
}

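/*
 * Serialize one record header plus its iovec payload into the ring buffer
 * atomically (under rb->lock), then wake up any reader blocked in poll().
 * If the whole sample does not fit, it is dropped.
 */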
static void
write_sample(struct quadd_record_data *sample,
	     struct quadd_iovec *vec, int vec_count)
{
	int i;
	unsigned long flags;
	struct quadd_ring_buffer *rb = &comm_ctx.rb;
	size_t length_sample;

	length_sample = sizeof(struct quadd_record_data);
	for (i = 0; i < vec_count; i++)
		length_sample += vec[i].len;

	spin_lock_irqsave(&rb->lock, flags);

	if (length_sample > rb_get_free_space(rb)) {
		pr_err_once("Error: ring buffer overflow\n");
		spin_unlock_irqrestore(&rb->lock, flags);
		return;
	}

	if (!rb_write(rb, (char *)sample, sizeof(struct quadd_record_data))) {
		spin_unlock_irqrestore(&rb->lock, flags);
		return;
	}

	for (i = 0; i < vec_count; i++) {
		if (!rb_write(rb, vec[i].base, vec[i].len)) {
			spin_unlock_irqrestore(&rb->lock, flags);
			pr_err_once("%s: error: ring buffer\n", __func__);
			return;
		}
	}

	if (rb->fill_count > rb->max_fill_count)
		rb->max_fill_count = rb->fill_count;

	spin_unlock_irqrestore(&rb->lock, flags);

	wake_up_interruptible(&comm_ctx.read_wait);
}

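/*
 * Extract a single record from the ring buffer and copy it to userspace.
 * The record header determines how much type-specific payload follows it
 * (callchain entries, counter values, filename, etc.).  If the remaining
 * user buffer is too small for the whole record, the bytes already read
 * are pushed back with rb_read_undo() and 0 is returned.
 */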
static ssize_t read_sample(char __user *buffer, size_t max_length)
{
	u32 sed;
	unsigned int type;
	int retval = -EIO, ip_size;
	int was_read = 0, write_offset = 0;
	unsigned long flags;
	struct quadd_ring_buffer *rb = &comm_ctx.rb;
	struct quadd_record_data record;
	size_t length_extra = 0, nr_events;
	struct quadd_sample_data *sample;

	spin_lock_irqsave(&rb->lock, flags);

	if (rb_is_empty(rb)) {
		retval = 0;
		goto out;
	}

	if (rb->fill_count < sizeof(record))
		goto out;

	if (!rb_read(rb, (char *)&record, sizeof(record)))
		goto out;

	was_read += sizeof(record);

	type = record.record_type;

	switch (type) {
	case QUADD_RECORD_TYPE_SAMPLE:
		sample = &record.sample;

		if (rb->fill_count < sizeof(sed))
			goto out;

		if (!rb_read(rb, (char *)&sed, sizeof(sed)))
			goto out;

		was_read += sizeof(sed);

		ip_size = (sed & QUADD_SED_IP64) ?
			sizeof(u64) : sizeof(u32);

		length_extra = sample->callchain_nr * ip_size;

		nr_events = __sw_hweight32(sample->events_flags);
		length_extra += nr_events * sizeof(u32);

		length_extra += sample->state ? sizeof(u32) : 0;
		break;

	case QUADD_RECORD_TYPE_MMAP:
		length_extra = sizeof(u64) * 2;

		if (record.mmap.filename_length > 0) {
			length_extra += record.mmap.filename_length;
		} else {
			pr_err("Error: filename is empty\n");
			goto out;
		}
		break;

	case QUADD_RECORD_TYPE_HEADER:
		length_extra = record.hdr.nr_events * sizeof(u32);
		break;

	case QUADD_RECORD_TYPE_DEBUG:
		length_extra = record.debug.extra_length;
		break;

	case QUADD_RECORD_TYPE_MA:
		length_extra = 0;
		break;

	case QUADD_RECORD_TYPE_POWER_RATE:
		length_extra = record.power_rate.nr_cpus * sizeof(u32);
		break;

	case QUADD_RECORD_TYPE_ADDITIONAL_SAMPLE:
		length_extra = record.additional_sample.extra_length;
		break;

	default:
		goto out;
	}

	if (was_read + length_extra > max_length) {
		retval = rb_read_undo(rb, was_read);
		if (retval < 0)
			goto out;

		retval = 0;
		goto out;
	}

	if (length_extra > rb->fill_count)
		goto out;

	if (copy_to_user(buffer, &record, sizeof(record)))
		goto out_fault_error;

	write_offset += sizeof(record);

	if (type == QUADD_RECORD_TYPE_SAMPLE) {
		if (copy_to_user(buffer + write_offset, &sed, sizeof(sed)))
			goto out_fault_error;

		write_offset += sizeof(sed);
	}

	if (length_extra > 0) {
		retval = rb_read_user(rb, buffer + write_offset,
				      length_extra);
		if (retval <= 0)
			goto out;

		write_offset += length_extra;
	}

	spin_unlock_irqrestore(&rb->lock, flags);
	return write_offset;

out_fault_error:
	retval = -EFAULT;

out:
	spin_unlock_irqrestore(&rb->lock, flags);
	return retval;
}

static void put_sample(struct quadd_record_data *data,
		       struct quadd_iovec *vec, int vec_count)
{
	if (!atomic_read(&comm_ctx.active))
		return;

	write_sample(data, vec, vec_count);
}

static void comm_reset(void)
{
	unsigned long flags;

	pr_debug("Comm reset\n");
	spin_lock_irqsave(&comm_ctx.rb.lock, flags);
	rb_reset(&comm_ctx.rb);
	spin_unlock_irqrestore(&comm_ctx.rb.lock, flags);
}

static int is_active(void)
{
	return atomic_read(&comm_ctx.active) != 0;
}

static struct quadd_comm_data_interface comm_data = {
	.put_sample = put_sample,
	.reset = comm_reset,
	.is_active = is_active,
};

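/*
 * Only root (CAP_SYS_ADMIN), the owner of the profiled process, or the
 * configured debug application uid may access profiling data.
 */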
static int check_access_permission(void)
{
	struct task_struct *task;

	if (capable(CAP_SYS_ADMIN))
		return 0;

	if (!comm_ctx.params_ok || comm_ctx.process_pid == 0)
		return -EACCES;

	rcu_read_lock();
	task = pid_task(find_vpid(comm_ctx.process_pid), PIDTYPE_PID);
	rcu_read_unlock();
	if (!task)
		return -EACCES;

	if (current_fsuid() != task_uid(task) &&
	    task_uid(task) != comm_ctx.debug_app_uid) {
		pr_err("Permission denied, owner/task uids: %u/%u\n",
		       current_fsuid(), task_uid(task));
		return -EACCES;
	}
	return 0;
}

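/*
 * Look up a registered extab mapping by its VMA start address.
 * The caller must hold comm_ctx.mmaps_lock.
 */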
static struct quadd_extabs_mmap *
find_mmap(unsigned long vm_start)
{
	struct quadd_extabs_mmap *entry;

	list_for_each_entry(entry, &comm_ctx.ext_mmaps, list) {
		struct vm_area_struct *mmap_vma = entry->mmap_vma;

		if (vm_start == mmap_vma->vm_start)
			return entry;
	}

	return NULL;
}

static int device_open(struct inode *inode, struct file *file)
{
	mutex_lock(&comm_ctx.io_mutex);
	comm_ctx.nr_users++;
	mutex_unlock(&comm_ctx.io_mutex);
	return 0;
}

static int device_release(struct inode *inode, struct file *file)
{
	mutex_lock(&comm_ctx.io_mutex);
	comm_ctx.nr_users--;

	if (comm_ctx.nr_users == 0) {
		if (atomic_cmpxchg(&comm_ctx.active, 1, 0)) {
			comm_ctx.control->stop();
			pr_info("Stop profiling: daemon closed the device\n");
		}
	}
	mutex_unlock(&comm_ctx.io_mutex);

	return 0;
}

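/*
 * poll() reports POLLIN when samples are available and POLLHUP once
 * profiling has been stopped.
 */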
static unsigned int
device_poll(struct file *file, poll_table *wait)
{
	unsigned int mask = 0;
	struct quadd_ring_buffer *rb = &comm_ctx.rb;

	poll_wait(file, &comm_ctx.read_wait, wait);

	if (!rb_is_empty_lock(rb))
		mask |= POLLIN | POLLRDNORM;

	if (!atomic_read(&comm_ctx.active))
		mask |= POLLHUP;

	return mask;
}

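/*
 * read() drains as many complete records as fit into the user buffer.
 * It never returns a partial record: read_sample() returns 0 when the
 * next record would not fit.
 */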
static ssize_t
device_read(struct file *filp,
	    char __user *buffer,
	    size_t length,
	    loff_t *offset)
{
	int err;
	ssize_t res;
	size_t samples_counter = 0;
	size_t was_read = 0, min_size;

	err = check_access_permission();
	if (err)
		return err;

	mutex_lock(&comm_ctx.io_mutex);

	if (!atomic_read(&comm_ctx.active)) {
		mutex_unlock(&comm_ctx.io_mutex);
		return -EPIPE;
	}

	min_size = sizeof(struct quadd_record_data) + sizeof(u32);

	while (was_read + min_size < length) {
		res = read_sample(buffer + was_read, length - was_read);
		if (res < 0) {
			mutex_unlock(&comm_ctx.io_mutex);
			pr_err("Error: data is corrupted\n");
			return res;
		}

		if (res == 0)
			break;

		was_read += res;
		samples_counter++;

		if (!atomic_read(&comm_ctx.active))
			break;
	}

	mutex_unlock(&comm_ctx.io_mutex);
	return was_read;
}

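/*
 * Control interface for the profiler daemon: set parameters, query
 * capabilities/state/version, start and stop profiling, and register
 * exception-handling tables for unwinding.
 */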
static long
device_ioctl(struct file *file,
	     unsigned int ioctl_num,
	     unsigned long ioctl_param)
{
	int err = 0;
	unsigned long flags;
	u64 *mmap_vm_start;
	struct quadd_extabs_mmap *mmap;
	struct quadd_parameters *user_params;
	struct quadd_comm_cap cap;
	struct quadd_module_state state;
	struct quadd_module_version versions;
	struct quadd_extables extabs;
	struct quadd_ring_buffer *rb = &comm_ctx.rb;

	if (ioctl_num != IOCTL_SETUP &&
	    ioctl_num != IOCTL_GET_CAP &&
	    ioctl_num != IOCTL_GET_STATE &&
	    ioctl_num != IOCTL_GET_VERSION) {
		err = check_access_permission();
		if (err)
			return err;
	}

	mutex_lock(&comm_ctx.io_mutex);

	switch (ioctl_num) {
	case IOCTL_SETUP:
		if (atomic_read(&comm_ctx.active)) {
			pr_err("error: tegra profiler is active\n");
			err = -EBUSY;
			goto error_out;
		}

		user_params = vmalloc(sizeof(*user_params));
		if (!user_params) {
			err = -ENOMEM;
			goto error_out;
		}

		if (copy_from_user(user_params, (void __user *)ioctl_param,
				   sizeof(struct quadd_parameters))) {
			pr_err("setup failed\n");
			vfree(user_params);
			err = -EFAULT;
			goto error_out;
		}

		err = comm_ctx.control->set_parameters(user_params,
						       &comm_ctx.debug_app_uid);
		if (err) {
			pr_err("error: setup failed\n");
			vfree(user_params);
			goto error_out;
		}
		comm_ctx.params_ok = 1;
		comm_ctx.process_pid = user_params->pids[0];

		if (user_params->reserved[QUADD_PARAM_IDX_SIZE_OF_RB] == 0) {
			pr_err("error: daemon version is too old\n");
			vfree(user_params);
			err = -EINVAL;
			goto error_out;
		}
		comm_ctx.rb_size =
			user_params->reserved[QUADD_PARAM_IDX_SIZE_OF_RB];

		pr_info("setup success: freq/mafreq: %u/%u, backtrace: %d, pid: %d\n",
			user_params->freq,
			user_params->ma_freq,
			user_params->backtrace,
			user_params->pids[0]);

		vfree(user_params);
		break;

	case IOCTL_GET_CAP:
		comm_ctx.control->get_capabilities(&cap);
		if (copy_to_user((void __user *)ioctl_param, &cap,
				 sizeof(struct quadd_comm_cap))) {
			pr_err("error: get_capabilities failed\n");
			err = -EFAULT;
			goto error_out;
		}
		break;

	case IOCTL_GET_VERSION:
		strcpy((char *)versions.branch, QUADD_MODULE_BRANCH);
		strcpy((char *)versions.version, QUADD_MODULE_VERSION);

		versions.samples_version = QUADD_SAMPLES_VERSION;
		versions.io_version = QUADD_IO_VERSION;

		if (copy_to_user((void __user *)ioctl_param, &versions,
				 sizeof(struct quadd_module_version))) {
			pr_err("error: get version failed\n");
			err = -EFAULT;
			goto error_out;
		}
		break;

	case IOCTL_GET_STATE:
		comm_ctx.control->get_state(&state);

		state.buffer_size = comm_ctx.rb_size;

		spin_lock_irqsave(&rb->lock, flags);
		state.buffer_fill_size =
			comm_ctx.rb_size - rb_get_free_space(rb);
		state.reserved[QUADD_MOD_STATE_IDX_RB_MAX_FILL_COUNT] =
			rb->max_fill_count;
		spin_unlock_irqrestore(&rb->lock, flags);

		if (copy_to_user((void __user *)ioctl_param, &state,
				 sizeof(struct quadd_module_state))) {
			pr_err("error: get_state failed\n");
			err = -EFAULT;
			goto error_out;
		}
		break;

	case IOCTL_START:
		if (!atomic_cmpxchg(&comm_ctx.active, 0, 1)) {
			if (!comm_ctx.params_ok) {
				pr_err("error: params failed\n");
				atomic_set(&comm_ctx.active, 0);
				err = -EFAULT;
				goto error_out;
			}

			err = rb_init(rb, comm_ctx.rb_size);
			if (err) {
				pr_err("error: rb_init failed\n");
				atomic_set(&comm_ctx.active, 0);
				goto error_out;
			}

			err = comm_ctx.control->start();
			if (err) {
				pr_err("error: start failed\n");
				atomic_set(&comm_ctx.active, 0);
				goto error_out;
			}
			pr_info("profiling started\n");
		}
		break;

	case IOCTL_STOP:
		if (atomic_cmpxchg(&comm_ctx.active, 1, 0)) {
			comm_ctx.control->stop();
			wake_up_interruptible(&comm_ctx.read_wait);
			rb_deinit(&comm_ctx.rb);
			pr_info("profiling stopped\n");
		}
		break;

	case IOCTL_SET_EXTAB:
		if (copy_from_user(&extabs, (void __user *)ioctl_param,
				   sizeof(extabs))) {
			pr_err("error: set_extab failed\n");
			err = -EFAULT;
			goto error_out;
		}

		mmap_vm_start = (u64 *)
			&extabs.reserved[QUADD_EXT_IDX_MMAP_VM_START];

		spin_lock(&comm_ctx.mmaps_lock);
		mmap = find_mmap((unsigned long)*mmap_vm_start);
		if (!mmap) {
			pr_err("%s: error: mmap not found\n", __func__);
			err = -ENXIO;
			spin_unlock(&comm_ctx.mmaps_lock);
			goto error_out;
		}

		err = comm_ctx.control->set_extab(&extabs, mmap);
		spin_unlock(&comm_ctx.mmaps_lock);
		if (err) {
			pr_err("error: set_extab\n");
			goto error_out;
		}
		break;

	default:
		pr_err("error: ioctl %u is unsupported in this version of module\n",
		       ioctl_num);
		err = -EFAULT;
		goto error_out;
	}

error_out:
	mutex_unlock(&comm_ctx.io_mutex);
	return err;
}

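/*
 * Unlink an extab mapping from the list and free its backing memory.
 * Called from mmap_close() when the daemon unmaps the region.
 */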
static void
delete_mmap(struct quadd_extabs_mmap *mmap)
{
	struct quadd_extabs_mmap *entry, *next;

	list_for_each_entry_safe(entry, next, &comm_ctx.ext_mmaps, list) {
		if (entry == mmap) {
			list_del(&entry->list);
			vfree(entry->data);
			kfree(entry);
			break;
		}
	}
}

static void mmap_open(struct vm_area_struct *vma)
{
}

static void mmap_close(struct vm_area_struct *vma)
{
	struct quadd_extabs_mmap *mmap;

	pr_debug("mmap_close: vma: %#lx - %#lx\n",
		 vma->vm_start, vma->vm_end);

	spin_lock(&comm_ctx.mmaps_lock);

	mmap = find_mmap(vma->vm_start);
	if (!mmap) {
		pr_err("%s: error: mmap not found\n", __func__);
		goto out;
	}

	comm_ctx.control->delete_mmap(mmap);
	delete_mmap(mmap);

out:
	spin_unlock(&comm_ctx.mmaps_lock);
}

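/*
 * Page fault handler: back the userspace mapping with pages from the
 * vmalloc_user() area that was allocated in device_mmap().
 */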
static int mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	void *data;
	struct quadd_extabs_mmap *mmap;
	unsigned long offset = vmf->pgoff << PAGE_SHIFT;

	pr_debug("mmap_fault: vma: %#lx - %#lx, pgoff: %#lx, vaddr: %p\n",
		 vma->vm_start, vma->vm_end, vmf->pgoff, vmf->virtual_address);

	spin_lock(&comm_ctx.mmaps_lock);

	mmap = find_mmap(vma->vm_start);
	if (!mmap) {
		spin_unlock(&comm_ctx.mmaps_lock);
		return VM_FAULT_SIGBUS;
	}

	data = mmap->data;

	vmf->page = vmalloc_to_page(data + offset);
	get_page(vmf->page);

	spin_unlock(&comm_ctx.mmaps_lock);
	return 0;
}

static const struct vm_operations_struct mmap_vm_ops = {
	.open	= mmap_open,
	.close	= mmap_close,
	.fault	= mmap_fault,
};

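/*
 * mmap() allocates a kernel buffer with vmalloc_user() that the daemon
 * fills with exception-handling table data; pages are mapped into the
 * daemon's address space lazily via mmap_fault().
 */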
static int
device_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long vma_size, nr_pages;
	struct quadd_extabs_mmap *entry;

	pr_debug("mmap: vma: %#lx - %#lx, pgoff: %#lx\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	vma->vm_private_data = filp->private_data;

	vma_size = vma->vm_end - vma->vm_start;
	nr_pages = vma_size / PAGE_SIZE;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->mmap_vma = vma;

	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->ex_entries);

	entry->data = vmalloc_user(nr_pages * PAGE_SIZE);
	if (!entry->data) {
		pr_err("%s: error: vmalloc_user\n", __func__);
		kfree(entry);
		return -ENOMEM;
	}

	spin_lock(&comm_ctx.mmaps_lock);
	list_add_tail(&entry->list, &comm_ctx.ext_mmaps);
	spin_unlock(&comm_ctx.mmaps_lock);

	vma->vm_ops = &mmap_vm_ops;
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;

	vma->vm_ops->open(vma);

	return 0;
}

static void unregister(void)
{
	misc_deregister(comm_ctx.misc_dev);
	kfree(comm_ctx.misc_dev);
}

static void free_ctx(void)
{
	rb_deinit(&comm_ctx.rb);
}

static const struct file_operations qm_fops = {
	.read		= device_read,
	.poll		= device_poll,
	.open		= device_open,
	.release	= device_release,
	.unlocked_ioctl	= device_ioctl,
	.compat_ioctl	= device_ioctl,
	.mmap		= device_mmap,
};

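/*
 * Register the misc character device (QUADD_DEVICE_NAME) and initialize
 * the shared communication context.
 */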
static int comm_init(void)
{
	int res;
	struct miscdevice *misc_dev;

	misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL);
	if (!misc_dev) {
		pr_err("Error: alloc error\n");
		return -ENOMEM;
	}

	misc_dev->minor = MISC_DYNAMIC_MINOR;
	misc_dev->name = QUADD_DEVICE_NAME;
	misc_dev->fops = &qm_fops;

	res = misc_register(misc_dev);
	if (res < 0) {
		pr_err("Error: misc_register: %d\n", res);
		kfree(misc_dev);
		return res;
	}
	comm_ctx.misc_dev = misc_dev;

	mutex_init(&comm_ctx.io_mutex);
	atomic_set(&comm_ctx.active, 0);

	comm_ctx.params_ok = 0;
	comm_ctx.process_pid = 0;
	comm_ctx.nr_users = 0;

	init_waitqueue_head(&comm_ctx.read_wait);

	INIT_LIST_HEAD(&comm_ctx.ext_mmaps);
	spin_lock_init(&comm_ctx.mmaps_lock);

	return 0;
}

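/*
 * Module entry points: called by the profiler core to set up and tear
 * down the communication layer.
 */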
struct quadd_comm_data_interface *
quadd_comm_events_init(struct quadd_comm_control_interface *control)
{
	int err;

	err = comm_init();
	if (err < 0)
		return ERR_PTR(err);

	comm_ctx.control = control;
	return &comm_data;
}

void quadd_comm_events_exit(void)
{
	mutex_lock(&comm_ctx.io_mutex);
	unregister();
	free_ctx();
	mutex_unlock(&comm_ctx.io_mutex);
}