1 /*
2  *  linux/mm/nommu.c
3  *
4  *  Replacement code for mm functions to support CPUs that don't
5  *  have any form of memory management unit (thus no virtual memory).
6  *
7  *  See Documentation/nommu-mmap.txt
8  *
9  *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
10  *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
11  *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
12  *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
13  *  Copyright (c) 2007-2009 Paul Mundt <lethal@linux-sh.org>
14  */
15
16 #include <linux/module.h>
17 #include <linux/mm.h>
18 #include <linux/mman.h>
19 #include <linux/swap.h>
20 #include <linux/file.h>
21 #include <linux/highmem.h>
22 #include <linux/pagemap.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/tracehook.h>
26 #include <linux/blkdev.h>
27 #include <linux/backing-dev.h>
28 #include <linux/mount.h>
29 #include <linux/personality.h>
30 #include <linux/security.h>
31 #include <linux/syscalls.h>
32
33 #include <asm/uaccess.h>
34 #include <asm/tlb.h>
35 #include <asm/tlbflush.h>
36 #include <asm/mmu_context.h>
37 #include "internal.h"
38
39 static inline __attribute__((format(printf, 1, 2)))
40 void no_printk(const char *fmt, ...)
41 {
42 }
43
44 #if 0
45 #define kenter(FMT, ...) \
46         printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
47 #define kleave(FMT, ...) \
48         printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
49 #define kdebug(FMT, ...) \
50         printk(KERN_DEBUG "xxx" FMT"yyy\n", ##__VA_ARGS__)
51 #else
52 #define kenter(FMT, ...) \
53         no_printk(KERN_DEBUG "==> %s("FMT")\n", __func__, ##__VA_ARGS__)
54 #define kleave(FMT, ...) \
55         no_printk(KERN_DEBUG "<== %s()"FMT"\n", __func__, ##__VA_ARGS__)
56 #define kdebug(FMT, ...) \
57         no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
58 #endif
59
60 void *high_memory;
61 struct page *mem_map;
62 unsigned long max_mapnr;
63 unsigned long num_physpages;
64 unsigned long highest_memmap_pfn;
65 struct percpu_counter vm_committed_as;
66 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
67 int sysctl_overcommit_ratio = 50; /* default is 50% */
68 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
69 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
70 int heap_stack_gap = 0;
71
72 atomic_long_t mmap_pages_allocated;
73
74 EXPORT_SYMBOL(mem_map);
75 EXPORT_SYMBOL(num_physpages);
76
77 /* list of mapped, potentially shareable regions */
78 static struct kmem_cache *vm_region_jar;
79 struct rb_root nommu_region_tree = RB_ROOT;
80 DECLARE_RWSEM(nommu_region_sem);
81
82 const struct vm_operations_struct generic_file_vm_ops = {
83 };
84
85 /*
86  * Return the total memory allocated for this pointer, not
87  * just what the caller asked for.
88  *
89  * Doesn't have to be accurate, i.e. may have races.
90  */
91 unsigned int kobjsize(const void *objp)
92 {
93         struct page *page;
94
95         /*
96          * If the object we have should not have ksize performed on it,
97          * return size of 0
98          */
99         if (!objp || !virt_addr_valid(objp))
100                 return 0;
101
102         page = virt_to_head_page(objp);
103
104         /*
105          * If the allocator sets PageSlab, we know the pointer came from
106          * kmalloc().
107          */
108         if (PageSlab(page))
109                 return ksize(objp);
110
111         /*
112          * If it's not a compound page, see if we have a matching VMA
113          * region. This test is intentionally done in reverse order,
114          * so if there's no VMA, we still fall through and hand back
115          * PAGE_SIZE for 0-order pages.
116          */
117         if (!PageCompound(page)) {
118                 struct vm_area_struct *vma;
119
120                 vma = find_vma(current->mm, (unsigned long)objp);
121                 if (vma)
122                         return vma->vm_end - vma->vm_start;
123         }
124
125         /*
126          * The ksize() function is only guaranteed to work for pointers
127          * returned by kmalloc(). So handle arbitrary pointers here.
128          */
129         return PAGE_SIZE << compound_order(page);
130 }
131
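/*
 * Pin down user pages on a no-MMU system. There is no real faulting to do
 * here: user addresses map directly to physical memory, so we just look up
 * the VMA covering each page, refuse I/O and raw PFN mappings, check the
 * access rights and hand back the struct page obtained via virt_to_page().
 */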
132 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
133                      unsigned long start, int nr_pages, unsigned int foll_flags,
134                      struct page **pages, struct vm_area_struct **vmas)
135 {
136         struct vm_area_struct *vma;
137         unsigned long vm_flags;
138         int i;
139
140         /* calculate required read or write permissions.
141          * If FOLL_FORCE is set, we only require the "MAY" flags.
142          */
143         vm_flags  = (foll_flags & FOLL_WRITE) ?
144                         (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
145         vm_flags &= (foll_flags & FOLL_FORCE) ?
146                         (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
147
148         for (i = 0; i < nr_pages; i++) {
149                 vma = find_vma(mm, start);
150                 if (!vma)
151                         goto finish_or_fault;
152
153                 /* protect what we can, including chardevs */
154                 if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
155                     !(vm_flags & vma->vm_flags))
156                         goto finish_or_fault;
157
158                 if (pages) {
159                         pages[i] = virt_to_page(start);
160                         if (pages[i])
161                                 page_cache_get(pages[i]);
162                 }
163                 if (vmas)
164                         vmas[i] = vma;
165                 start = (start + PAGE_SIZE) & PAGE_MASK;
166         }
167
168         return i;
169
170 finish_or_fault:
171         return i ? : -EFAULT;
172 }
173
174 /*
175  * get a list of pages in an address range belonging to the specified process
176  * and indicate the VMA that covers each page
177  * - this is potentially dodgy as we may end up incrementing the page count of a
178  *   slab page or a secondary page from a compound page
179  * - don't permit access to VMAs that don't support it, such as I/O mappings
180  */
181 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
182         unsigned long start, int nr_pages, int write, int force,
183         struct page **pages, struct vm_area_struct **vmas)
184 {
185         int flags = 0;
186
187         if (write)
188                 flags |= FOLL_WRITE;
189         if (force)
190                 flags |= FOLL_FORCE;
191
192         return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
193 }
194 EXPORT_SYMBOL(get_user_pages);
195
196 /**
197  * follow_pfn - look up PFN at a user virtual address
198  * @vma: memory mapping
199  * @address: user virtual address
200  * @pfn: location to store found PFN
201  *
202  * Only IO mappings and raw PFN mappings are allowed.
203  *
204  * Returns zero and the pfn at @pfn on success, -ve otherwise.
205  */
206 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
207         unsigned long *pfn)
208 {
209         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
210                 return -EINVAL;
211
212         *pfn = address >> PAGE_SHIFT;
213         return 0;
214 }
215 EXPORT_SYMBOL(follow_pfn);
216
217 DEFINE_RWLOCK(vmlist_lock);
218 struct vm_struct *vmlist;
219
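/*
 * On no-MMU systems the vmalloc() family is backed directly by kmalloc(),
 * so freeing and address conversion degenerate to their slab/page
 * counterparts: vfree() is kfree(), and vmalloc_to_page()/vmalloc_to_pfn()
 * can use virt_to_page() on the logical address.
 */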
220 void vfree(const void *addr)
221 {
222         kfree(addr);
223 }
224 EXPORT_SYMBOL(vfree);
225
226 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
227 {
228         /*
229          *  You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
230          * returns only a logical address.
231          */
232         return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
233 }
234 EXPORT_SYMBOL(__vmalloc);
235
236 void *vmalloc_user(unsigned long size)
237 {
238         void *ret;
239
240         ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
241                         PAGE_KERNEL);
242         if (ret) {
243                 struct vm_area_struct *vma;
244
245                 down_write(&current->mm->mmap_sem);
246                 vma = find_vma(current->mm, (unsigned long)ret);
247                 if (vma)
248                         vma->vm_flags |= VM_USERMAP;
249                 up_write(&current->mm->mmap_sem);
250         }
251
252         return ret;
253 }
254 EXPORT_SYMBOL(vmalloc_user);
255
256 struct page *vmalloc_to_page(const void *addr)
257 {
258         return virt_to_page(addr);
259 }
260 EXPORT_SYMBOL(vmalloc_to_page);
261
262 unsigned long vmalloc_to_pfn(const void *addr)
263 {
264         return page_to_pfn(virt_to_page(addr));
265 }
266 EXPORT_SYMBOL(vmalloc_to_pfn);
267
268 long vread(char *buf, char *addr, unsigned long count)
269 {
270         memcpy(buf, addr, count);
271         return count;
272 }
273
274 long vwrite(char *buf, char *addr, unsigned long count)
275 {
276         /* Don't allow overflow */
277         if ((unsigned long) addr + count < count)
278                 count = -(unsigned long) addr;
279
280         memcpy(addr, buf, count);
281         return count;
282 }
283
284 /*
285  *      vmalloc  -  allocate virtually contiguous memory
286  *
287  *      @size:          allocation size
288  *
289  *      Allocate enough pages to cover @size from the page level
290  *      allocator and map them into contiguous kernel virtual space.
291  *
292  *      For tight control over page level allocator and protection flags
293  *      use __vmalloc() instead.
294  */
295 void *vmalloc(unsigned long size)
296 {
297        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
298 }
299 EXPORT_SYMBOL(vmalloc);
300
301 void *vmalloc_node(unsigned long size, int node)
302 {
303         return vmalloc(size);
304 }
305 EXPORT_SYMBOL(vmalloc_node);
306
307 #ifndef PAGE_KERNEL_EXEC
308 # define PAGE_KERNEL_EXEC PAGE_KERNEL
309 #endif
310
311 /**
312  *      vmalloc_exec  -  allocate virtually contiguous, executable memory
313  *      @size:          allocation size
314  *
315  *      Kernel-internal function to allocate enough pages to cover @size
316  *      from the page level allocator and map them into contiguous and
317  *      executable kernel virtual space.
318  *
319  *      For tight control over page level allocator and protection flags
320  *      use __vmalloc() instead.
321  */
322
323 void *vmalloc_exec(unsigned long size)
324 {
325         return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
326 }
327
328 /**
329  * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
330  *      @size:          allocation size
331  *
332  *      Allocate enough 32bit PA addressable pages to cover @size from the
333  *      page level allocator and map them into contiguous kernel virtual space.
334  */
335 void *vmalloc_32(unsigned long size)
336 {
337         return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
338 }
339 EXPORT_SYMBOL(vmalloc_32);
340
341 /**
342  * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
343  *      @size:          allocation size
344  *
345  * The resulting memory area is 32bit addressable and zeroed so it can be
346  * mapped to userspace without leaking data.
347  *
348  * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
349  * remap_vmalloc_range() are permissible.
350  */
351 void *vmalloc_32_user(unsigned long size)
352 {
353         /*
354          * We'll have to sort out the ZONE_DMA bits for 64-bit,
355          * but for now this can simply use vmalloc_user() directly.
356          */
357         return vmalloc_user(size);
358 }
359 EXPORT_SYMBOL(vmalloc_32_user);
360
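/*
 * vmap() and friends need page table manipulation to stitch arbitrary pages
 * into a contiguous virtual range, which is impossible without an MMU, so
 * these entry points are stubbed out and simply BUG() if ever called.
 */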
361 void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
362 {
363         BUG();
364         return NULL;
365 }
366 EXPORT_SYMBOL(vmap);
367
368 void vunmap(const void *addr)
369 {
370         BUG();
371 }
372 EXPORT_SYMBOL(vunmap);
373
374 void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
375 {
376         BUG();
377         return NULL;
378 }
379 EXPORT_SYMBOL(vm_map_ram);
380
381 void vm_unmap_ram(const void *mem, unsigned int count)
382 {
383         BUG();
384 }
385 EXPORT_SYMBOL(vm_unmap_ram);
386
387 void vm_unmap_aliases(void)
388 {
389 }
390 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
391
392 /*
393  * Implement a stub for vmalloc_sync_all() if the architecture chose not to
394  * have one.
395  */
396 void  __attribute__((weak)) vmalloc_sync_all(void)
397 {
398 }
399
400 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
401                    struct page *page)
402 {
403         return -EINVAL;
404 }
405 EXPORT_SYMBOL(vm_insert_page);
406
407 /*
408  *  sys_brk() for the most part doesn't need the global kernel
409  *  lock, except when an application is doing something nasty
410  *  like trying to un-brk an area that has already been mapped
411  *  to a regular file.  In this case, the unmapping will need
412  *  to invoke file system routines that need the global lock.
413  */
414 SYSCALL_DEFINE1(brk, unsigned long, brk)
415 {
416         struct mm_struct *mm = current->mm;
417
418         if (brk < mm->start_brk || brk > mm->context.end_brk)
419                 return mm->brk;
420
421         if (mm->brk == brk)
422                 return mm->brk;
423
424         /*
425          * Always allow shrinking brk
426          */
427         if (brk <= mm->brk) {
428                 mm->brk = brk;
429                 return brk;
430         }
431
432         /*
433          * Ok, looks good - let it rip.
434          */
435         flush_icache_range(mm->brk, brk);
436         return mm->brk = brk;
437 }
438
439 /*
440  * initialise the VMA and region record slabs
441  */
442 void __init mmap_init(void)
443 {
444         int ret;
445
446         ret = percpu_counter_init(&vm_committed_as, 0);
447         VM_BUG_ON(ret);
448         vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC);
449 }
450
451 /*
452  * validate the region tree
453  * - the caller must hold the region lock
454  */
455 #ifdef CONFIG_DEBUG_NOMMU_REGIONS
456 static noinline void validate_nommu_regions(void)
457 {
458         struct vm_region *region, *last;
459         struct rb_node *p, *lastp;
460
461         lastp = rb_first(&nommu_region_tree);
462         if (!lastp)
463                 return;
464
465         last = rb_entry(lastp, struct vm_region, vm_rb);
466         BUG_ON(unlikely(last->vm_end <= last->vm_start));
467         BUG_ON(unlikely(last->vm_top < last->vm_end));
468
469         while ((p = rb_next(lastp))) {
470                 region = rb_entry(p, struct vm_region, vm_rb);
471                 last = rb_entry(lastp, struct vm_region, vm_rb);
472
473                 BUG_ON(unlikely(region->vm_end <= region->vm_start));
474                 BUG_ON(unlikely(region->vm_top < region->vm_end));
475                 BUG_ON(unlikely(region->vm_start < last->vm_top));
476
477                 lastp = p;
478         }
479 }
480 #else
481 static void validate_nommu_regions(void)
482 {
483 }
484 #endif
485
486 /*
487  * add a region into the global tree
488  */
489 static void add_nommu_region(struct vm_region *region)
490 {
491         struct vm_region *pregion;
492         struct rb_node **p, *parent;
493
494         validate_nommu_regions();
495
496         parent = NULL;
497         p = &nommu_region_tree.rb_node;
498         while (*p) {
499                 parent = *p;
500                 pregion = rb_entry(parent, struct vm_region, vm_rb);
501                 if (region->vm_start < pregion->vm_start)
502                         p = &(*p)->rb_left;
503                 else if (region->vm_start > pregion->vm_start)
504                         p = &(*p)->rb_right;
505                 else if (pregion == region)
506                         return;
507                 else
508                         BUG();
509         }
510
511         rb_link_node(&region->vm_rb, parent, p);
512         rb_insert_color(&region->vm_rb, &nommu_region_tree);
513
514         validate_nommu_regions();
515 }
516
517 /*
518  * delete a region from the global tree
519  */
520 static void delete_nommu_region(struct vm_region *region)
521 {
522         BUG_ON(!nommu_region_tree.rb_node);
523
524         validate_nommu_regions();
525         rb_erase(&region->vm_rb, &nommu_region_tree);
526         validate_nommu_regions();
527 }
528
529 /*
530  * free a contiguous series of pages
531  */
532 static void free_page_series(unsigned long from, unsigned long to)
533 {
534         for (; from < to; from += PAGE_SIZE) {
535                 struct page *page = virt_to_page(from);
536
537                 kdebug("- free %lx", from);
538                 atomic_long_dec(&mmap_pages_allocated);
539                 if (page_count(page) != 1)
540                         kdebug("free page %p: refcount not one: %d",
541                                page, page_count(page));
542                 put_page(page);
543         }
544 }
545
546 /*
547  * release a reference to a region
548  * - the caller must hold the region semaphore for writing, which this releases
549  * - the region may not have been added to the tree yet, in which case vm_top
550  *   will equal vm_start
551  */
552 static void __put_nommu_region(struct vm_region *region)
553         __releases(nommu_region_sem)
554 {
555         kenter("%p{%d}", region, region->vm_usage);
556
557         BUG_ON(!nommu_region_tree.rb_node);
558
559         if (--region->vm_usage == 0) {
560                 if (region->vm_top > region->vm_start)
561                         delete_nommu_region(region);
562                 up_write(&nommu_region_sem);
563
564                 if (region->vm_file)
565                         fput(region->vm_file);
566
567                 /* IO memory and memory shared directly out of the pagecache
568                  * from ramfs/tmpfs mustn't be released here */
569                 if (region->vm_flags & VM_MAPPED_COPY) {
570                         kdebug("free series");
571                         free_page_series(region->vm_start, region->vm_top);
572                 }
573                 kmem_cache_free(vm_region_jar, region);
574         } else {
575                 up_write(&nommu_region_sem);
576         }
577 }
578
579 /*
580  * release a reference to a region
581  */
582 static void put_nommu_region(struct vm_region *region)
583 {
584         down_write(&nommu_region_sem);
585         __put_nommu_region(region);
586 }
587
588 /*
589  * update protection on a vma
590  */
591 static void protect_vma(struct vm_area_struct *vma, unsigned long flags)
592 {
593 #ifdef CONFIG_MPU
594         struct mm_struct *mm = vma->vm_mm;
595         long start = vma->vm_start & PAGE_MASK;
596         while (start < vma->vm_end) {
597                 protect_page(mm, start, flags);
598                 start += PAGE_SIZE;
599         }
600         update_protections(mm);
601 #endif
602 }
603
604 /*
605  * add a VMA into a process's mm_struct in the appropriate place in the list
606  * and tree and add to the address space's page tree also if not an anonymous
607  * page
608  * - should be called with mm->mmap_sem held writelocked
609  */
610 static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
611 {
612         struct vm_area_struct *pvma, **pp, *next;
613         struct address_space *mapping;
614         struct rb_node **p, *parent;
615
616         kenter(",%p", vma);
617
618         BUG_ON(!vma->vm_region);
619
620         mm->map_count++;
621         vma->vm_mm = mm;
622
623         protect_vma(vma, vma->vm_flags);
624
625         /* add the VMA to the mapping */
626         if (vma->vm_file) {
627                 mapping = vma->vm_file->f_mapping;
628
629                 flush_dcache_mmap_lock(mapping);
630                 vma_prio_tree_insert(vma, &mapping->i_mmap);
631                 flush_dcache_mmap_unlock(mapping);
632         }
633
634         /* add the VMA to the tree */
635         parent = NULL;
636         p = &mm->mm_rb.rb_node;
637         while (*p) {
638                 parent = *p;
639                 pvma = rb_entry(parent, struct vm_area_struct, vm_rb);
640
641                 /* sort by: start addr, end addr, VMA struct addr in that order
642                  * (the latter is necessary as we may get identical VMAs) */
643                 if (vma->vm_start < pvma->vm_start)
644                         p = &(*p)->rb_left;
645                 else if (vma->vm_start > pvma->vm_start)
646                         p = &(*p)->rb_right;
647                 else if (vma->vm_end < pvma->vm_end)
648                         p = &(*p)->rb_left;
649                 else if (vma->vm_end > pvma->vm_end)
650                         p = &(*p)->rb_right;
651                 else if (vma < pvma)
652                         p = &(*p)->rb_left;
653                 else if (vma > pvma)
654                         p = &(*p)->rb_right;
655                 else
656                         BUG();
657         }
658
659         rb_link_node(&vma->vm_rb, parent, p);
660         rb_insert_color(&vma->vm_rb, &mm->mm_rb);
661
662         /* add VMA to the VMA list also */
663         for (pp = &mm->mmap; (pvma = *pp); pp = &(*pp)->vm_next) {
664                 if (pvma->vm_start > vma->vm_start)
665                         break;
666                 if (pvma->vm_start < vma->vm_start)
667                         continue;
668                 if (pvma->vm_end < vma->vm_end)
669                         break;
670         }
671
672         next = *pp;
673         *pp = vma;
674         vma->vm_next = next;
675         if (next)
676                 next->vm_prev = vma;
677 }
678
679 /*
680  * delete a VMA from its owning mm_struct and address space
681  */
682 static void delete_vma_from_mm(struct vm_area_struct *vma)
683 {
684         struct vm_area_struct **pp;
685         struct address_space *mapping;
686         struct mm_struct *mm = vma->vm_mm;
687
688         kenter("%p", vma);
689
690         protect_vma(vma, 0);
691
692         mm->map_count--;
693         if (mm->mmap_cache == vma)
694                 mm->mmap_cache = NULL;
695
696         /* remove the VMA from the mapping */
697         if (vma->vm_file) {
698                 mapping = vma->vm_file->f_mapping;
699
700                 flush_dcache_mmap_lock(mapping);
701                 vma_prio_tree_remove(vma, &mapping->i_mmap);
702                 flush_dcache_mmap_unlock(mapping);
703         }
704
705         /* remove from the MM's tree and list */
706         rb_erase(&vma->vm_rb, &mm->mm_rb);
707         for (pp = &mm->mmap; *pp; pp = &(*pp)->vm_next) {
708                 if (*pp == vma) {
709                         *pp = vma->vm_next;
710                         break;
711                 }
712         }
713
714         vma->vm_mm = NULL;
715 }
716
717 /*
718  * destroy a VMA record
719  */
720 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
721 {
722         kenter("%p", vma);
723         if (vma->vm_ops && vma->vm_ops->close)
724                 vma->vm_ops->close(vma);
725         if (vma->vm_file) {
726                 fput(vma->vm_file);
727                 if (vma->vm_flags & VM_EXECUTABLE)
728                         removed_exe_file_vma(mm);
729         }
730         put_nommu_region(vma->vm_region);
731         kmem_cache_free(vm_area_cachep, vma);
732 }
733
734 /*
735  * look up the first VMA in which addr resides, NULL if none
736  * - should be called with mm->mmap_sem at least held readlocked
737  */
738 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
739 {
740         struct vm_area_struct *vma;
741         struct rb_node *n = mm->mm_rb.rb_node;
742
743         /* check the cache first */
744         vma = mm->mmap_cache;
745         if (vma && vma->vm_start <= addr && vma->vm_end > addr)
746                 return vma;
747
748         /* trawl the tree (there may be multiple mappings in which addr
749          * resides) */
750         for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
751                 vma = rb_entry(n, struct vm_area_struct, vm_rb);
752                 if (vma->vm_start > addr)
753                         return NULL;
754                 if (vma->vm_end > addr) {
755                         mm->mmap_cache = vma;
756                         return vma;
757                 }
758         }
759
760         return NULL;
761 }
762 EXPORT_SYMBOL(find_vma);
763
764 /*
765  * find a VMA
766  * - we don't extend stack VMAs under NOMMU conditions
767  */
768 struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
769 {
770         return find_vma(mm, addr);
771 }
772
773 /*
774  * expand a stack to a given address
775  * - not supported under NOMMU conditions
776  */
777 int expand_stack(struct vm_area_struct *vma, unsigned long address)
778 {
779         return -ENOMEM;
780 }
781
782 /*
783  * look up the first VMA that exactly matches addr
784  * - should be called with mm->mmap_sem at least held readlocked
785  */
786 static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
787                                              unsigned long addr,
788                                              unsigned long len)
789 {
790         struct vm_area_struct *vma;
791         struct rb_node *n = mm->mm_rb.rb_node;
792         unsigned long end = addr + len;
793
794         /* check the cache first */
795         vma = mm->mmap_cache;
796         if (vma && vma->vm_start == addr && vma->vm_end == end)
797                 return vma;
798
799         /* trawl the tree (there may be multiple mappings in which addr
800          * resides) */
801         for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
802                 vma = rb_entry(n, struct vm_area_struct, vm_rb);
803                 if (vma->vm_start < addr)
804                         continue;
805                 if (vma->vm_start > addr)
806                         return NULL;
807                 if (vma->vm_end == end) {
808                         mm->mmap_cache = vma;
809                         return vma;
810                 }
811         }
812
813         return NULL;
814 }
815
816 /*
817  * determine whether a mapping should be permitted and, if so, what sort of
818  * mapping we're capable of supporting
819  */
820 static int validate_mmap_request(struct file *file,
821                                  unsigned long addr,
822                                  unsigned long len,
823                                  unsigned long prot,
824                                  unsigned long flags,
825                                  unsigned long pgoff,
826                                  unsigned long *_capabilities)
827 {
828         unsigned long capabilities, rlen;
829         unsigned long reqprot = prot;
830         int ret;
831
832         /* do the simple checks first */
833         if (flags & MAP_FIXED) {
834                 printk(KERN_DEBUG
835                        "%d: Can't do fixed-address/overlay mmap of RAM\n",
836                        current->pid);
837                 return -EINVAL;
838         }
839
840         if ((flags & MAP_TYPE) != MAP_PRIVATE &&
841             (flags & MAP_TYPE) != MAP_SHARED)
842                 return -EINVAL;
843
844         if (!len)
845                 return -EINVAL;
846
847         /* Careful about overflows.. */
848         rlen = PAGE_ALIGN(len);
849         if (!rlen || rlen > TASK_SIZE)
850                 return -ENOMEM;
851
852         /* offset overflow? */
853         if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
854                 return -EOVERFLOW;
855
856         if (file) {
857                 /* validate file mapping requests */
858                 struct address_space *mapping;
859
860                 /* files must support mmap */
861                 if (!file->f_op || !file->f_op->mmap)
862                         return -ENODEV;
863
864                 /* work out if what we've got could possibly be shared
865                  * - we support chardevs that provide their own "memory"
866                  * - we support files/blockdevs that are memory backed
867                  */
868                 mapping = file->f_mapping;
869                 if (!mapping)
870                         mapping = file->f_path.dentry->d_inode->i_mapping;
871
872                 capabilities = 0;
873                 if (mapping && mapping->backing_dev_info)
874                         capabilities = mapping->backing_dev_info->capabilities;
875
876                 if (!capabilities) {
877                         /* no explicit capabilities set, so assume some
878                          * defaults */
879                         switch (file->f_path.dentry->d_inode->i_mode & S_IFMT) {
880                         case S_IFREG:
881                         case S_IFBLK:
882                                 capabilities = BDI_CAP_MAP_COPY;
883                                 break;
884
885                         case S_IFCHR:
886                                 capabilities =
887                                         BDI_CAP_MAP_DIRECT |
888                                         BDI_CAP_READ_MAP |
889                                         BDI_CAP_WRITE_MAP;
890                                 break;
891
892                         default:
893                                 return -EINVAL;
894                         }
895                 }
896
897                 /* eliminate any capabilities that we can't support on this
898                  * device */
899                 if (!file->f_op->get_unmapped_area)
900                         capabilities &= ~BDI_CAP_MAP_DIRECT;
901                 if (!file->f_op->read)
902                         capabilities &= ~BDI_CAP_MAP_COPY;
903
904                 /* The file shall have been opened with read permission. */
905                 if (!(file->f_mode & FMODE_READ))
906                         return -EACCES;
907
908                 if (flags & MAP_SHARED) {
909                         /* do checks for writing, appending and locking */
910                         if ((prot & PROT_WRITE) &&
911                             !(file->f_mode & FMODE_WRITE))
912                                 return -EACCES;
913
914                         if (IS_APPEND(file->f_path.dentry->d_inode) &&
915                             (file->f_mode & FMODE_WRITE))
916                                 return -EACCES;
917
918                         if (locks_verify_locked(file->f_path.dentry->d_inode))
919                                 return -EAGAIN;
920
921                         if (!(capabilities & BDI_CAP_MAP_DIRECT))
922                                 return -ENODEV;
923
924                         /* we mustn't privatise shared mappings */
925                         capabilities &= ~BDI_CAP_MAP_COPY;
926                 }
927                 else {
928                         /* we're going to read the file into private memory we
929                          * allocate */
930                         if (!(capabilities & BDI_CAP_MAP_COPY))
931                                 return -ENODEV;
932
933                         /* we don't permit a private writable mapping to be
934                          * shared with the backing device */
935                         if (prot & PROT_WRITE)
936                                 capabilities &= ~BDI_CAP_MAP_DIRECT;
937                 }
938
939                 if (capabilities & BDI_CAP_MAP_DIRECT) {
940                         if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
941                             ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
942                             ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
943                             ) {
944                                 capabilities &= ~BDI_CAP_MAP_DIRECT;
945                                 if (flags & MAP_SHARED) {
946                                         printk(KERN_WARNING
947                                                "MAP_SHARED not completely supported on !MMU\n");
948                                         return -EINVAL;
949                                 }
950                         }
951                 }
952
953                 /* handle executable mappings and implied executable
954                  * mappings */
955                 if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
956                         if (prot & PROT_EXEC)
957                                 return -EPERM;
958                 }
959                 else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
960                         /* handle implication of PROT_EXEC by PROT_READ */
961                         if (current->personality & READ_IMPLIES_EXEC) {
962                                 if (capabilities & BDI_CAP_EXEC_MAP)
963                                         prot |= PROT_EXEC;
964                         }
965                 }
966                 else if ((prot & PROT_READ) &&
967                          (prot & PROT_EXEC) &&
968                          !(capabilities & BDI_CAP_EXEC_MAP)
969                          ) {
970                         /* backing file is not executable, try to copy */
971                         capabilities &= ~BDI_CAP_MAP_DIRECT;
972                 }
973         }
974         else {
975                 /* anonymous mappings are always memory backed and can be
976                  * privately mapped
977                  */
978                 capabilities = BDI_CAP_MAP_COPY;
979
980                 /* handle PROT_EXEC implication by PROT_READ */
981                 if ((prot & PROT_READ) &&
982                     (current->personality & READ_IMPLIES_EXEC))
983                         prot |= PROT_EXEC;
984         }
985
986         /* allow the security API to have its say */
987         ret = security_file_mmap(file, reqprot, prot, flags, addr, 0);
988         if (ret < 0)
989                 return ret;
990
991         /* looks okay */
992         *_capabilities = capabilities;
993         return 0;
994 }
995
996 /*
997  * we've determined that we can make the mapping, now translate what we
998  * now know into VMA flags
999  */
1000 static unsigned long determine_vm_flags(struct file *file,
1001                                         unsigned long prot,
1002                                         unsigned long flags,
1003                                         unsigned long capabilities)
1004 {
1005         unsigned long vm_flags;
1006
1007         vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
1008         /* vm_flags |= mm->def_flags; */
1009
1010         if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
1011                 /* attempt to share read-only copies of mapped file chunks */
1012                 vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1013                 if (file && !(prot & PROT_WRITE))
1014                         vm_flags |= VM_MAYSHARE;
1015         } else {
1016                 /* overlay a shareable mapping on the backing device or inode
1017                  * if possible - used for chardevs, ramfs/tmpfs/shmfs and
1018                  * romfs/cramfs */
1019                 vm_flags |= VM_MAYSHARE | (capabilities & BDI_CAP_VMFLAGS);
1020                 if (flags & MAP_SHARED)
1021                         vm_flags |= VM_SHARED;
1022         }
1023
1024         /* refuse to let anyone share private mappings with this process if
1025          * it's being traced - otherwise breakpoints set in it may interfere
1026          * with another untraced process
1027          */
1028         if ((flags & MAP_PRIVATE) && tracehook_expect_breakpoints(current))
1029                 vm_flags &= ~VM_MAYSHARE;
1030
1031         return vm_flags;
1032 }
1033
1034 /*
1035  * set up a shared mapping on a file (the driver or filesystem provides and
1036  * pins the storage)
1037  */
1038 static int do_mmap_shared_file(struct vm_area_struct *vma)
1039 {
1040         int ret;
1041
1042         ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
1043         if (ret == 0) {
1044                 vma->vm_region->vm_top = vma->vm_region->vm_end;
1045                 return 0;
1046         }
1047         if (ret != -ENOSYS)
1048                 return ret;
1049
1050         /* getting -ENOSYS indicates that direct mmap isn't possible (as
1051          * opposed to tried but failed) so we can only give a suitable error as
1052          * it's not possible to make a private copy if MAP_SHARED was given */
1053         return -ENODEV;
1054 }
1055
1056 /*
1057  * set up a private mapping or an anonymous shared mapping
1058  */
1059 static int do_mmap_private(struct vm_area_struct *vma,
1060                            struct vm_region *region,
1061                            unsigned long len,
1062                            unsigned long capabilities)
1063 {
1064         struct page *pages;
1065         unsigned long total, point, n, rlen;
1066         void *base;
1067         int ret, order;
1068
1069         /* invoke the file's mapping function so that it can keep track of
1070          * shared mappings on devices or memory
1071          * - VM_MAYSHARE will be set if it may attempt to share
1072          */
1073         if (capabilities & BDI_CAP_MAP_DIRECT) {
1074                 ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
1075                 if (ret == 0) {
1076                         /* shouldn't return success if we're not sharing */
1077                         BUG_ON(!(vma->vm_flags & VM_MAYSHARE));
1078                         vma->vm_region->vm_top = vma->vm_region->vm_end;
1079                         return 0;
1080                 }
1081                 if (ret != -ENOSYS)
1082                         return ret;
1083
1084                 /* getting an ENOSYS error indicates that direct mmap isn't
1085                  * possible (as opposed to tried but failed) so we'll try to
1086                  * make a private copy of the data and map that instead */
1087         }
1088
1089         rlen = PAGE_ALIGN(len);
1090
1091         /* allocate some memory to hold the mapping
1092          * - note that this may not return a page-aligned address if the object
1093          *   we're allocating is smaller than a page
1094          */
1095         order = get_order(rlen);
1096         kdebug("alloc order %d for %lx", order, len);
1097
1098         pages = alloc_pages(GFP_KERNEL, order);
1099         if (!pages)
1100                 goto enomem;
1101
1102         total = 1 << order;
1103         atomic_long_add(total, &mmap_pages_allocated);
1104
1105         point = rlen >> PAGE_SHIFT;
1106
1107         /* we allocated a power-of-2 sized page set, so we may want to trim off
1108          * the excess */
1109         if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) {
1110                 while (total > point) {
1111                         order = ilog2(total - point);
1112                         n = 1 << order;
1113                         kdebug("shave %lu/%lu @%lu", n, total - point, total);
1114                         atomic_long_sub(n, &mmap_pages_allocated);
1115                         total -= n;
1116                         set_page_refcounted(pages + total);
1117                         __free_pages(pages + total, order);
1118                 }
1119         }
1120
1121         for (point = 1; point < total; point++)
1122                 set_page_refcounted(&pages[point]);
1123
1124         base = page_address(pages);
1125         region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY;
1126         region->vm_start = (unsigned long) base;
1127         region->vm_end   = region->vm_start + rlen;
1128         region->vm_top   = region->vm_start + (total << PAGE_SHIFT);
1129
1130         vma->vm_start = region->vm_start;
1131         vma->vm_end   = region->vm_start + len;
1132
1133         if (vma->vm_file) {
1134                 /* read the contents of a file into the copy */
1135                 mm_segment_t old_fs;
1136                 loff_t fpos;
1137
1138                 fpos = vma->vm_pgoff;
1139                 fpos <<= PAGE_SHIFT;
1140
1141                 old_fs = get_fs();
1142                 set_fs(KERNEL_DS);
1143                 ret = vma->vm_file->f_op->read(vma->vm_file, base, rlen, &fpos);
1144                 set_fs(old_fs);
1145
1146                 if (ret < 0)
1147                         goto error_free;
1148
1149                 /* clear the last little bit */
1150                 if (ret < rlen)
1151                         memset(base + ret, 0, rlen - ret);
1152
1153         }
1154
1155         return 0;
1156
1157 error_free:
1158         free_page_series(region->vm_start, region->vm_end);
1159         region->vm_start = vma->vm_start = 0;
1160         region->vm_end   = vma->vm_end = 0;
1161         region->vm_top   = 0;
1162         return ret;
1163
1164 enomem:
1165         printk("Allocation of length %lu from process %d (%s) failed\n",
1166                len, current->pid, current->comm);
1167         show_free_areas();
1168         return -ENOMEM;
1169 }
1170
1171 /*
1172  * handle mapping creation for uClinux
1173  */
1174 unsigned long do_mmap_pgoff(struct file *file,
1175                             unsigned long addr,
1176                             unsigned long len,
1177                             unsigned long prot,
1178                             unsigned long flags,
1179                             unsigned long pgoff)
1180 {
1181         struct vm_area_struct *vma;
1182         struct vm_region *region;
1183         struct rb_node *rb;
1184         unsigned long capabilities, vm_flags, result;
1185         int ret;
1186
1187         kenter(",%lx,%lx,%lx,%lx,%lx", addr, len, prot, flags, pgoff);
1188
1189         /* decide whether we should attempt the mapping, and if so what sort of
1190          * mapping */
1191         ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
1192                                     &capabilities);
1193         if (ret < 0) {
1194                 kleave(" = %d [val]", ret);
1195                 return ret;
1196         }
1197
1198         /* we ignore the address hint */
1199         addr = 0;
1200
1201         /* we've determined that we can make the mapping, now translate what we
1202          * now know into VMA flags */
1203         vm_flags = determine_vm_flags(file, prot, flags, capabilities);
1204
1205         /* we're going to need to record the mapping */
1206         region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
1207         if (!region)
1208                 goto error_getting_region;
1209
1210         vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
1211         if (!vma)
1212                 goto error_getting_vma;
1213
1214         region->vm_usage = 1;
1215         region->vm_flags = vm_flags;
1216         region->vm_pgoff = pgoff;
1217
1218         INIT_LIST_HEAD(&vma->anon_vma_chain);
1219         vma->vm_flags = vm_flags;
1220         vma->vm_pgoff = pgoff;
1221
1222         if (file) {
1223                 region->vm_file = file;
1224                 get_file(file);
1225                 vma->vm_file = file;
1226                 get_file(file);
1227                 if (vm_flags & VM_EXECUTABLE) {
1228                         added_exe_file_vma(current->mm);
1229                         vma->vm_mm = current->mm;
1230                 }
1231         }
1232
1233         down_write(&nommu_region_sem);
1234
1235         /* if we want to share, we need to check for regions created by other
1236          * mmap() calls that overlap with our proposed mapping
1237          * - we can only share with a superset match on most regular files
1238          * - shared mappings on character devices and memory backed files are
1239  *   permitted to overlap inexactly as far as we are concerned, for in
1240  *   these cases sharing is handled in the driver or filesystem rather
1241  *   than here
1242          */
1243         if (vm_flags & VM_MAYSHARE) {
1244                 struct vm_region *pregion;
1245                 unsigned long pglen, rpglen, pgend, rpgend, start;
1246
1247                 pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1248                 pgend = pgoff + pglen;
1249
1250                 for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
1251                         pregion = rb_entry(rb, struct vm_region, vm_rb);
1252
1253                         if (!(pregion->vm_flags & VM_MAYSHARE))
1254                                 continue;
1255
1256                         /* search for overlapping mappings on the same file */
1257                         if (pregion->vm_file->f_path.dentry->d_inode !=
1258                             file->f_path.dentry->d_inode)
1259                                 continue;
1260
1261                         if (pregion->vm_pgoff >= pgend)
1262                                 continue;
1263
1264                         rpglen = pregion->vm_end - pregion->vm_start;
1265                         rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1266                         rpgend = pregion->vm_pgoff + rpglen;
1267                         if (pgoff >= rpgend)
1268                                 continue;
1269
1270                         /* handle inexactly overlapping matches between
1271                          * mappings */
1272                         if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
1273                             !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
1274                                 /* new mapping is not a subset of the region */
1275                                 if (!(capabilities & BDI_CAP_MAP_DIRECT))
1276                                         goto sharing_violation;
1277                                 continue;
1278                         }
1279
1280                         /* we've found a region we can share */
1281                         pregion->vm_usage++;
1282                         vma->vm_region = pregion;
1283                         start = pregion->vm_start;
1284                         start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
1285                         vma->vm_start = start;
1286                         vma->vm_end = start + len;
1287
1288                         if (pregion->vm_flags & VM_MAPPED_COPY) {
1289                                 kdebug("share copy");
1290                                 vma->vm_flags |= VM_MAPPED_COPY;
1291                         } else {
1292                                 kdebug("share mmap");
1293                                 ret = do_mmap_shared_file(vma);
1294                                 if (ret < 0) {
1295                                         vma->vm_region = NULL;
1296                                         vma->vm_start = 0;
1297                                         vma->vm_end = 0;
1298                                         pregion->vm_usage--;
1299                                         pregion = NULL;
1300                                         goto error_just_free;
1301                                 }
1302                         }
1303                         fput(region->vm_file);
1304                         kmem_cache_free(vm_region_jar, region);
1305                         region = pregion;
1306                         result = start;
1307                         goto share;
1308                 }
1309
1310                 /* obtain the address at which to make a shared mapping
1311                  * - this is the hook for quasi-memory character devices to
1312                  *   tell us the location of a shared mapping
1313                  */
1314                 if (capabilities & BDI_CAP_MAP_DIRECT) {
1315                         addr = file->f_op->get_unmapped_area(file, addr, len,
1316                                                              pgoff, flags);
1317                         if (IS_ERR((void *) addr)) {
1318                                 ret = addr;
1319                                 if (ret != (unsigned long) -ENOSYS)
1320                                         goto error_just_free;
1321
1322                                 /* the driver refused to tell us where to site
1323                                  * the mapping so we'll have to attempt to copy
1324                                  * it */
1325                                 ret = (unsigned long) -ENODEV;
1326                                 if (!(capabilities & BDI_CAP_MAP_COPY))
1327                                         goto error_just_free;
1328
1329                                 capabilities &= ~BDI_CAP_MAP_DIRECT;
1330                         } else {
1331                                 vma->vm_start = region->vm_start = addr;
1332                                 vma->vm_end = region->vm_end = addr + len;
1333                         }
1334                 }
1335         }
1336
1337         vma->vm_region = region;
1338
1339         /* set up the mapping
1340          * - the region is filled in if BDI_CAP_MAP_DIRECT is still set
1341          */
1342         if (file && vma->vm_flags & VM_SHARED)
1343                 ret = do_mmap_shared_file(vma);
1344         else
1345                 ret = do_mmap_private(vma, region, len, capabilities);
1346         if (ret < 0)
1347                 goto error_just_free;
1348         add_nommu_region(region);
1349
1350         /* clear anonymous mappings that don't ask for uninitialized data */
1351         if (!vma->vm_file && !(flags & MAP_UNINITIALIZED))
1352                 memset((void *)region->vm_start, 0,
1353                        region->vm_end - region->vm_start);
1354
1355         /* okay... we have a mapping; now we have to register it */
1356         result = vma->vm_start;
1357
1358         current->mm->total_vm += len >> PAGE_SHIFT;
1359
1360 share:
1361         add_vma_to_mm(current->mm, vma);
1362
1363         /* we flush the region from the icache only when the first executable
1364          * mapping of it is made  */
1365         if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
1366                 flush_icache_range(region->vm_start, region->vm_end);
1367                 region->vm_icache_flushed = true;
1368         }
1369
1370         up_write(&nommu_region_sem);
1371
1372         kleave(" = %lx", result);
1373         return result;
1374
1375 error_just_free:
1376         up_write(&nommu_region_sem);
1377 error:
1378         if (region->vm_file)
1379                 fput(region->vm_file);
1380         kmem_cache_free(vm_region_jar, region);
1381         if (vma->vm_file)
1382                 fput(vma->vm_file);
1383         if (vma->vm_flags & VM_EXECUTABLE)
1384                 removed_exe_file_vma(vma->vm_mm);
1385         kmem_cache_free(vm_area_cachep, vma);
1386         kleave(" = %d", ret);
1387         return ret;
1388
1389 sharing_violation:
1390         up_write(&nommu_region_sem);
1391         printk(KERN_WARNING "Attempt to share mismatched mappings\n");
1392         ret = -EINVAL;
1393         goto error;
1394
1395 error_getting_vma:
1396         kmem_cache_free(vm_region_jar, region);
1397         printk(KERN_WARNING "Allocation of vma for %lu byte allocation"
1398                " from process %d failed\n",
1399                len, current->pid);
1400         show_free_areas();
1401         return -ENOMEM;
1402
1403 error_getting_region:
1404         printk(KERN_WARNING "Allocation of vm region for %lu byte allocation"
1405                " from process %d failed\n",
1406                len, current->pid);
1407         show_free_areas();
1408         return -ENOMEM;
1409 }
1410 EXPORT_SYMBOL(do_mmap_pgoff);
1411
1412 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1413                 unsigned long, prot, unsigned long, flags,
1414                 unsigned long, fd, unsigned long, pgoff)
1415 {
1416         struct file *file = NULL;
1417         unsigned long retval = -EBADF;
1418
1419         if (!(flags & MAP_ANONYMOUS)) {
1420                 file = fget(fd);
1421                 if (!file)
1422                         goto out;
1423         }
1424
1425         flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1426
1427         down_write(&current->mm->mmap_sem);
1428         retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1429         up_write(&current->mm->mmap_sem);
1430
1431         if (file)
1432                 fput(file);
1433 out:
1434         return retval;
1435 }
1436
1437 #ifdef __ARCH_WANT_SYS_OLD_MMAP
1438 struct mmap_arg_struct {
1439         unsigned long addr;
1440         unsigned long len;
1441         unsigned long prot;
1442         unsigned long flags;
1443         unsigned long fd;
1444         unsigned long offset;
1445 };
1446
1447 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1448 {
1449         struct mmap_arg_struct a;
1450
1451         if (copy_from_user(&a, arg, sizeof(a)))
1452                 return -EFAULT;
1453         if (a.offset & ~PAGE_MASK)
1454                 return -EINVAL;
1455
1456         return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1457                               a.offset >> PAGE_SHIFT);
1458 }
1459 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
1460
1461 /*
1462  * split a vma into two pieces at address 'addr', a new vma is allocated either
1463  * for the first part or the tail.
1464  */
1465 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
1466               unsigned long addr, int new_below)
1467 {
1468         struct vm_area_struct *new;
1469         struct vm_region *region;
1470         unsigned long npages;
1471
1472         kenter("");
1473
1474         /* we're only permitted to split anonymous regions (these should have
1475          * only a single usage on the region) */
1476         if (vma->vm_file)
1477                 return -ENOMEM;
1478
1479         if (mm->map_count >= sysctl_max_map_count)
1480                 return -ENOMEM;
1481
1482         region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
1483         if (!region)
1484                 return -ENOMEM;
1485
1486         new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
1487         if (!new) {
1488                 kmem_cache_free(vm_region_jar, region);
1489                 return -ENOMEM;
1490         }
1491
1492         /* most fields are the same, copy all, and then fixup */
1493         *new = *vma;
1494         *region = *vma->vm_region;
1495         new->vm_region = region;
1496
1497         npages = (addr - vma->vm_start) >> PAGE_SHIFT;
1498
1499         if (new_below) {
1500                 region->vm_top = region->vm_end = new->vm_end = addr;
1501         } else {
1502                 region->vm_start = new->vm_start = addr;
1503                 region->vm_pgoff = new->vm_pgoff += npages;
1504         }
1505
1506         if (new->vm_ops && new->vm_ops->open)
1507                 new->vm_ops->open(new);
1508
1509         delete_vma_from_mm(vma);
1510         down_write(&nommu_region_sem);
1511         delete_nommu_region(vma->vm_region);
1512         if (new_below) {
1513                 vma->vm_region->vm_start = vma->vm_start = addr;
1514                 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
1515         } else {
1516                 vma->vm_region->vm_end = vma->vm_end = addr;
1517                 vma->vm_region->vm_top = addr;
1518         }
1519         add_nommu_region(vma->vm_region);
1520         add_nommu_region(new->vm_region);
1521         up_write(&nommu_region_sem);
1522         add_vma_to_mm(mm, vma);
1523         add_vma_to_mm(mm, new);
1524         return 0;
1525 }
1526
1527 /*
1528  * shrink a VMA by removing the specified chunk from either the beginning or
1529  * the end
1530  */
1531 static int shrink_vma(struct mm_struct *mm,
1532                       struct vm_area_struct *vma,
1533                       unsigned long from, unsigned long to)
1534 {
1535         struct vm_region *region;
1536
1537         kenter("");
1538
1539         /* adjust the VMA's pointers, which may reposition it in the MM's tree
1540          * and list */
1541         delete_vma_from_mm(vma);
1542         if (from > vma->vm_start)
1543                 vma->vm_end = from;
1544         else
1545                 vma->vm_start = to;
1546         add_vma_to_mm(mm, vma);
1547
1548         /* cut the backing region down to size */
1549         region = vma->vm_region;
1550         BUG_ON(region->vm_usage != 1);
1551
1552         down_write(&nommu_region_sem);
1553         delete_nommu_region(region);
1554         if (from > region->vm_start) {
1555                 to = region->vm_top;
1556                 region->vm_top = region->vm_end = from;
1557         } else {
1558                 region->vm_start = to;
1559         }
1560         add_nommu_region(region);
1561         up_write(&nommu_region_sem);
1562
1563         free_page_series(from, to);
1564         return 0;
1565 }
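
/*
 * Illustrative note (not part of the original file), with hypothetical
 * addresses: for a VMA and region spanning 0x8000..0xc000, a call such as
 * shrink_vma(mm, vma, 0xa000, 0xc000) trims the tail - vm_end and the
 * region's vm_end/vm_top drop to 0xa000 and the pages from 0xa000 up to the
 * old vm_top are handed to free_page_series().  Removing a chunk at the
 * front instead advances vm_start (and the region's vm_start) to 'to'.
 */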
1566
1567 /*
1568  * release a mapping
1569  * - under NOMMU conditions the chunk to be unmapped must be backed by a single
1570  *   VMA, though it need not cover the whole VMA
1571  */
1572 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
1573 {
1574         struct vm_area_struct *vma;
1575         struct rb_node *rb;
1576         unsigned long end = start + len;
1577         int ret;
1578
1579         kenter(",%lx,%zx", start, len);
1580
1581         if (len == 0)
1582                 return -EINVAL;
1583
1584         /* find the first potentially overlapping VMA */
1585         vma = find_vma(mm, start);
1586         if (!vma) {
1587                 static int limit = 0;
1588                 if (limit < 5) {
1589                         printk(KERN_WARNING
1590                                "munmap of memory not mmapped by process %d"
1591                                " (%s): 0x%lx-0x%lx\n",
1592                                current->pid, current->comm,
1593                                start, start + len - 1);
1594                         limit++;
1595                 }
1596                 return -EINVAL;
1597         }
1598
1599         /* we're allowed to split an anonymous VMA but not a file-backed one */
1600         if (vma->vm_file) {
1601                 do {
1602                         if (start > vma->vm_start) {
1603                                 kleave(" = -EINVAL [miss]");
1604                                 return -EINVAL;
1605                         }
1606                         if (end == vma->vm_end)
1607                                 goto erase_whole_vma;
1608                         rb = rb_next(&vma->vm_rb);
1609                         vma = rb_entry(rb, struct vm_area_struct, vm_rb);
1610                 } while (rb);
1611                 kleave(" = -EINVAL [split file]");
1612                 return -EINVAL;
1613         } else {
1614                 /* the chunk must be a subset of the VMA found */
1615                 if (start == vma->vm_start && end == vma->vm_end)
1616                         goto erase_whole_vma;
1617                 if (start < vma->vm_start || end > vma->vm_end) {
1618                         kleave(" = -EINVAL [superset]");
1619                         return -EINVAL;
1620                 }
1621                 if (start & ~PAGE_MASK) {
1622                         kleave(" = -EINVAL [unaligned start]");
1623                         return -EINVAL;
1624                 }
1625                 if (end != vma->vm_end && end & ~PAGE_MASK) {
1626                         kleave(" = -EINVAL [unaligned split]");
1627                         return -EINVAL;
1628                 }
1629                 if (start != vma->vm_start && end != vma->vm_end) {
1630                         ret = split_vma(mm, vma, start, 1);
1631                         if (ret < 0) {
1632                                 kleave(" = %d [split]", ret);
1633                                 return ret;
1634                         }
1635                 }
1636                 return shrink_vma(mm, vma, start, end);
1637         }
1638
1639 erase_whole_vma:
1640         delete_vma_from_mm(vma);
1641         delete_vma(mm, vma);
1642         kleave(" = 0");
1643         return 0;
1644 }
1645 EXPORT_SYMBOL(do_munmap);
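
/*
 * Illustrative sketch (not part of the original file): what do_munmap() will
 * and won't accept under NOMMU.  Each call is an independent scenario on a
 * fresh, hypothetical mapping spanning 0x8000..0xc000.
 */
#if 0
static void do_munmap_example(struct mm_struct *mm)
{
        /* a file-backed VMA may only be unmapped whole... */
        do_munmap(mm, 0x8000, 0x4000);  /* ok - erases the whole VMA */
        do_munmap(mm, 0xa000, 0x2000);  /* -EINVAL - would split a file VMA */

        /* ...whereas an anonymous VMA may be shrunk or split: punching out
         * 0x9000..0xa000 splits at 0x9000 and then shrinks the tail piece */
        do_munmap(mm, 0x9000, 0x1000);
}
#endif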
1646
1647 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1648 {
1649         int ret;
1650         struct mm_struct *mm = current->mm;
1651
1652         down_write(&mm->mmap_sem);
1653         ret = do_munmap(mm, addr, len);
1654         up_write(&mm->mmap_sem);
1655         return ret;
1656 }
1657
1658 /*
1659  * release all the mappings made in a process's VM space
1660  */
1661 void exit_mmap(struct mm_struct *mm)
1662 {
1663         struct vm_area_struct *vma;
1664
1665         if (!mm)
1666                 return;
1667
1668         kenter("");
1669
1670         mm->total_vm = 0;
1671
1672         while ((vma = mm->mmap)) {
1673                 mm->mmap = vma->vm_next;
1674                 delete_vma_from_mm(vma);
1675                 delete_vma(mm, vma);
1676                 cond_resched();
1677         }
1678
1679         kleave("");
1680 }
1681
1682 unsigned long do_brk(unsigned long addr, unsigned long len)
1683 {
1684         return -ENOMEM;
1685 }
1686
1687 /*
1688  * expand (or shrink) an existing mapping, potentially moving it at the same
1689  * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
1690  *
1691  * under NOMMU conditions, we only permit changing a mapping's size, and only
1692  * as long as it stays within the region allocated by do_mmap_private() and the
1693  * block is not shareable
1694  *
1695  * MREMAP_FIXED is not supported under NOMMU conditions
1696  */
1697 unsigned long do_mremap(unsigned long addr,
1698                         unsigned long old_len, unsigned long new_len,
1699                         unsigned long flags, unsigned long new_addr)
1700 {
1701         struct vm_area_struct *vma;
1702
1703         /* insanity checks first */
1704         if (old_len == 0 || new_len == 0)
1705                 return (unsigned long) -EINVAL;
1706
1707         if (addr & ~PAGE_MASK)
1708                 return -EINVAL;
1709
1710         if (flags & MREMAP_FIXED && new_addr != addr)
1711                 return (unsigned long) -EINVAL;
1712
1713         vma = find_vma_exact(current->mm, addr, old_len);
1714         if (!vma)
1715                 return (unsigned long) -EINVAL;
1716
1717         if (vma->vm_end != vma->vm_start + old_len)
1718                 return (unsigned long) -EFAULT;
1719
1720         if (vma->vm_flags & VM_MAYSHARE)
1721                 return (unsigned long) -EPERM;
1722
1723         if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
1724                 return (unsigned long) -ENOMEM;
1725
1726         /* all checks complete - do it */
1727         vma->vm_end = vma->vm_start + new_len;
1728         return vma->vm_start;
1729 }
1730 EXPORT_SYMBOL(do_mremap);
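
/*
 * Illustrative sketch (not part of the original file): the only remapping
 * that can succeed here is an in-place resize that stays within the region
 * originally allocated by do_mmap_private().  The addresses are hypothetical
 * and callers normally hold mmap_sem, as sys_mremap() below does.
 */
#if 0
static void do_mremap_example(void)
{
        unsigned long base;

        /* shrinking a 4-page private anonymous mapping at 0x8000 to 2 pages
         * succeeds and returns the unchanged start address */
        base = do_mremap(0x8000, 4 * PAGE_SIZE, 2 * PAGE_SIZE, 0, 0);

        /* growing it back works only while new_len still fits inside the
         * backing vm_region; anything larger gets -ENOMEM, and MREMAP_FIXED
         * to a different address is rejected outright */
        base = do_mremap(0x8000, 2 * PAGE_SIZE, 4 * PAGE_SIZE, 0, 0);
}
#endif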
1731
1732 SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1733                 unsigned long, new_len, unsigned long, flags,
1734                 unsigned long, new_addr)
1735 {
1736         unsigned long ret;
1737
1738         down_write(&current->mm->mmap_sem);
1739         ret = do_mremap(addr, old_len, new_len, flags, new_addr);
1740         up_write(&current->mm->mmap_sem);
1741         return ret;
1742 }
1743
1744 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1745                         unsigned int foll_flags)
1746 {
1747         return NULL;
1748 }
1749
1750 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1751                 unsigned long pfn, unsigned long size, pgprot_t prot)
1752 {
1753         if (addr != (pfn << PAGE_SHIFT))
1754                 return -EINVAL;
1755
1756         vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
1757         return 0;
1758 }
1759 EXPORT_SYMBOL(remap_pfn_range);
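
/*
 * Illustrative sketch (not part of the original file): with no MMU there is
 * no page table to rewrite, so remap_pfn_range() can only succeed when the
 * user address already equals the physical address of the frames, i.e.
 * addr == pfn << PAGE_SHIFT.  A driver mmap() handler built on it might look
 * like this hypothetical one.
 */
#if 0
static int example_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* map the buffer one-to-one; any other layout gets -EINVAL */
        return remap_pfn_range(vma, vma->vm_start,
                               vma->vm_start >> PAGE_SHIFT,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}
#endif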
1760
1761 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1762                         unsigned long pgoff)
1763 {
1764         unsigned int size = vma->vm_end - vma->vm_start;
1765
1766         if (!(vma->vm_flags & VM_USERMAP))
1767                 return -EINVAL;
1768
1769         vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1770         vma->vm_end = vma->vm_start + size;
1771
1772         return 0;
1773 }
1774 EXPORT_SYMBOL(remap_vmalloc_range);
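
/*
 * Illustrative sketch (not part of the original file): remap_vmalloc_range()
 * here simply points the VMA at the vmalloc area, so the mapping must carry
 * VM_USERMAP (as set up, for instance, by vmalloc_user()).  The handler and
 * buffer below are hypothetical.
 */
#if 0
static void *example_buf;       /* allocated elsewhere with vmalloc_user() */

static int example_vmalloc_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* redirect the VMA at the kernel buffer, honouring the file offset */
        return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}
#endif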
1775
1776 void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
1777 {
1778 }
1779
1780 unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
1781         unsigned long len, unsigned long pgoff, unsigned long flags)
1782 {
1783         return -ENOMEM;
1784 }
1785
1786 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
1787 {
1788 }
1789
1790 void unmap_mapping_range(struct address_space *mapping,
1791                          loff_t const holebegin, loff_t const holelen,
1792                          int even_cows)
1793 {
1794 }
1795 EXPORT_SYMBOL(unmap_mapping_range);
1796
1797 /*
1798  * Check that a process has enough memory to allocate a new virtual
1799  * mapping. 0 means there is enough memory for the allocation to
1800  * succeed and -ENOMEM implies there is not.
1801  *
1802  * We currently support three overcommit policies, which are set via the
1803  * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
1804  *
1805  * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
1806  * Additional code 2002 Jul 20 by Robert Love.
1807  *
1808  * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
1809  *
1810  * Note this is a helper function intended to be used by LSMs which
1811  * wish to use this logic.
1812  */
1813 int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
1814 {
1815         unsigned long free, allowed;
1816
1817         vm_acct_memory(pages);
1818
1819         /*
1820          * Sometimes we want to use more memory than we have
1821          */
1822         if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
1823                 return 0;
1824
1825         if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
1826                 unsigned long n;
1827
1828                 free = global_page_state(NR_FILE_PAGES);
1829                 free += nr_swap_pages;
1830
1831                 /*
1832                  * Any slabs which are created with the
1833                  * SLAB_RECLAIM_ACCOUNT flag claim to have contents
1834                  * which are reclaimable under pressure.  The dentry
1835                  * cache and most inode caches should fall into this category.
1836                  */
1837                 free += global_page_state(NR_SLAB_RECLAIMABLE);
1838
1839                 /*
1840                  * Leave the last 3% for root
1841                  */
1842                 if (!cap_sys_admin)
1843                         free -= free / 32;
1844
1845                 if (free > pages)
1846                         return 0;
1847
1848                 /*
1849                  * nr_free_pages() is very expensive on large systems,
1850                  * so only call it if we're about to fail.
1851                  */
1852                 n = nr_free_pages();
1853
1854                 /*
1855                  * Leave out the reserved pages; they cannot be used to back anonymous mappings.
1856                  */
1857                 if (n <= totalreserve_pages)
1858                         goto error;
1859                 else
1860                         n -= totalreserve_pages;
1861
1862                 /*
1863                  * Leave the last 3% for root
1864                  */
1865                 if (!cap_sys_admin)
1866                         n -= n / 32;
1867                 free += n;
1868
1869                 if (free > pages)
1870                         return 0;
1871
1872                 goto error;
1873         }
1874
1875         allowed = totalram_pages * sysctl_overcommit_ratio / 100;
1876         /*
1877          * Leave the last 3% for root
1878          */
1879         if (!cap_sys_admin)
1880                 allowed -= allowed / 32;
1881         allowed += total_swap_pages;
1882
1883         /* Don't let a single process grow too big:
1884            leave 3% of the size of this process for other processes */
1885         if (mm)
1886                 allowed -= mm->total_vm / 32;
1887
1888         if (percpu_counter_read_positive(&vm_committed_as) < allowed)
1889                 return 0;
1890
1891 error:
1892         vm_unacct_memory(pages);
1893
1894         return -ENOMEM;
1895 }
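
/*
 * Worked example (not part of the original file), for the strict policy
 * (OVERCOMMIT_NEVER) with hypothetical numbers: on a machine with 64MiB of
 * RAM (16384 4KiB pages), no swap and the default 50% ratio, an unprivileged
 * task gets allowed = 16384 * 50 / 100 = 8192 pages, less 3% for root
 * (8192 / 32 = 256) leaving 7936, less 3% of its own total_vm.  New
 * allocations are refused once the total in vm_committed_as would reach
 * that figure.
 */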
1896
1897 int in_gate_area_no_task(unsigned long addr)
1898 {
1899         return 0;
1900 }
1901
1902 int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1903 {
1904         BUG();
1905         return 0;
1906 }
1907 EXPORT_SYMBOL(filemap_fault);
1908
1909 /*
1910  * Access another process' address space.
1911  * - source/target buffer must be kernel space
1912  */
1913 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
1914 {
1915         struct vm_area_struct *vma;
1916         struct mm_struct *mm;
1917
1918         if (addr + len < addr)
1919                 return 0;
1920
1921         mm = get_task_mm(tsk);
1922         if (!mm)
1923                 return 0;
1924
1925         down_read(&mm->mmap_sem);
1926
1927         /* the access must start within one of the target process's mappings */
1928         vma = find_vma(mm, addr);
1929         if (vma) {
1930                 /* don't overrun this mapping */
1931                 if (addr + len >= vma->vm_end)
1932                         len = vma->vm_end - addr;
1933
1934                 /* only read or write mappings where it is permitted */
1935                 if (write && vma->vm_flags & VM_MAYWRITE)
1936                         copy_to_user_page(vma, NULL, addr,
1937                                          (void *) addr, buf, len);
1938                 else if (!write && vma->vm_flags & VM_MAYREAD)
1939                         copy_from_user_page(vma, NULL, addr,
1940                                             buf, (void *) addr, len);
1941                 else
1942                         len = 0;
1943         } else {
1944                 len = 0;
1945         }
1946
1947         up_read(&mm->mmap_sem);
1948         mmput(mm);
1949         return len;
1950 }
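
/*
 * Illustrative sketch (not part of the original file): a ptrace-style peek
 * at another task's memory.  The copy is clamped to the VMA containing
 * 'addr' and the return value is the number of bytes actually transferred
 * (0 on any failure), so the caller must check it.
 */
#if 0
static int example_peek(struct task_struct *child, unsigned long addr)
{
        unsigned long word;

        if (access_process_vm(child, addr, &word, sizeof(word), 0)
            != sizeof(word))
                return -EIO;
        /* 'word' now holds the child's data at 'addr' */
        return 0;
}
#endif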
1951
1952 /**
1953  * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
1954  * @inode: The inode to check
1955  * @size: The current filesize of the inode
1956  * @newsize: The proposed filesize of the inode
1957  *
1958  * Check the shared mappings on an inode on behalf of a shrinking truncate to
1959  * make sure that any outstanding VMAs aren't broken and then shrink the
1960  * vm_regions that extend beyond the new size so that do_mmap_pgoff() doesn't
1961  * automatically grant mappings that are too large.
1962  */
1963 int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
1964                                 size_t newsize)
1965 {
1966         struct vm_area_struct *vma;
1967         struct prio_tree_iter iter;
1968         struct vm_region *region;
1969         pgoff_t low, high;
1970         size_t r_size, r_top;
1971
1972         low = newsize >> PAGE_SHIFT;
1973         high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1974
1975         down_write(&nommu_region_sem);
1976
1977         /* search for VMAs that fall within the dead zone */
1978         vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
1979                               low, high) {
1980                 /* found one - only interested if it's shared out of the page
1981                  * cache */
1982                 if (vma->vm_flags & VM_SHARED) {
1983                         up_write(&nommu_region_sem);
1984                         return -ETXTBSY; /* not quite true, but near enough */
1985                 }
1986         }
1987
1988         /* reduce any regions that overlap the dead zone - if any exist, they
1989          * will be pointed to by VMAs that don't overlap the dead zone
1990          *
1991          * we don't check for any regions that start beyond the EOF as there
1992          * shouldn't be any
1993          */
1994         vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
1995                               0, ULONG_MAX) {
1996                 if (!(vma->vm_flags & VM_SHARED))
1997                         continue;
1998
1999                 region = vma->vm_region;
2000                 r_size = region->vm_top - region->vm_start;
2001                 r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
2002
2003                 if (r_top > newsize) {
2004                         region->vm_top -= r_top - newsize;
2005                         if (region->vm_end > region->vm_top)
2006                                 region->vm_end = region->vm_top;
2007                 }
2008         }
2009
2010         up_write(&nommu_region_sem);
2011         return 0;
2012 }
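
/*
 * Illustrative note (not part of the original file): a filesystem's setattr
 * path would call this before shrinking i_size, say when truncating a file
 * from 64KiB down to 16KiB.  If any task still has the range beyond the new
 * EOF mapped MAP_SHARED the truncate is refused with -ETXTBSY; otherwise any
 * shared regions extending past 16KiB have vm_top (and vm_end if necessary)
 * pulled back so that later do_mmap_pgoff() calls cannot hand out the
 * now-stale tail.
 */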