2 * (c) 2008-2010 Adam Lackorzynski <adam@os.inf.tu-dresden.de>,
3 * Alexander Warg <warg@os.inf.tu-dresden.de>,
4 * Björn Döbel <doebel@os.inf.tu-dresden.de>
5 * economic rights: Technische Universität Dresden (Germany)
7 * This file is part of TUD:OS and distributed under the terms of the
8 * GNU General Public License 2.
9 * Please see the COPYING-GPL-2 file for details.
11 * As a special exception, you may use this file as part of a free software
12 * library without restriction. Specifically, if other files instantiate
13 * templates or use macros or inline functions from this file, or you compile
14 * this file and link it with other files to produce an executable, this
15 * file does not by itself cause the resulting executable to be covered by
16 * the GNU General Public License. This exception does not however
17 * invalidate any other reasons why the executable file might be covered by
18 * the GNU General Public License.
23 #include "vcon_stream.h"
29 #include <l4/re/dataspace>
31 #include <l4/l4re_vfs/backend>
38 //#include <l4/sys/kdebug.h>
39 //static int debug_mmap = 1;
40 //#define DEBUG_LOG(level, dbg...) do { if (level) dbg } while (0)
42 #define DEBUG_LOG(level, dbg...) do { } while (0)
45 * If USE_BIG_ANON_DS is defined the implementation will use a really big
46 * data space for backing anonymous memory. Otherwise each mmap call
47 * with anonymous memory will allocate a separate data space.
49 #define USE_BIG_ANON_DS
// NOTE(review): fragmentary excerpt — the enclosing struct/namespace and
// intermediate lines are not visible in this listing. These look like
// environment-supplied hooks (heap allocation plus capability-slot
// alloc/free) used by the VFS backend; confirm against the full file.
57 void *(*malloc)(size_t);
// Allocates a free capability slot index; pairs with cap_free below.
60 unsigned long (*cap_alloc)();
61 void (*cap_free)(unsigned long);
// File-descriptor table. NOTE(review): fragmentary excerpt — class body
// braces and intermediate lines are missing from this listing.
66 class Fd_store : public L4Re::Core::Fd_store
// Constructor wires fds 0/1/2 to a single console (Vcon) stream.
73 Fd_store::Fd_store() throw()
// Function-local static: the stream lives for the whole program and is
// deliberately never deleted (see comment below).
75 static L4Re::Core::Vcon_stream s(L4Re::Env::env()->log());
76 // make sure that we never delete the static io stream thing
78 set(0, cxx::ref_ptr(&s)); // stdin
79 set(1, cxx::ref_ptr(&s)); // stdout
80 set(2, cxx::ref_ptr(&s)); // stderr
// Root of the mount tree. NOTE(review): fragment — access specifiers and
// closing brace are missing from this listing.
83 class Root_mount_tree : public L4Re::Vfs::Mount_tree
86 Root_mount_tree() : L4Re::Vfs::Mount_tree(0) {}
// No-op delete: instances are static singletons and must never be freed.
87 void operator delete (void *) {}
// Concrete implementation of the POSIX-like VFS operations interface.
// NOTE(review): fragmentary excerpt — constructor body braces, several
// declarations, and section markers (public/private) are missing here.
90 class Vfs : public L4Re::Vfs::Ops
// Constructor init-list fragment: root dir is backed by the L4Re
// environment; anonymous-memory dataspace chunk size preset to 256 MiB.
97 : _early_oom(true), _root_mount(), _root(L4Re::Env::env()),
98 _annon_size(0x10000000)
// Pin the statically allocated mount tree / root dir via extra refs so
// ref-counting never destroys them.
100 _root_mount.add_ref();
102 _root_mount.mount(cxx::ref_ptr(&_root));
// Presumably dead/demo code guarded elsewhere: mounts "rom" at
// "lib/foo" and makes "lib" the cwd — TODO confirm whether this path is
// compiled in the full file (likely inside #if 0 or similar).
106 Ref_ptr<L4Re::Vfs::File> rom;
107 _root.openat("rom", 0, 0, &rom);
109 _root_mount.create_tree("lib/foo", rom);
111 _root.openat("lib", 0, 0, &_cwd);
// --- file-descriptor management ---------------------------------------
116 int alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) throw();
117 Ref_ptr<L4Re::Vfs::File> free_fd(int fd) throw();
118 Ref_ptr<L4Re::Vfs::File> get_root() throw();
119 Ref_ptr<L4Re::Vfs::File> get_cwd() throw();
120 void set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) throw();
121 Ref_ptr<L4Re::Vfs::File> get_file(int fd) throw();
122 Ref_ptr<L4Re::Vfs::File> set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f = Ref_ptr<>::Nil) throw();
123 L4Re::Cap_alloc *cap_alloc() throw();
// --- memory-mapping interface (mmap2 takes the offset in 4K pages) ----
125 int mmap2(void *start, size_t len, int prot, int flags, int fd,
126 off_t offset, void **ptr) throw();
128 int munmap(void *start, size_t len) throw();
129 int mremap(void *old, size_t old_sz, size_t new_sz, int flags,
130 void **new_adr) throw();
131 int mprotect(const void *a, size_t sz, int prot) throw();
132 int msync(void *addr, size_t len, int flags) throw();
133 int madvise(void *addr, size_t len, int advice) throw();
// --- file-system type registry ----------------------------------------
135 int register_file_system(L4Re::Vfs::File_system *f) throw();
136 int unregister_file_system(L4Re::Vfs::File_system *f) throw();
137 L4Re::Vfs::File_system *get_file_system(char const *fstype) throw();
// Static singleton — must never be deleted.
139 void operator delete (void *) {}
// --- data members ------------------------------------------------------
142 Root_mount_tree _root_mount;
143 L4Re::Core::Env_dir _root;
144 Ref_ptr<L4Re::Vfs::File> _cwd;
// Singly linked list of registered file-system drivers (via ->next()).
147 L4Re::Vfs::File_system *_fs_registry;
// Bump allocator state for anonymous memory ("annon" spelling is
// historical): one big dataspace carved up by _annon_offset.
149 l4_addr_t _annon_size;
150 l4_addr_t _annon_offset;
151 L4::Cap<L4Re::Dataspace> _annon_ds;
153 int alloc_ds(unsigned long size, L4::Cap<L4Re::Dataspace> *ds);
154 int alloc_anon_mem(l4_umword_t size, L4::Cap<L4Re::Dataspace> *ds,
// Local strcmp-for-equality helper. NOTE(review): fragment — the return
// statement following the loop is missing from this listing (presumably
// `return *a == *b;` — confirm in the full file).
158 static inline bool strequal(char const *a, char const *b)
160 for (;*a && *a == *b; ++a, ++b)
// Registers a file-system driver, rejecting duplicate type names.
// NOTE(review): fragment — null check, duplicate-error return, and the
// list-head assignment are missing from this listing.
166 Vfs::register_file_system(L4Re::Vfs::File_system *f) throw()
168 using L4Re::Vfs::File_system;
// Linear scan over the registry; duplicate type() presumably yields an
// error return (line missing here).
173 for (File_system *c = _fs_registry; c; c = c->next())
174 if (strequal(c->type(), f->type()))
// Prepend `f` to the singly linked registry list.
177 f->next(_fs_registry);
// Removes a driver from the registry via pointer-to-pointer unlink.
// NOTE(review): fragment — the match/unlink body and return values are
// missing from this listing.
184 Vfs::unregister_file_system(L4Re::Vfs::File_system *f) throw()
186 using L4Re::Vfs::File_system;
// `p` walks the *links* (not the nodes) so the matching entry can be
// unlinked without tracking a separate predecessor.
191 File_system **p = &_fs_registry;
193 for (; *p; p = &(*p)->next())
// Looks up a registered file-system driver by type name; on a miss it
// attempts to load a matching module dynamically once and retries.
// NOTE(review): fragment — the retry loop structure and return paths are
// missing from this listing.
204 L4Re::Vfs::File_system *
205 Vfs::get_file_system(char const *fstype) throw()
// Guard so dynamic loading is attempted at most once (flag presumably
// cleared after the load attempt — confirm in full file).
207 bool try_dynamic = true;
210 using L4Re::Vfs::File_system;
211 for (File_system *c = _fs_registry; c; c = c->next())
212 if (strequal(c->type(), fstype))
218 // try to load a file system module dynamically
219 int res = Vfs_config::load_module(fstype);
// Allocates the lowest free fd and (presumably) binds `f` to it.
// NOTE(review): fragment — error handling and the set/return lines are
// missing from this listing.
229 Vfs::alloc_fd(Ref_ptr<L4Re::Vfs::File> const &f) throw()
231 int fd = fds.alloc();
// Releases an fd slot and returns the file that was bound to it, so the
// caller holds the last reference; returns Nil when intermediate checks
// (missing here) fail.
241 Ref_ptr<L4Re::Vfs::File>
242 Vfs::free_fd(int fd) throw()
244 Ref_ptr<L4Re::Vfs::File> f = fds.get(fd);
247 return Ref_ptr<>::Nil;
// --- small accessors; NOTE(review): bodies are fragmentary here --------
// Root directory of the VFS (the statically owned Env_dir member).
254 Ref_ptr<L4Re::Vfs::File>
255 Vfs::get_root() throw()
257 return cxx::ref_ptr(&_root);
260 Ref_ptr<L4Re::Vfs::File>
261 Vfs::get_cwd() throw()
// Sets the current working directory (body missing in this listing).
267 Vfs::set_cwd(Ref_ptr<L4Re::Vfs::File> const &dir) throw()
269 // FIXME: check for is dir
274 Ref_ptr<L4Re::Vfs::File>
275 Vfs::get_file(int fd) throw()
// Replaces the file bound to `fd`, returning the previous binding so the
// caller can release it.
280 Ref_ptr<L4Re::Vfs::File>
281 Vfs::set_fd(int fd, Ref_ptr<L4Re::Vfs::File> const &f) throw()
283 Ref_ptr<L4Re::Vfs::File> old = fds.get(fd);
// Exposes the backend's capability allocator to the VFS layer.
289 Vfs::cap_alloc() throw()
291 return L4Re::Core::cap_alloc();
// Convenience macros: fetch the file object for `fd` into a local `fi`
// and (in lines missing from this listing) bail out with `err` when the
// fd is unbound. No comments are inserted between the lines below —
// they are backslash-continued macro bodies.
296 #define GET_FILE_DBG(fd, err) \
297 Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \
303 #define GET_FILE(fd, err) \
304 Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd); \
// POSIX munmap: detach the region from the region map and release any
// dataspace that became fully unreferenced. NOTE(review): fragment — the
// surrounding loop, error returns, and remaining switch cases are missing
// from this listing.
310 Vfs::munmap(void *start, size_t len) L4_NOTHROW
313 using namespace L4Re;
317 Cap<Rm> r = Env::env()->rm();
// Compiled out by default: DEBUG_LOG expands to nothing (see macro at
// top of file).
321 DEBUG_LOG(debug_mmap, {
322 outstring("DETACH: ");
323 outhex32(l4_addr_t(start));
// Detach may cover only part of the request; result flags below tell
// whether a dataspace was fully detached and whether to loop again.
328 err = r->detach(l4_addr_t(start), len, &ds, This_task);
332 switch (err & Rm::Detach_result_mask)
338 case Rm::Detached_ds:
// Last mapping of this dataspace gone — release the backing object.
340 L4Re::Core::release_ds(ds);
// Loop terminator: stop when the Rm reports nothing further to detach.
346 if (!(err & Rm::Detach_again))
// Allocates a capability slot and a fresh dataspace of `size` bytes from
// the configured allocator. NOTE(review): fragment — cap-validity check
// and error/success returns are missing from this listing.
352 Vfs::alloc_ds(unsigned long size, L4::Cap<L4Re::Dataspace> *ds)
354 *ds = Vfs_config::cap_alloc.alloc<L4Re::Dataspace>();
360 if ((err = Vfs_config::allocator()->alloc(size, *ds)) < 0)
363 DEBUG_LOG(debug_mmap, {
364 outstring("ANNON DS ALLOCATED: size=");
// Provides backing memory for anonymous mappings. With USE_BIG_ANON_DS
// (the default, see top of file) it bump-allocates out of one large
// shared dataspace; otherwise (#else branch, partially visible below)
// each request gets its own dataspace. NOTE(review): fragment — error
// returns, the #else/#endif markers, and closing braces are missing.
375 Vfs::alloc_anon_mem(l4_umword_t size, L4::Cap<L4Re::Dataspace> *ds,
378 #ifdef USE_BIG_ANON_DS
// Start a new big chunk when none exists or the current one is exhausted.
379 if (!_annon_ds.is_valid() || _annon_offset + size >= _annon_size)
// Drop our own reference to the old chunk; live mappings keep it alive
// via the region map until they are unmapped.
381 if (_annon_ds.is_valid())
382 L4Re::Core::release_ds(_annon_ds);
385 if ((err = alloc_ds(_annon_size, ds)) < 0)
// Eagerly commit the requested range (early-OOM behavior — fail at mmap
// time rather than on first touch).
398 if (int err = (*ds)->allocate(_annon_offset, size))
402 *offset = _annon_offset;
403 _annon_offset += size;
// Per-request dataspace path (non-USE_BIG_ANON_DS build).
406 if ((err = alloc_ds(size, ds)) < 0)
411 if ((err = (*ds)->allocate(0, size)))
// POSIX mmap2: like mmap but the offset argument is in 4096-byte units
// (shifted to a byte offset below). Supports anonymous and file-backed
// mappings, MAP_PRIVATE copy-in, MAP_FIXED overmapping, and a private
// flag to merely reserve address space. NOTE(review): fragment — error
// returns, closing braces, and several statements are missing from this
// listing.
420 Vfs::mmap2(void *start, size_t len, int prot, int flags, int fd, off_t _offset,
421 void **resptr) L4_NOTHROW
423 using namespace L4Re;
// mmap2 contract: offset argument counts 4K pages, convert to bytes.
424 off64_t offset = _offset << 12;
426 start = (void*)l4_trunc_page(l4_addr_t(start));
427 len = l4_round_page(len);
// NOTE(review): redundant-looking second rounding of the already
// page-rounded len — harmless, but confirm against the full file.
428 l4_umword_t size = (len + L4_PAGESIZE-1) & ~(L4_PAGESIZE-1);
430 // special code to just reserve an area of the virtual address space
// 0x1000000 is a private, non-POSIX flag value used by the runtime.
431 if (flags & 0x1000000)
434 L4::Cap<Rm> r = Env::env()->rm();
435 l4_addr_t area = (l4_addr_t)start;
436 err = r->reserve_area(&area, size, L4Re::Rm::Search_addr);
439 *resptr = (void*)area;
440 DEBUG_LOG(debug_mmap, {
441 outstring("MMAP reserved area: ");
450 L4::Cap<L4Re::Dataspace> ds;
451 l4_addr_t annon_offset = 0;
452 unsigned rm_flags = 0;
// Anonymous or private mappings get (copy-on-write style) anon backing;
// Detach_free makes munmap release the dataspace reference.
454 if (flags & (MAP_ANONYMOUS | MAP_PRIVATE))
456 rm_flags |= L4Re::Rm::Detach_free;
458 int err = alloc_anon_mem(size, &ds, &annon_offset);
462 DEBUG_LOG(debug_mmap, {
463 outstring("USE ANNON MEM: ");
466 outhex32(annon_offset);
// File-backed mapping: resolve fd to its dataspace.
471 if (!(flags & MAP_ANONYMOUS))
473 Ref_ptr<L4Re::Vfs::File> fi = fds.get(fd);
// NOTE(review): local `fds` (the file's dataspace) shadows the member
// fd table `fds` used two lines above — legal but confusing.
479 L4::Cap<L4Re::Dataspace> fds = fi->data_space();
// Reject mappings extending past the (page-rounded) file size.
486 if (size + offset > l4_round_page(fds->size()))
491 if (flags & MAP_PRIVATE)
493 DEBUG_LOG(debug_mmap, outstring("COW\n"););
// Private mapping: copy file contents into the anon dataspace, then
// map the copy instead of the file.
494 ds->copy_in(annon_offset, fds, l4_trunc_page(offset), l4_round_page(size));
495 offset = annon_offset;
504 offset = annon_offset;
// Never map page 0: a NULL hint without MAP_FIXED starts searching at
// the first page instead.
507 if (!(flags & MAP_FIXED) && start == 0)
508 start = (void*)L4_PAGESIZE;
511 char *data = (char *)start;
512 L4::Cap<Rm> r = Env::env()->rm();
513 l4_addr_t overmap_area = L4_INVALID_ADDR;
// MAP_FIXED: reserve the target range, unmap whatever is there, then
// attach inside the reservation (In_area) to guarantee the address.
515 if (flags & MAP_FIXED)
517 overmap_area = l4_addr_t(start);
519 err = r->reserve_area(&overmap_area, size);
521 overmap_area = L4_INVALID_ADDR;
523 rm_flags |= Rm::In_area;
// -ENOENT (nothing mapped there) is fine; other errors are fatal
// (handling lines missing here).
525 err = munmap(start, len);
526 if (err && err != -ENOENT)
530 if (!(flags & MAP_FIXED)) rm_flags |= Rm::Search_addr;
531 if (!(prot & PROT_WRITE)) rm_flags |= Rm::Read_only;
533 err = r->attach(&data, size, rm_flags, ds, offset);
535 DEBUG_LOG(debug_mmap, {
536 outstring(" MAPPED: ");
538 outstring(" addr: ");
539 outhex32(l4_addr_t(data));
540 outstring(" bytes: ");
542 outstring(" offset: ");
// Drop the temporary MAP_FIXED reservation now that the mapping is in.
550 if (overmap_area != L4_INVALID_ADDR)
551 r->free_area(overmap_area);
// RAII guard for a reserved Rm area: frees the reservation on scope exit.
// NOTE(review): fragment — class head, members (`r`, `a`), the free()
// method body, and braces are missing from this listing.
572 explicit Auto_area(L4::Cap<L4Re::Rm> r, l4_addr_t a = L4_INVALID_ADDR)
// Reserve [_a, _a+sz); on success `a` records the area for cleanup.
575 int reserve(l4_addr_t _a, l4_size_t sz, unsigned flags)
578 int e = r->reserve_area(&a, sz, flags);
// free() is presumably a no-op when nothing is held (guard below).
586 if (a != L4_INVALID_ADDR)
593 ~Auto_area() { free(); }
// POSIX mremap: shrink in place, grow in place when the adjacent address
// space is free, or relocate the whole mapping region-by-region when
// MREMAP_MAYMOVE permits. NOTE(review): fragment — many statements,
// error paths, and the loop/brace structure are missing from this
// listing; comments below describe only what the visible lines show.
598 Vfs::mremap(void *old_addr, size_t old_size, size_t new_size, int flags,
599 void **new_addr) L4_NOTHROW
601 using namespace L4Re;
// Linux semantics: MREMAP_FIXED requires MREMAP_MAYMOVE.
603 if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
606 L4::Cap<Rm> r = Env::env()->rm();
608 // sanitize input parameters to multiples of pages
609 l4_addr_t oa = l4_trunc_page((l4_addr_t)old_addr);
610 old_size = l4_round_page(old_size);
611 new_size = l4_round_page(new_size);
// Shrink: keep the prefix, unmap the tail.
615 if (new_size < old_size)
617 *new_addr = old_addr;
618 return munmap((void*)(oa + new_size), old_size - new_size);
// Same size: nothing to do.
621 if (new_size == old_size)
623 *new_addr = old_addr;
// Without MREMAP_FIXED the (tentative) target is the old address.
629 if (!(flags & MREMAP_FIXED))
632 na = l4_trunc_page((l4_addr_t)new_addr);
636 // check if the current virtual memory area can be expanded
637 err = area.reserve(oa, new_size, 0);
// Probe the range just past the old mapping for existing regions.
641 l4_addr_t ta = oa + old_size;
642 unsigned long ts = new_size - old_size;
645 L4::Cap<L4Re::Dataspace> tds;
647 err = r->find(&ta, &ts, &toffs, &tflags, &tds);
649 // there is enough space to expand the mapping in place
// If something real is mapped there (neither empty nor just our own
// reservation), in-place growth is impossible — we must move.
650 if (!(err == -ENOENT || (err == 0 && (tflags & Rm::In_area))))
652 if ((flags & (MREMAP_FIXED | MREMAP_MAYMOVE)) != MREMAP_MAYMOVE)
655 // free our old reserved area, used for blocking the old memory region
// Find a fresh area big enough for the whole new mapping.
659 err = area.reserve(0, new_size, Rm::Search_addr);
665 // move all the old regions to the new place ...
// Block the old range so concurrent attaches cannot race the move.
666 Auto_area block_area(r);
667 err = block_area.reserve(oa, old_size, 0);
// Walk each region inside the old range, re-attach it at the
// corresponding offset in the new area, then detach the original.
676 err = r->find(&ta, &ts, &toffs, &tflags, &tds);
677 if (err == -ENOENT || (err == 0 && (tflags & Rm::In_area)))
690 l4_addr_t n = na + (ta - oa);
// Clamp the piece to what remains of the old range.
691 unsigned long max_s = old_size - (ta - oa);
696 err = r->attach(&n, ts, tflags | Rm::In_area, tds, toffs);
702 err = r->detach(ta, ts, &tds, This_task);
706 switch (err & Rm::Detach_result_mask)
712 case Rm::Detached_ds:
// NOTE(review): releasing tds here while the new attach still uses it
// relies on the Rm holding its own reference — confirm in full file.
714 L4Re::Core::release_ds(tds);
// Back-fill the grown tail with fresh anonymous memory.
722 err = alloc_anon_mem(new_size - old_size, &tds, &toffs);
726 *new_addr = (void *)na;
728 err = r->attach(&na, new_size - old_size, Rm::In_area, tds, toffs);
// mprotect is not really supported: the visible return treats any request
// that adds PROT_WRITE as failure (-1) and everything else as a no-op
// success. NOTE(review): fragment — intermediate lines are missing.
734 Vfs::mprotect(const void *a, size_t sz, int prot) L4_NOTHROW
738 return (prot & PROT_WRITE) ? -1 : 0;
// msync/madvise: accepted but ignored stubs (bodies missing here,
// presumably `return 0;`).
742 Vfs::msync(void *, size_t, int) L4_NOTHROW
746 Vfs::madvise(void *, size_t, int) L4_NOTHROW
// The singleton VFS object. init_priority(1000) makes it construct before
// ordinary-priority static initializers that may already need file I/O.
749 static Vfs vfs __attribute__((init_priority(1000)));
753 //L4Re::Vfs::Ops *__ldso_posix_vfs_ops = &vfs;
// Exported as a plain void* so the dynamic linker / libc can locate the
// VFS ops table by symbol name without C++ type knowledge; the alias
// publishes it under the stable public name with default visibility.
754 void *__rtld_l4re_env_posix_vfs_ops = &vfs;
755 extern void *l4re_env_posix_vfs_ops __attribute__((alias("__rtld_l4re_env_posix_vfs_ops"), visibility("default")));