4 * This file is part of the Valgrind port to L4Re.
6 * (c) 2009-2010 Aaron Pohle <apohle@os.inf.tu-dresden.de>,
7 * Bjoern Doebel <doebel@os.inf.tu-dresden.de>
8 * economic rights: Technische Universitaet Dresden (Germany)
11 #include <l4/sys/compiler.h>
12 // C++'s definition of NULL disagrees with Valgrind's
16 #include <l4/re/env.h>
17 #include <l4/re/c/util/cap_alloc.h>
18 #include <l4/util/bitops.h>
19 #include <l4/re/c/rm.h>
20 #include <l4/re/util/object_registry>
21 #include <l4/sys/factory>
22 #include <l4/sys/ipc.h>
23 #include <l4/sys/utcb.h>
24 #include <l4/sys/vcon.h>
25 #include <l4/sys/scheduler.h>
26 #include <l4/sys/kdebug.h>
27 #include <l4/sys/debugger.h>
28 #include <l4/cxx/list>
29 #include <l4/sys/thread>
30 #include <l4/sys/scheduler>
31 #include <l4/re/util/cap_alloc>
33 #include <l4/re/protocols>
34 #include <l4/re/dataspace>
35 #include <l4/cxx/ipc_server>
36 #include <l4/re/util/vcon_svr>
37 #include <l4/re/util/region_mapping_svr>
38 #include <l4/re/util/region_mapping>
39 #include <l4/re/util/debug>
43 #include "pub_core_basics.h"
45 #include "pub_l4re_consts.h"
46 #include "pub_core_ume.h"
47 #include "pub_core_aspacemgr.h"
48 #include "pub_core_debuginfo.h" //DebugInfo
49 #include "pub_tool_libcbase.h"
50 #include "pub_tool_libcfile.h"
51 #include "pub_core_libcprint.h"
52 #include "pub_core_vki.h"
53 #include "pub_core_threadstate.h"
54 #include "pub_core_scheduler.h"
55 #include "pub_core_tooliface.h"
56 #include "pub_tool_mallocfree.h"
57 #include "pub_core_libcassert.h" // VG_(show_sched_status)
58 #include "l4re/myelf.h"
// Per-subsystem debug switches: page-fault, ELF, region manager, dataspace,
// and vcap-thread logging. 'extern' together with an initializer makes these
// definitions with external linkage, so other translation units can test them.
61 extern const Bool dbg_pf = DEBUG_OFF;
62 extern const Bool dbg_elf = DEBUG_OFF;
63 extern const Bool dbg_rm = DEBUG_OFF;
64 extern const Bool dbg_ds = DEBUG_OFF;
65 extern const Bool dbg_vcap = DEBUG_OFF;
// Capability of the VRM pager thread; assigned in l4re_vcap_start_thread().
67 L4::Cap<L4::Thread> vcap_thread;
// Statically allocated stack for the vcap thread; its top is passed to
// ex_regs() in run_vcap_thread().
68 char vcap_stack[1 << 13]; // 8 kB
70 #include "l4re/allocator"
73 #include "l4re/loop_hooks"
81 // see l4/pkg/loader/server/src/region.cc:Rm_server
// Dataspace capability type used by the region-map server glue below.
85 typedef L4::Cap<L4Re::Dataspace> Dataspace;
// Advertise to the region_map_server template that a find() operation exists.
87 enum { Have_find = true };
// Validate a dataspace capability received with an RM request and hand the
// resulting cap back through *ds. Only the debug-trace portion of the body is
// visible in this view; the actual validation/return logic is in elided lines
// — TODO confirm against the full source.
89 static int validate_ds(L4::Snd_fpage const & ds_cap, unsigned flags,
90 L4::Cap<L4Re::Dataspace> *ds)
92 if (dbg_ds) VG_(debugLog)(4, "vcap", "flags 0x%x\n", flags);
93 if (dbg_ds) VG_(debugLog)(4, "vcap", "ds @ %p\n", ds);
94 if (dbg_ds) VG_(debugLog)(4, "vcap", "ds_cap received: %d\n", ds_cap.id_received());
98 static l4_umword_t find_res(L4::Cap<void> const &ds) { return ds.cap(); }
// One-page staging window used by Region_ops::map() to receive dataspace
// mappings before granting them onward; reserved from the real RM during init.
101 static l4_addr_t the_map_area;
102 static l4_addr_t the_map_area_end;
// The original (kernel-provided) region manager, captured before VRM
// installs itself as the environment's RM.
103 static L4::Cap<L4Re::Rm> real_rm;
106 * Modify the current environment's region manager and return the old one.
107 * This is used when switching into non-VRM mode in __notify_tool_of_mapping.
109 L4::Cap<L4Re::Rm> set_environment_rm(L4::Cap<void> cap)
// Remember the currently installed RM so the caller can restore it later.
111 L4::Cap<L4Re::Rm> ret = L4Re::Env::env()->rm();
// Re-type the generic capability as an RM cap and install it. The const_cast
// is needed because L4Re::Env::env() returns a pointer-to-const environment.
113 L4::Cap<L4Re::Rm> rm(cap.cap());
114 const_cast<L4Re::Env*>(L4Re::Env::env())->rm(rm);
121 // see: loader/server/src/region.cc
// Types required by the region_map_server/region_pf_handler templates.
125 typedef L4::Snd_fpage Map_result;
126 typedef L4Re::Util::Region_handler<L4::Cap<L4Re::Dataspace>,
127 Vcap::Rm::Region_ops> MyRegion_handler;
// Init fragment: reserve a single page from the *real* RM to act as the
// staging window for map(), then register that page with Valgrind's
// address-space manager so the segment array knows about it.
131 real_rm->reserve_area(&the_map_area, L4_PAGESIZE,
132 L4Re::Rm::Reserved | L4Re::Rm::Search_addr);
133 the_map_area_end = the_map_area + L4_PAGESIZE -1;
// The window is registered fully accessible (RWX), anonymous.
135 prot |= VKI_PROT_READ;
136 prot |= VKI_PROT_WRITE;
137 prot |= VKI_PROT_EXEC;
139 unsigned flags = VKI_MAP_ANONYMOUS;
141 VG_(am_notify_valgrind_mmap)((Addr)the_map_area,
142 VG_PGROUNDUP(the_map_area_end - the_map_area),
145 VG_(debugLog)(1, "vcap", "Region ops map area: 0x%lx - 0x%lx\n",
146 the_map_area, the_map_area_end);
// Resolve a page fault / map request for region r: pull the backing page from
// the region's dataspace into the staging window, then grant it to the faulter
// at local_addr via *result.
149 static int map(MyRegion_handler const *h,
150 l4_addr_t local_addr, L4Re::Util::Region const &r, bool writable,
151 L4::Snd_fpage *result)
154 if (dbg_ds) VG_(debugLog)(4, "vcap", "%s handler %p, local_addr 0x%lx, writable %s\n",
155 __func__, (void * )h, local_addr, writable ? "true" : "false");
// Reserved regions and regions without a valid backing dataspace cannot be
// mapped.
157 if ((h->flags() & L4Re::Rm::Reserved) || !h->memory().is_valid()) {
158 VG_(debugLog)(4, "vcap", "Reserved || invalid\n");
// Regions with an external pager are not served here.
163 if (h->flags() & L4Re::Rm::Pager) {
164 VG_(debugLog)(4, "vcap", "pager\n");
// Write fault on a read-only region: warn but continue.
169 if (h->is_ro() && writable)
170 Dbg(Dbg::Warn).printf("WARNING: Writable mapping request on read-only region at 0x%lx!\n",
// Translate the faulting address into an offset within the dataspace.
173 l4_addr_t offset = local_addr - r.start() + h->offset();
174 L4::Cap<L4Re::Dataspace> ds = L4::cap_cast<L4Re::Dataspace>(h->memory());
176 if (dbg_ds) VG_(debugLog)(4, "vcap", "Dataspace size 0x%lx\n", ds->size());
177 if (dbg_ds) VG_(debugLog)(4, "vcap", "Region: 0x%lx - 0x%lx\n", r.start(), r.end());
178 if (dbg_ds) VG_(debugLog)(4, "vcap", "map(0x%lx, 0x%x, 0x%lx, 0x%lx, 0x%lx)\n", offset, writable,
179 the_map_area, the_map_area, the_map_area_end);
// NOTE: assignment inside the condition is intentional — ds->map() returns 0
// on success. 'err' is declared in lines elided from this view.
180 if (err = ds->map(offset, writable, the_map_area, the_map_area, the_map_area_end)) {
181 VG_(debugLog)(0, "vcap", "map failed: %d %s\n", err, l4sys_errtostr(err));
182 l4re_rm_show_lists();
183 enter_kdebug("map error");
187 if (dbg_ds) VG_(debugLog)(4, "vcap", "ds->map(): %d\n", err);
// Grant the staging page to the faulter; Grant transfers ownership, so the
// window is empty again afterwards.
189 *result = L4::Snd_fpage::mem(the_map_area, L4_PAGESHIFT, L4_FPAGE_RWX,
190 local_addr, L4::Snd_fpage::Grant);
// The following three Region_ops hooks are unimplemented: each logs and traps
// into the kernel debugger so an unexpected invocation is noticed immediately.
194 static void unmap(MyRegion_handler const *h, l4_addr_t vaddr,
195 l4_addr_t offs, unsigned long size)
197 VG_(debugLog)(0, "vcap", "\n");
198 enter_kdebug("Region_ops::unmap()");
201 static void take(MyRegion_handler const *h)
203 VG_(debugLog)(0, "vcap", "\n");
204 enter_kdebug("Region_ops::take()");
207 static void release(MyRegion_handler const *h)
209 VG_(debugLog)(0, "vcap", "\n");
210 enter_kdebug("Region_ops::release()");
215 /* Explanation: region_mapping_svr relies on the region map consisting of Nodes.
216 * In fact it relies on an implementation using the cxx avl tree
217 * and the underlying node types. However, we handle regions using
218 * Valgrind's data types here, but need an adaptor node type.
// Adaptor mimicking the (first, second) pair layout the AVL-tree node type
// would provide: 'first' is the region, 'second' its handler.
225 node(L4Re::Util::Region const &f,
226 L4Re::Util::Region_handler<L4::Cap<L4Re::Dataspace>, Vcap::Rm::Region_ops> rh)
227 : first(f), second(rh)
230 L4Re::Util::Region first;
231 L4Re::Util::Region_handler<L4::Cap<L4Re::Dataspace>,
232 Vcap::Rm::Region_ops> second;
237 * For reserved areas, we need to store information about their original size, because
238 * during runtime regions may be attached into areas and these reserved regions may
239 * be split into pieces that need to be rejoined if the regions get detached.
// Records the original [start, end] extent of a reserved area.
247 area_info(Addr start, Addr end)
248 :_start(start), _end(end)
// True iff seg is a reservation that exactly starts at this area's start
// (end comparison is in a line elided from this view).
252 bool match_resvn(NSegment const * const seg) const
254 return ((seg->kind == SkResvn) &&
255 (seg->start == _start) &&
// True iff seg lies fully within this area (upper-bound check elided here).
260 bool contains_nsegment(NSegment const * const seg) const
262 return ((seg->start >= _start) &&
// Debug helpers: dump the current segment layout / trap on RM errors.
268 #define DEBUG_RM_LAYOUT do { VG_(am_show_nsegments)(0, (HChar*)"current AS layout"); } while (0)
270 #define RM_ERROR do { DEBUG_RM_LAYOUT; enter_kdebug("rm error"); } while (0)
273 * VCap::rm -- virtual region manager.
275 * This class implements a region manager on top of Valgrind's segment array.
276 * We map L4RM functionality as follows:
278 * 1) attach/detach -> create/delete new segments
280 * Area creation is harder. First, we create a reservation in the segment
281 * array. However, we need to keep track of this area for later attach()es and
282 * therefore keep an additional list of all known regions. This is because the
283 * reservations in the segment array don't suffice -- the area may be
284 * completely mapped with regions and therefore unobtainable from the segment
287 * 2) attach_area -> create new reservation and add to list
288 * 3) detach_area -> remove all reservations in area and list entry
// Bookkeeping list of all reserved areas (see comment above).
293 cxx::List<struct area_info *, Valgrind_allocator> _areas;
// Types required by the region_map_server template machinery.
297 typedef Vcap::Rm::node * Node;
298 typedef L4Re::Util::Region_handler<L4::Cap<L4Re::Dataspace>,
299 Vcap::Rm::Region_ops> Region_handler;
300 typedef L4Re::Util::Region Region;
// Attach-flag aliases forwarded from the L4Re RM interface.
305 Search = L4Re::Rm::Search_addr,
306 In_area = L4Re::Rm::In_area,
// id distinguishes the Valgrind-side from the client-side instance
// (VRMcap_valgrind / VRMcap_client, see attach()).
309 rm(unsigned id) : _id(id) { }
// IPC demultiplexer: route page faults to the generic pf handler and RM
// protocol requests to the region-map server; everything else is rejected.
311 int dispatch(l4_msgtag_t tag, l4_umword_t obj, L4::Ipc_iostream &ios)
316 switch(tag.label()) {
317 case L4_PROTO_PAGE_FAULT:
318 return L4Re::Util::region_pf_handler<Vcap::Dbg>(this, ios);
320 case L4Re::Protocol::Rm:
321 err = L4Re::Util::region_map_server<Vcap::Rm::svr>(this, ios);
322 if (dbg_rm) VG_(debugLog)(4, "vcap", "rm_server() returns %d\n", err);
// Unknown protocol label.
326 return -L4_EBADPROTO;
334 * Find area info for addr.
// Linear scan over the _areas list; returns the entry whose [start, end]
// range contains addr (return statements are in lines elided from this view).
336 struct area_info *__find_area_info(void *addr)
338 typedef cxx::List<struct area_info *, Valgrind_allocator> LIST;
339 for (LIST::Iter i = _areas.items(); *i; ++i) {
340 struct area_info *inf = *i;
341 if (inf->_start <= (Addr)addr && inf->_end >= (Addr)addr)
349 * Remove area info for addr.
// Like __find_area_info(), but removes the matching entry from _areas
// (the removal itself happens in lines elided from this view).
351 bool __remove_area_info(void *addr) {
352 typedef cxx::List<struct area_info *, Valgrind_allocator> LIST;
353 LIST::Iter i = _areas.items();
355 struct area_info *inf = *i;
356 if (inf->_start <= (Addr)addr && inf->_end >= (Addr)addr)
370 * Do what is necessary to attach a region into an already existing area.
372 * \return L4_INVALID_PTR on error
373 * start address on success
375 void *__handle_attach_in_area(void *addr, unsigned long size, unsigned flags)
377 if (dbg_rm) VG_(debugLog)(4, "vcap", "%s: %p %lx %x\n", __func__, addr, size, flags);
379 /* We need to find the reservation for this area.
381 * Case A: The area is still empty, then we'll find the resvn by simply
384 * Case B: There are already areas attached to this resvn. In this case
385 * a simple lookup may give us an area. Hence, we need to search
386 * forwards and backwards in the segment array to find the resvn.
388 NSegment *seg = const_cast<NSegment*>(VG_(am_find_nsegment)((Addr)addr));
391 VG_(printf)("No reservation found for attach in area. Addr %p, size %ld\n",
396 if (dbg_rm) VG_(debugLog)(4, "vcap", "Segment @ %p (%08lx - %08lx, type: %s)\n",
397 seg, seg->start, seg->end, vcap_segknd_str(seg->kind));
// Look up the bookkeeping entry for this area.
399 struct area_info *info = __find_area_info(addr);
401 VG_(printf)("ERROR: could not find area for address %p\n", addr);
404 if (dbg_rm) VG_(debugLog)(4, "vcap", "Area @ %p (%08lx - %08lx)\n",
405 info, info->_start, info->_end);
// Sanity check: the segment we found must be (part of) this area's resvn.
410 if (!info->match_resvn(seg) && seg->kind != SkResvn) {
411 enter_kdebug("resvn not matching");
415 * Now we found a reservation suiting the new region. We now go and unmap
416 * the reservation, create a new region and if necessary mark chunks before
417 * and after the region as reservations.
419 Addr old_start = seg->start;
420 Addr old_end = seg->end;
// Drop the old reservation segment; leftover pieces are re-reserved below.
425 Bool needDiscard = VG_(am_notify_munmap)(seg->start, seg->end - seg->start + 1);
426 if (dbg_rm) VG_(debugLog)(4, "vcap", "unmapped, need discard? %d\n", needDiscard);
428 /* new reservation before this area? */
429 if (old_start < (Addr)addr) {
430 if (dbg_rm) VG_(debugLog)(4, "vcap", "in area: split before");
431 Addr new_start = old_start;
432 unsigned new_size = (Addr)addr - old_start + 1;
433 if (dbg_rm) VG_(debugLog)(4, "vcap", "new resvn @ %p, size %lx\n", new_start, new_size);
434 attach_area(new_start, new_size);
437 /* new reservation after this area? */
438 if (old_end > (Addr)addr + size - 1) {
439 if (dbg_rm) VG_(debugLog)(4, "vcap", "in area: split after");
440 Addr new_start = (Addr)addr + size;
441 unsigned new_size = old_end - new_start + 1;
442 if (dbg_rm) VG_(debugLog)(4, "vcap", "new resvn @ %p, size %lx\n", new_start, new_size);
443 attach_area(new_start, new_size);
451 * Find a free segment with start address >= start and a given size.
452 * We use Valgrind's dynamic memory manager to determine such an
455 * \return L4_INVALID_PTR on error
456 * valid segment start on success
458 void *__find_free_segment(void *start, unsigned size)
// Delegate placement to Valgrind's address-space advisor.
461 void *ret = (void*)VG_(am_get_advisory_client_simple)((Addr)start,
464 VG_(debugLog)(0, "vcap", "Advisor has no free area for us!\n");
466 enter_kdebug("error looking up free segment");
467 return L4_INVALID_PTR;
475 * Valgrind notifies tools about successful mmap() using the VG_TRACK
476 * functionality. The only place where we can keep track of all
477 * mappings is inside VRM, which means in the tool's page fault
478 * handler. Unfortunately, tools tend to do arbitrary things (such as
479 * allocating memory) in response to such notifications and we cannot
480 * simply execute Valgrind code here, because it may try calling the
481 * pager (ourselves!).
483 * Therefore, for sending such notifications, we switch back to the
484 * mode, where we pretend that VRM is not there yet. This means, memory
485 * allocations are redirected to the original RM and segments are
486 * updated internally.
488 * A word on thread-safety: We don't need additional locking here,
489 * because we are sure that the tool code is currently waiting for us
490 * to handle its page fault or RM request. So this is as thread-safe as
491 * the tool was before.
493 void __notify_tool_of_mapping(Addr addr, size_t size, unsigned prot)
// Temporarily restore the real RM, deliver the tool notification, then
// reinstall whatever RM was active before.
495 L4::Cap<L4Re::Rm> __rm__ = Vcap::Rm::set_environment_rm(Vcap::Rm::real_rm);
498 VG_TRACK(new_mem_mmap, (Addr)addr, size, prot & VKI_PROT_READ,
499 prot & VKI_PROT_WRITE, prot & VKI_PROT_EXEC, 0);
501 Vcap::Rm::set_environment_rm(__rm__);
// Attach a region: resolve In_area/Search_addr placement, register the
// mapping with Valgrind's aspacemgr (as Valgrind- or client-owned depending
// on _id), and attach a node handler to the resulting segment.
505 void *attach(void *addr, unsigned long size, Region_handler const &hdlr,
506 unsigned flags = None,
507 unsigned char align = L4_PAGESHIFT) throw()
509 if (dbg_rm) VG_(debugLog)(3, "vcap", "%s: addr %p, size 0x%lx, flags 0x%x=%s%s\n",
510 __func__, addr, size, flags,
511 flags & L4Re::Rm::In_area ? "|In_area" : "",
512 flags & L4Re::Rm::Search_addr ? "|Search_addr" : ""
516 * Special case 1: In_area flag is set -> we need to lookup the area and then
517 * find a free reservation inside it.
519 if (flags & L4Re::Rm::In_area) {
520 addr = __handle_attach_in_area(addr, size, flags);
521 if (dbg_rm) VG_(debugLog)(4, "vcap", "__handle_attach_in_area: %p\n", addr);
522 if (addr == L4_INVALID_PTR)
527 * Special case 2: Search_addr flag is set -> we need to find a free, unreserved
530 if (flags & L4Re::Rm::Search_addr) {
531 addr = __find_free_segment(0, size);
532 if (dbg_rm) VG_(debugLog)(4, "vcap", "__find_free_segment: %p\n", addr);
534 return L4_INVALID_PTR;
538 * Next we need to establish the necessary information. Depending on which VRM
539 * instance this is, we either mark the mapping as Valgrind or Client. Access
540 * rights are determined from the RM handler info.
543 unsigned prot = VKI_PROT_READ;
545 prot |= VKI_PROT_WRITE;
547 Off64T offs = hdlr.offset();
550 * By default, we register all memory mappings as anonymous, because at this
551 * point in the MM chain all we know is data spaces and these are equivalent
552 * to anonymous memory. Valgrind however needs more detailed information for
553 * file mappings. This is handled outside, e.g., in the _dl_mmap trap handler.
555 unsigned vgflags = VKI_MAP_ANONYMOUS;
557 // Notify aspacemgr of Valgrind ...
558 if (_id == VRMcap_valgrind) {
559 if (dbg_rm) VG_(debugLog)(4, "vcap", "am_notify_valgrind_mmap(a=%08lx, size %lx)\n",
561 VG_(am_notify_valgrind_mmap)((Addr)addr, size, prot, vgflags, -1, 0);
563 else if (_id == VRMcap_client) { // ... or client mapping
564 VG_(am_notify_client_mmap)((Addr)addr, size, prot, vgflags, -1, 0);
// Client mappings additionally need the tool-side VG_TRACK notification.
565 __notify_tool_of_mapping((Addr)addr, size, prot);
569 VG_(printf)("I can't determine what kind of mapping this is -- id = %lX\n", _id);
574 * Aspacemgr now added the reservation/mapping to the segment array. Now we need to
575 * add a RM handler to this segment.
577 vrm_segment_notify((Addr)addr, (Addr)addr + size - 1, hdlr.client_cap_idx(), offs, flags);
579 if (dbg_rm) VG_(debugLog)(4, "vcap", "returning %p\n", addr);
585 * Reserve a region. We do this using Valgrind's RSVN segment type. When attaching areas to
586 * these reservations, we adapt accordingly. This may lead to a point, where no RSVN segment
587 * is there anymore. Hence, we cannot rely only on the segment array to store information on
588 * segments, but need to additionally keep track of regions in an area_info list.
590 l4_addr_t attach_area(l4_addr_t addr, unsigned long size,
591 unsigned flags = None,
592 unsigned char align = L4_PAGESHIFT) throw()
594 if (dbg_rm) VG_(debugLog)(3, "vcap", "%s: addr 0x%lx, size 0x%lx, flags 0x%x, align 0x%x\n",
595 __func__, addr, size, flags, align);
597 if (flags & L4Re::Rm::Search_addr) {
598 addr = (l4_addr_t)__find_free_segment((void*)addr, size);
599 if (dbg_rm) VG_(debugLog)(4, "vcap", "__find_free_segment: %08lx\n", addr);
601 return (l4_addr_t)L4_INVALID_PTR;
604 // Now we mark this area as reserved.
605 Bool ok = VG_(am_create_reservation)(addr, VG_PGROUNDUP(size), SmFixed, 0);
608 * Failing here is ok. This just means, there is already
609 * another reservation existing. L4Re apps must handle this
612 if (dbg_rm) VG_(debugLog)(4, "vcap", "Error creating reservation for area 0x%lx, size 0x%lx\n",
614 return L4_INVALID_ADDR;
// Remember the area's original extent and link it to the segment via the
// node pointer so it can be rejoined on detach_area().
617 NSegment *seg = const_cast<NSegment*>(VG_(am_find_nsegment)((Addr)addr));
618 area_info *info = new area_info(addr, addr + VG_PGROUNDUP(size) - 1);
619 VG_(am_set_nodeptr)(seg, (Addr)info);
620 if (dbg_rm) VG_(debugLog)(3, "vcap", "AREA segment %08lx node %p\n", (Addr)seg, info);
622 _areas.push_back(info);
// Dissolve a reserved area: unmap all reservation segments belonging to it,
// then drop the bookkeeping entry from _areas.
628 bool detach_area(l4_addr_t addr) throw()
630 if (dbg_rm) VG_(debugLog)(3, "vcap", "%s: addr 0x%lx\n", __func__, addr);
632 struct area_info *info = __find_area_info((void*)addr);
634 VG_(printf)("Cannot find area for address %p\n", (void*)addr);
636 enter_kdebug("detach_area");
639 VG_(debugLog)(4, "vcap", "area: %08lx - %08lx\n", info->_start, info->_end);
640 NSegment *seg = const_cast<NSegment*>(VG_(am_find_nsegment)(info->_start));
642 * Run through all segments within the area and unmap them if they are
643 * only reservations for this area.
645 while (seg->start < info->_end) {
646 if (seg->kind == SkResvn && seg->dsNodePtr != NULL) {
647 if (dbg_rm) VG_(debugLog)(4, "vcap", "unmapping resvn %08lx - %08lx\n",
648 seg->start, seg->end);
649 NSegment *next_seg = seg + 1;
650 Bool needDiscard = VG_(am_notify_munmap)(seg->start,
651 seg->end - seg->start + 1);
653 * we need to re-lookup the segment, because by removing
654 * the mapping above, we invalidated our seg pointer
662 if (dbg_rm) VG_(debugLog)(4, "vcap", "removed all reservations. Deleting area_info\n");
663 __remove_area_info((void*)addr);
// Detach the region covering addr: look up its segment, hand the region and
// handler back via the OUT parameters, and unmap the segment.
668 int detach(void *addr, unsigned long sz, unsigned flags,
669 Region *reg /* OUT */,
670 Region_handler *hdlr /* OUT */) throw()
672 // XXX: overlapping detach?
674 if (dbg_rm) VG_(debugLog)(3, "vcap", "%s: addr %p, size 0x%lx, flags 0x%x, region %p, hdlr %p\n",
675 __func__, addr, sz, flags, reg, hdlr);
// Reservations carry no attached region and cannot be detached here.
677 NSegment const * seg = VG_(am_find_nsegment)((Addr)addr);
678 if (!seg ||(seg && seg->kind==SkResvn))
681 unsigned size = l4_round_page(seg->end - seg->start);
682 Node n = reinterpret_cast<Node>(seg->dsNodePtr);
689 Bool needDiscard = VG_(am_notify_munmap)(seg->start, size);
690 if (dbg_rm) VG_(debugLog)(4, "vcap", "unmapped, need discard? %d\n", needDiscard);
692 return L4Re::Rm::Detached_ds;
// Look up the node (region + handler) for a region, by querying Valgrind's
// segment array and returning the segment's stored node pointer.
696 Node find(Region const &reg) const throw()
698 if (dbg_pf) VG_(debugLog)(4, "vcap", "Find start 0x%lx, end 0x%lx\n", reg.start(),
702 NSegment const * vg_seg = VG_(am_find_nsegment)(reg.start());
704 VG_(debugLog)(1, "vcap", "Error looking up segment for 0x%lx\n",
708 if (dbg_pf) VG_(debugLog)(4, "vcap", "Found segment: %p (0x%lx - 0x%lx) nptr %08lx, type %s\n",
709 vg_seg, vg_seg->start, vg_seg->end, vg_seg->dsNodePtr, vcap_segknd_str(vg_seg->kind));
712 * If there's no node ptr, something went wrong
714 if (!vg_seg->dsNodePtr) {
715 VG_(debugLog)(0, "vcap", "ERROR: no node ptr found for region %p - %p\n",
716 reg.start(), reg.end());
720 n = (Node)vg_seg->dsNodePtr;
722 if (dbg_pf) VG_(debugLog)(4, "vcap", "Node ptr: %p\n", n);
// Unimplemented interface hooks: trap into the kernel debugger if ever called.
728 Node area_find(Region const &r) const throw()
730 enter_kdebug("area_find");
734 void get_lists( l4_addr_t addr ) const throw()
736 enter_kdebug("get_lists");
// Server object multiplexing all protocols VRM serves (log, page fault, RM,
// parent, irq, exception) onto the embedded rm instance.
746 class Vcap_object : public L4::Server_object
755 Vcap_object(unsigned id) : _rm(id), _id(id)
// Exception protocol handler: currently only announces the exception.
759 int handle_exception()
761 VG_(printf)("\033[31mEXCEPTION\033[0m\n");
766 int dispatch(l4_umword_t obj, L4::Ipc_iostream &ios) L4_NOTHROW
768 if (dbg_vcap) VG_(debugLog)(4, "vcap", "dispatch\n");
775 VG_(debugLog)(4, "vcap", "label: %lx (%ld)\n", t.label(), t.label());
781 VG_(debugLog)(2, "vcap", "log protocol\n");
783 return _log.dispatch(obj, ios);
785 case L4_PROTO_PAGE_FAULT:
786 //if (dbg_vcap) VG_(debugLog)(2, "vcap", "page fault\n");
787 return _rm.dispatch(t, obj, ios);
789 case L4Re::Protocol::Rm:
790 return _rm.dispatch(t, obj, ios);
792 case L4Re::Protocol::Parent:
793 if (dbg_vcap) VG_(debugLog)(2, "vcap", "parent protocol\n");
794 enter_kdebug("parent");
798 if (dbg_vcap) VG_(debugLog)(2, "vcap", "irq protocol\n");
799 return _rm.dispatch(t, obj, ios);
802 case L4_PROTO_EXCEPTION:
803 return handle_exception();
806 VG_(debugLog)(2, "vcap", "Unknown protocol: %lx (%ld)\n",
807 t.label(), t.label());
808 VG_(show_sched_status)();
809 enter_kdebug("Unknown protocol");
817 * Flag determining whether VRM is running atm. If this is true, we use VRM's normal
818 * memory management functions, otherwise (before VRM is started) we fall back to local
821 int vcap_running = 0;
824 * We use 2 server objects for handling RM requests -- one for the client and one for
827 static Vcap_object valgrind_obj(VRMcap_valgrind);
828 static Vcap_object client_obj(VRMcap_client);
// Global receive capability slot for incoming cap transfers; allocated in
// vcap_thread_fn().
829 L4::Cap<void> Vcap::Loop_hooks::rcv_cap;
// Entry point of the VRM pager thread: set up Region_ops, allocate the global
// receive cap, register both server objects, and enter the server loop.
834 static void vcap_thread_fn(void *arg) L4_NOTHROW
836 VG_(debugLog)(1, "vcap", "%s: Here, vcap_running @ %p (%d)\n", __func__,
837 &vcap_running, vcap_running);
839 Vcap::Rm::Region_ops::init();
841 Vcap::Loop_hooks::rcv_cap = L4Re::Util::cap_alloc.alloc<void>();
842 if (!Vcap::Loop_hooks::rcv_cap.is_valid()) {
843 VG_(debugLog)(0, "vcap","Error allocating rcv cap.\n");
844 enter_kdebug("ERROR");
847 VG_(debugLog)(1, "vcap","Global rcv cap: %lx\n",
848 Vcap::Loop_hooks::rcv_cap.cap());
851 L4Re::Util::Object_registry registry(vcap_thread,
852 L4Re::Env::env()->factory());
854 L4Re::Util::Registry_server<Vcap::Loop_hooks> server(l4_utcb(), vcap_thread,
855 L4Re::Env::env()->factory());
857 server.registry()->register_obj(&valgrind_obj);
858 server.registry()->register_obj(&client_obj);
862 VG_(debugLog)(0, "vcap", "server.loop()\n");
868 * Store info about the real Region Manager before switching everything
// Capture the original environment RM so VRM can fall back to it later
// (see set_environment_rm / __notify_tool_of_mapping).
873 Vcap::Rm::real_rm = L4Re::Env::env()->rm();
874 VG_(debugLog)(2, "vcap", "real rm %lx\n", Vcap::Rm::real_rm.cap());
879 * Allocate capability and use it to create a thread.
881 L4::Cap<L4::Thread> allocate_new_thread()
883 L4::Cap<L4::Thread> ret = L4Re::Util::cap_alloc.alloc<L4::Thread>();
884 if (!ret.is_valid()) {
885 VG_(debugLog)(0, "vcap", "%s: Error allocating thread cap.",
887 enter_kdebug("ERROR");
890 if (dbg_vcap) VG_(debugLog)(1, "vcap", "vcap cap: %lx\n", ret.cap());
// Create the kernel thread object behind the freshly allocated capability.
892 l4_msgtag_t tag = L4Re::Env::env()->factory()->create_thread(ret);
893 if (l4_msgtag_has_error(tag)) {
894 VG_(debugLog)(0, "vcap", "Error creating vcap thread from factory.\n");
895 enter_kdebug("ERROR");
903 * Basic VRM thread setup
905 void init_vcap_thread(L4::Cap<L4::Thread>& threadcap)
// Give the new thread the environment's first free UTCB slot.
907 l4_utcb_t *new_utcb = reinterpret_cast<l4_utcb_t*>(
908 L4Re::Env::env()->first_free_utcb());
// Pager and exception handler of the vcap thread remain the original RM.
911 a.pager(L4Re::Env::env()->rm());
912 a.exc_handler(L4Re::Env::env()->rm());
913 a.bind(new_utcb, L4Re::This_task);
914 // XXX: do we need to increment first_free_utcb here?
916 l4_msgtag_t tag = threadcap->control(a);
917 if (l4_msgtag_has_error(tag)) {
918 VG_(debugLog)(0, "vcap-thread", "Error committing vcap thread.\n");
922 // scheduling defaults, copied from loader/server/src/loader.cc
925 sp.affinity = l4_sched_cpu_set(0, ~0, 1);
927 tag = L4Re::Env::env()->scheduler()->run_thread(threadcap, sp);
928 if (l4_msgtag_has_error(tag)) {
929 VG_(debugLog)(0, "vcap-thread", "Error setting scheduling attributes.\n");
// Start the vcap thread: zero its stack, point its IP at vcap_thread_fn and
// its SP at the top of vcap_stack, then wait until the thread signals
// readiness via vcap_running.
934 void run_vcap_thread(L4::Cap<L4::Thread>& threadcap)
936 // XXX: is memset to 0 necessary or does this modify our test results?
937 VG_(memset)(vcap_stack, 0, sizeof(vcap_stack));
940 VG_(debugLog)(2, "vcap", "new stack: %p - %p\n", vcap_stack,
941 vcap_stack + sizeof(vcap_stack));
942 VG_(debugLog)(2, "vcap", "ex_regs(0x%lx, 0x%lx, 0x%lx, 0)\n",
943 threadcap.cap(), (l4_umword_t)vcap_thread_fn,
944 (l4_umword_t)vcap_stack + sizeof(vcap_stack));
947 l4_msgtag_t tag = threadcap->ex_regs((l4_umword_t)vcap_thread_fn,
948 (l4_umword_t)vcap_stack + sizeof(vcap_stack),
950 if (l4_msgtag_has_error(tag)) {
951 VG_(debugLog)(0, "vcap-thread", "Error enabling vcap thread.\n");
// Busy-wait until vcap_thread_fn sets vcap_running.
954 while (!vcap_running)
959 void main_thread_modify_rm(L4::Cap<void> cap)
962 * Now that the VCap thread is started, we also want it to become _our_
963 * pager, so that it can take care of all region mapping duties.
965 L4::Thread::Attr attr;
966 // we assume, we're always executed by the main thread!
967 L4::Cap<L4::Thread> self = L4Re::Env::env()->main_thread();
// Route the main thread's exceptions to the VRM object as well.
970 attr.exc_handler(cap);
974 * And now vcap becomes everyone's region manager
976 Vcap::Rm::set_environment_rm(cap);
// Report the environment's UTCB area to the tool as readable/writable
// startup memory so accesses to it are not flagged.
980 void vrm_track_utcb_area()
982 l4_fpage_t fp = L4Re::Env::env()->utcb_area();
983 Addr start = l4_fpage_page(fp) << L4_PAGESHIFT;
984 Addr end = start + (l4_fpage_size(fp) << L4_PAGESHIFT);
985 VG_(debugLog)(4, "vcap", "TRACK(%p, %lx, 1, 1, 0, 0)\n", start, end-start);
986 VG_TRACK(new_mem_startup, start, end-start, True, True, False, 0);
// C entry point: create, initialize and start the VRM thread, name the
// involved kernel objects for the debugger, and make VRM the main thread's RM.
990 EXTERN_C void l4re_vcap_start_thread(void)
992 VG_(debugLog)(2, "vcap", "%s: Here\n", __func__);
996 vcap_thread = allocate_new_thread();
997 init_vcap_thread(vcap_thread);
998 run_vcap_thread(vcap_thread);
1000 l4_debugger_set_object_name(vcap_thread.cap(), "VG::vcap");
1001 l4_debugger_set_object_name(L4Re::Env::env()->main_thread().cap(),
1003 l4_debugger_set_object_name(valgrind_obj.obj_cap().cap(), "VRMcap::valgrind");
1004 l4_debugger_set_object_name(client_obj.obj_cap().cap(), "VRMcap::client");
1006 main_thread_modify_rm(valgrind_obj.obj_cap());
1011 * Currently, the client sees all init caps Valgrind sees. In future versions we might
1012 * want to modify this as well. Therefore we create a copy of all init caps here and
1013 * pass the client this copy for usage. This is the place to filter out or modify these
1014 * caps before starting the client.
1016 static L4Re::Env::Cap_entry* __copy_init_caps(L4Re::Env::Env const * const e)
1018 if (dbg_vcap) VG_(debugLog)(4, "vcap", "counting caps\n");
// The initial-caps array is terminated by an entry with flags == ~0UL.
1019 L4Re::Env::Cap_entry const *c = e->initial_caps();
1021 for ( ; c->flags != ~0UL; ++c, ++cnt)
1023 if (dbg_vcap) VG_(debugLog)(4, "vcap", "count: %lx\n", cnt+1);
// Allocate client-visible memory for cnt entries plus the terminator.
1025 SysRes res = VG_(am_mmap_anon_float_client)((cnt+1) * sizeof(L4Re::Env::Cap_entry),
1026 VKI_PROT_READ | VKI_PROT_WRITE);
1027 if (sr_isError(res)) {
1028 VG_(debugLog)(0, "vcap", "Error allocating memory for client initial caps.\n");
1031 VG_(memcpy)((void*)sr_Res(res), e->initial_caps(), (cnt+1) * sizeof(L4Re::Env::Cap_entry));
1033 return reinterpret_cast<L4Re::Env::Cap_entry*>(sr_Res(res));
1038 * Modify client environment and make VRM be the handler for all
1039 * interesting events.
1041 EXTERN_C void *l4re_vcap_modify_env(struct ume_auxv *envp)
// Clone the current environment, then redirect parent, RM and log to the
// client-side VRM server object.
1043 L4Re::Env::Env *e = new L4Re::Env::Env(*L4Re::Env::env());
1044 VG_(debugLog)(0, "vcap", " New env @ %p\n", e);
1045 VG_(debugLog)(0, "vcap", " Orig env @ %p\n", L4Re::Env::env());
1047 e->parent(L4::cap_cast<L4Re::Parent>(client_obj.obj_cap()));
1048 e->rm(L4::cap_cast<L4Re::Rm>(client_obj.obj_cap()));
1049 e->log(L4::cap_cast<L4Re::Log>(client_obj.obj_cap()));
// Reserve cap/UTCB ranges for Valgrind's own use below the client's.
1050 e->first_free_cap(MAX_LOCAL_RM_CAPS + MAX_VG_CAPS);
1051 e->first_free_utcb(L4Re::Env::env()->first_free_utcb() + VG_UTCB_OFFSET);
1052 VG_(debugLog)(0, "vcap", " new RM @ %lx\n", e->rm().cap());
1054 e->initial_caps(__copy_init_caps(e));
// Publish the modified environment for the client startup code.
1056 client_env = (void *) e;
1057 client_env_size = sizeof(L4Re::Env::Env);
1064 * When parsing the early region list from RM, we need to add Node descriptors
1065 * for the segments found. Therefore this callback exists for the aspacemgr
1066 * code to notify us about a found segment.
1068 void vrm_segment_notify(Addr start, Addr end, l4_cap_idx_t dscap, unsigned offset, unsigned flags)
1070 NSegment *seg = const_cast<NSegment*>(VG_(am_find_nsegment)((Addr)start));
// Only accept an exact segment match; anything else indicates an
// inconsistency between caller and segment array.
1071 if (seg->start == start && seg->end == end) {
1072 vrm_update_segptr(seg, dscap, offset, flags);
1075 VG_(printf)("Segment mismatch: args %08lx-%08lx; found %08lx-%08lx\n",
1076 start, end, seg->start, seg->end);
1077 enter_kdebug("error");
1083 * This function actually exists for exactly one region: When starting the
1084 * dynamic memory manager, Valgrind allocates a malloc pool. This is still done
1085 * using the "old" RM in place, therefore at this point we need to go to RM and
1086 * request all information necessary to register a node pointer for this
1089 void vrm_segment_fixup(NSegment *seg)
// Reservations carry no dataspace mapping; nothing to fix up.
1094 if (seg->kind == SkResvn)
// Query the real RM if already known, otherwise the environment's RM.
1097 L4::Cap<L4Re::Rm> _rm;
1098 if (Vcap::Rm::real_rm.is_valid())
1099 _rm = Vcap::Rm::real_rm;
1101 _rm = L4Re::Env::env()->rm();
1103 l4_addr_t addr = seg->start;
1104 unsigned long size = seg->end - seg->start + 1;
1105 l4_addr_t offset = 0;
1107 L4::Cap<L4Re::Dataspace> ds;
1108 int i = _rm->find(&addr, &size, &offset, &flags, &ds);
1110 if (dbg_rm) VG_(debugLog)(2, "vcap", "%s: addr %p size %lx, cap %lx, offs %lx\n",
1111 __func__, addr, size, ds.cap(), offset);
1114 vrm_update_segptr(seg, ds.cap(), offset, flags);
1115 if (dbg_rm) VG_(debugLog)(2, "vcap", "ds %lx node ptr @ %p\n", ds.cap(), seg->dsNodePtr);
1120 * Update node pointer information on an existing NSegment
1122 void vrm_update_segptr(NSegment *seg, l4_cap_idx_t dscap, unsigned offset, unsigned flags)
1124 typedef L4Re::Util::Region_handler<L4::Cap<L4Re::Dataspace>,
1125 Vcap::Rm::Region_ops> Region_handler;
1127 L4::Cap<L4Re::Dataspace> dummy(dscap);
1129 * If vcap is not yet running, we may assume, that Valgrind's dynamic memory
1130 * manager is not running yet as well. Therefore, using new() to create a node
1131 * would not work. Instead, we use a placement allocator in the early stages.
1133 * -> exists for calls from vrm_segment_fixup() that end up here
// NOTE(review): _mbuf_index is never bounds-checked against
// sizeof(_early_malloc_buf) in the visible lines — confirm overflow safety.
1135 static unsigned _mbuf_index = 0; //< next index into early placement buf
1136 static char _early_malloc_buf[8192]; //< early placement buf
// Normal path: heap-allocate the node via operator new.
1141 n = new Vcap::Rm::node(L4Re::Util::Region(seg->start, seg->end),
1142 Region_handler(dummy, dummy.cap(),
// Early path: placement-new into the static buffer and bump the index.
1145 n = new (_early_malloc_buf + _mbuf_index)
1146 Vcap::Rm::node(L4Re::Util::Region(seg->start, seg->end),
1147 Region_handler(dummy, dummy.cap(),
1149 _mbuf_index += sizeof(Vcap::Rm::node);
// Attach the node to the segment so find()/detach() can retrieve it.
1152 VG_(am_set_nodeptr)(seg, (Addr)n);
1154 VG_(debugLog)(4, "vcap", "\033[32mupdate_segment %p (%p): node %p\033[0m\n",
1155 seg->start, seg, seg->dsNodePtr);