2 /*--------------------------------------------------------------------*/
3 /*--- The address space manager: segment initialisation and ---*/
4 /*--- tracking, stack operations ---*/
6 /*--- Implementation for Linux m_aspacemgr-linux.c ---*/
7 /*--------------------------------------------------------------------*/
10 This file is part of Valgrind, a dynamic binary instrumentation
13 Copyright (C) 2000-2008 Julian Seward
16 This program is free software; you can redistribute it and/or
17 modify it under the terms of the GNU General Public License as
18 published by the Free Software Foundation; either version 2 of the
19 License, or (at your option) any later version.
21 This program is distributed in the hope that it will be useful, but
22 WITHOUT ANY WARRANTY; without even the implied warranty of
23 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 General Public License for more details.
26 You should have received a copy of the GNU General Public License
27 along with this program; if not, write to the Free Software
28 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
31 The GNU General Public License is contained in the file COPYING.
34 /* *************************************************************
35 DO NOT INCLUDE ANY OTHER FILES HERE.
36 ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
37 AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
38 ************************************************************* */
40 #include "priv_aspacemgr.h"
42 /* Note: many of the exported functions implemented below are
43 described more fully in comments in pub_core_aspacemgr.h.
47 /*-----------------------------------------------------------------*/
51 /*-----------------------------------------------------------------*/
55 The purpose of the address space manager (aspacem) is:
57 (1) to record the disposition of all parts of the process' address
60 (2) to the extent that it can, influence layout in ways favourable
63 It is important to appreciate that whilst it can and does attempt
64 to influence layout, and usually succeeds, it isn't possible to
65 impose absolute control: in the end, the kernel is the final
66 arbiter, and can always bounce our requests.
70 The strategy is therefore as follows:
72 * Track ownership of mappings. Each one can belong either to
73 Valgrind or to the client.
75 * Try to place the client's fixed and hinted mappings at the
76 requested addresses. Fixed mappings are allowed anywhere except
77 in areas reserved by Valgrind; the client can trash its own
78 mappings if it wants. Hinted mappings are allowed providing they
79 fall entirely in free areas; if not, they will be placed by
80 aspacem in a free area.
82 * Anonymous mappings are allocated so as to keep Valgrind and
83 client areas widely separated when possible. If address space
84 runs low, then they may become intermingled: aspacem will attempt
85 to use all possible space. But under most circumstances lack of
86 address space is not a problem and so the areas will remain far
89 Searches for client space start at aspacem_cStart and will wrap
90 around the end of the available space if needed. Searches for
91 Valgrind space start at aspacem_vStart and will also wrap around.
92 Because aspacem_cStart is approximately at the start of the
93 available space and aspacem_vStart is approximately in the
94 middle, for the most part the client anonymous mappings will be
95 clustered towards the start of available space, and Valgrind ones
98 The available space is delimited by aspacem_minAddr and
99 aspacem_maxAddr. aspacem is flexible and can operate with these
100 at any (sane) setting. For 32-bit Linux, aspacem_minAddr is set
101 to some low-ish value at startup (64M) and aspacem_maxAddr is
102 derived from the stack pointer at system startup. This seems a
103 reliable way to establish the initial boundaries.
105 64-bit Linux is similar except for the important detail that the
106 upper boundary is set to 32G. The reason is so that all
107 anonymous mappings (basically all client data areas) are kept
108 below 32G, since that is the maximum range that memcheck can
109 track shadow memory using a fast 2-level sparse array. It can go
110 beyond that but runs much more slowly. The 32G limit is
111 arbitrary and is trivially changed. So, with the current
112 settings, programs on 64-bit Linux will appear to run out of
113 address space and presumably fail at the 32G limit. Given the
114 9/8 space overhead of Memcheck, that means you should be able to
115 memcheckify programs that use up to about 14G natively.
117 Note that the aspacem_minAddr/aspacem_maxAddr limits apply only to
118 anonymous mappings. The client can still do fixed and hinted maps
119 at any addresses provided they do not overlap Valgrind's segments.
120 This makes Valgrind able to load prelinked .so's at their requested
121 addresses on 64-bit platforms, even if they are very high (eg,
124 At startup, aspacem establishes the usable limits, and advises
125 m_main to place the client stack at the top of the range, which on
126 a 32-bit machine will be just below the real initial stack. One
127 effect of this is that self-hosting sort-of works, because an inner
128 valgrind will then place its client's stack just below its own
131 The segment array and segment kinds
132 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
133 The central data structure is the segment array (segments[0
134 .. nsegments_used-1]). This covers the entire address space in
135 order, giving account of every byte of it. Free spaces are
136 represented explicitly as this makes many operations simpler.
137 Mergeable adjacent segments are aggressively merged so as to create
138 a "normalised" representation (preen_nsegments).
140 There are 7 (mutually-exclusive) segment kinds, the meaning of
143 SkFree: a free space, which may be allocated either to Valgrind (V)
146 SkAnonC: an anonymous mapping belonging to C. For these, aspacem
147      tracks a boolean indicating whether or not it is part of the
148 client's heap area (can't remember why).
150 SkFileC: a file mapping belonging to C.
152 SkShmC: a shared memory segment belonging to C.
154 SkAnonV: an anonymous mapping belonging to V. These cover all V's
155 dynamic memory needs, including non-client malloc/free areas,
156 shadow memory, and the translation cache.
158 SkFileV: a file mapping belonging to V. As far as I know these are
159 only created transiently for the purposes of reading debug info.
161 SkResvn: a reservation segment.
163 These are mostly straightforward. Reservation segments have some
166 A reservation segment is unmapped from the kernel's point of view,
167 but is an area in which aspacem will not create anonymous maps
168 (either Vs or Cs). The idea is that we will try to keep it clear
169 when the choice to do so is ours. Reservation segments are
170 'invisible' from the client's point of view: it may choose to park
171 a fixed mapping in the middle of one, and that's just tough -- we
172 can't do anything about that. From the client's perspective
173 reservations are semantically equivalent to (although
174 distinguishable from, if it makes enquiries) free areas.
176 Reservations are a primitive mechanism provided for whatever
177 purposes the rest of the system wants. Currently they are used to
178 reserve the expansion space into which a growdown stack is
179 expanded, and into which the data segment is extended. Note,
180 though, those uses are entirely external to this module, which only
181 supplies the primitives.
183 Reservations may be shrunk in order that an adjoining anonymous
184 mapping may be extended. This makes dataseg/stack expansion work.
185 A reservation may not be shrunk below one page.
187 The advise/notify concept
188 ~~~~~~~~~~~~~~~~~~~~~~~~~
189 All mmap-related calls must be routed via aspacem. Calling
190 sys_mmap directly from the rest of the system is very dangerous
191 because aspacem's data structures will become out of date.
193 The fundamental mode of operation of aspacem is to support client
194 mmaps. Here's what happens (in ML_(generic_PRE_sys_mmap)):
196 * m_syswrap intercepts the mmap call. It examines the parameters
197 and identifies the requested placement constraints. There are
198 three possibilities: no constraint (MAny), hinted (MHint, "I
199 prefer X but will accept anything"), and fixed (MFixed, "X or
202 * This request is passed to VG_(am_get_advisory). This decides on
203 a placement as described in detail in Strategy above. It may
204 also indicate that the map should fail, because it would trash
205 one of Valgrind's areas, which would probably kill the system.
207 * Control returns to the wrapper. If VG_(am_get_advisory) has
208 declared that the map should fail, then it must be made to do so.
209 Usually, though, the request is considered acceptable, in which
210 case an "advised" address is supplied. The advised address
211 replaces the original address supplied by the client, and
214 Note at this point that although aspacem has been asked for
215 advice on where to place the mapping, no commitment has yet been
216 made by either it or the kernel.
218 * The adjusted request is handed off to the kernel.
220 * The kernel's result is examined. If the map succeeded, aspacem
221 is told of the outcome (VG_(am_notify_client_mmap)), so it can
222 update its records accordingly.
224 This then is the central advise-notify idiom for handling client
225 mmap/munmap/mprotect/shmat:
227 * ask aspacem for an advised placement (or a veto)
229 * if not vetoed, hand request to kernel, using the advised placement
231 * examine result, and if successful, notify aspacem of the result.
233 There are also many convenience functions, eg
234 VG_(am_mmap_anon_fixed_client), which do both phases entirely within
237 To debug all this, a sync-checker is provided. It reads
238 /proc/self/maps, compares what it sees with aspacem's records, and
239 complains if there is a difference. --sanity-level=3 runs it before
240 and after each syscall, which is a powerful, if slow way of finding
241 buggy syscall wrappers.
245 Up to and including Valgrind 2.4.1, x86 segmentation was used to
246    enforce separation of V and C, so that wild writes by C could not
247 trash V. This got called "pointercheck". Unfortunately, the new
248 more flexible memory layout, plus the need to be portable across
249 different architectures, means doing this in hardware is no longer
250 viable, and doing it in software is expensive. So at the moment we
255 #include "pub_l4re.h"
259 /*-----------------------------------------------------------------*/
261 /*--- The Address Space Manager's state. ---*/
263 /*-----------------------------------------------------------------*/
265 /* ------ start of STATE for the address-space manager ------ */
267 /* Max number of segments we can track. */
268 #define VG_N_SEGMENTS 5000
270 /* Max number of segment file names we can track. */
271 #define VG_N_SEGNAMES 1000
273 /* Max length of a segment file name. */
274 #define VG_MAX_SEGNAMELEN 1000
281 HChar fname[VG_MAX_SEGNAMELEN];
285 /* Filename table. _used is the high water mark; an entry is only
286 valid if its index >= 0, < _used, and its .inUse field == True.
287 The .mark field is used to garbage-collect dead entries.
289 static SegName segnames[VG_N_SEGNAMES];
290 static Int segnames_used = 0;
293 /* Array [0 .. nsegments_used-1] of all mappings. */
294 /* Sorted by .addr field. */
295 /* I: len may not be zero. */
296 /* I: overlapping segments are not allowed. */
297 /* I: the segments cover the entire address space precisely. */
298 /* Each segment can optionally hold an index into the filename table. */
300 static NSegment nsegments[VG_N_SEGMENTS];
301 static Int nsegments_used = 0;
303 #define Addr_MIN ((Addr)0)
304 #define Addr_MAX ((Addr)(-1ULL))
308 // The smallest address that aspacem will try to allocate
309 static Addr aspacem_minAddr = 0;
311 // The largest address that aspacem will try to allocate
312 static Addr aspacem_maxAddr = 0;
314 // Where aspacem will start looking for client space
315 static Addr aspacem_cStart = 0;
317 // Where aspacem will start looking for Valgrind space
318 static Addr aspacem_vStart = 0;
321 #define AM_SANITY_CHECK \
323 if (VG_(clo_sanity_level >= 3)) \
324 aspacem_assert(VG_(am_do_sync_check) \
325 (__PRETTY_FUNCTION__,__FILE__,__LINE__)); \
329 // some memory for the region list
330 #define N_REGIONS 128
333 static struct vrm_region_lists r_ls;
334 static struct vrm_region regions[N_REGIONS];
335 static struct vrm_area areas[N_AREAS];
337 /* ------ end of STATE for the address-space manager ------ */
339 /* ------ Forwards decls ------ */
341 static Int find_nsegment_idx ( Addr a );
343 static void parse_regionlist (
344 void (*record_mapping)( Addr addr, SizeT len, UInt prot,
345 ULong dev, ULong ino, ULong offset,
346 const UChar* filename ),
347 void (*record_gap)( Addr addr, SizeT len )
350 static void fetch_regionlist(void);
352 /*-----------------------------------------------------------------*/
354 /*--- Functions for finding information about file descriptors. ---*/
356 /*-----------------------------------------------------------------*/
358 /* Extract the device, inode and mode numbers for a fd. */
/* Fetch the device, inode and mode numbers for 'fd' by delegating to
   the common-layer helper.  Returns the helper's Bool (presumably False
   on failure -- see aspacemgr-common for the contract). */
360 Bool get_inode_for_fd ( Int fd, /*OUT*/ULong* dev,
361                        /*OUT*/ULong* ino, /*OUT*/UInt* mode )
363    return ML_(am_get_fd_d_i_m)(fd, dev, ino, mode);
366 /* Given a file descriptor, attempt to deduce its filename. To do
367 this, we use /proc/self/fd/<FD>. If this doesn't point to a file,
368 or if it doesn't exist, we return False. */
/* L4Re variant: obtain the filename for 'fd' from the region manager
   (vrm_get_filename) instead of /proc/self/fd.
   NOTE(review): VG_(strncpy) may leave 'buf' unterminated if the name
   length >= nbuf, and vrm_get_filename's NULL behaviour is not visible
   here -- confirm against its definition. */
370 Bool get_name_for_fd ( Int fd, /*OUT*/HChar* buf, Int nbuf )
372 #define DEBUG_MYSELF 0
374    VG_(strncpy)(buf, vrm_get_filename(fd), nbuf);
377        VG_(debugLog)(0, "aspacemgr", "## %s: filename for fd %d is %s\n", __func__, fd, buf);
385 /*-----------------------------------------------------------------*/
387 /*--- SegName array management. ---*/
389 /*-----------------------------------------------------------------*/
391 /* Searches the filename table to find an index for the given name.
392 If none is found, an index is allocated and the name stored. If no
393 space is available we just give up. If the string is too long to
396 #if !defined(VGO_l4re)
/* Return an index into segnames[] for 'name', reusing an existing
   entry when the same string is already stored; otherwise claim a free
   slot (advancing the high-water mark segnames_used if none is free),
   or bail out via ML_(am_barf_toolow) when the table is full. */
399 Int allocate_segname ( const HChar* name )
403    aspacem_assert(name);
405    if (0) VG_(debugLog)(0,"aspacem","allocate_segname %s\n", name);
407    len = VG_(strlen)(name);
       /* Reject names that would not fit (including the trailing NUL). */
408    if (len >= VG_MAX_SEGNAMELEN-1) {
412    /* first see if we already have the name. */
413    for (i = 0; i < segnames_used; i++) {
414       if (!segnames[i].inUse)
416       if (0 == VG_(strcmp)(name, &segnames[i].fname[0])) {
421    /* no we don't.  So look for a free slot. */
422    for (i = 0; i < segnames_used; i++)
423       if (!segnames[i].inUse)
426    if (i == segnames_used) {
427       /* no free slots .. advance the high-water mark. */
428       if (segnames_used+1 < VG_N_SEGNAMES) {
432          ML_(am_barf_toolow)("VG_N_SEGNAMES");
       /* Claim the slot and copy the name in, NUL-terminated. */
437    segnames[i].inUse = True;
438    for (j = 0; j < len; j++)
439       segnames[i].fname[j] = name[j];
440    aspacem_assert(len < VG_MAX_SEGNAMELEN);
441    segnames[i].fname[len] = 0;
446 /*-----------------------------------------------------------------*/
448 /*--- Displaying the segment array. ---*/
450 /*-----------------------------------------------------------------*/
/* Map a SegKind to a fixed-width 4-char tag for the segment listing:
   client-owned kinds are lowercase, Valgrind-owned kinds uppercase. */
452 static HChar* show_SegKind ( SegKind sk )
455       case SkFree:  return "    ";
456       case SkAnonC: return "anon";
457       case SkAnonV: return "ANON";
458       case SkFileC: return "file";
459       case SkFileV: return "FILE";
460       case SkShmC:  return "shm ";
461       case SkResvn: return "RSVN";
462       default:      return "????";
/* Render a reservation's ShrinkMode as a printable string. */
466 static HChar* show_ShrinkMode ( ShrinkMode sm )
469       case SmLower: return "SmLower";
470       case SmUpper: return "SmUpper";
471       case SmFixed: return "SmFixed";
472       default: return "Sm?????";
/* Format address/length 'aA' into 'buf' using a unit scaled by its
   magnitude; the threshold chain selects among bytes and the 2^20 /
   2^30 / 2^40 (M/G/T-style) ranges.  The exact format strings chosen
   per range are elided in this view. */
476 static void show_Addr_concisely ( /*OUT*/HChar* buf, Addr aA )
481    if (a < 10*1000*1000ULL) {
484    else if (a < 999999ULL * (1ULL<<20)) {
488    else if (a < 999999ULL * (1ULL<<30)) {
492    else if (a < 999999ULL * (1ULL<<40)) {
500    ML_(am_sprintf)(buf, fmt, a);
504 /* Show full details of an NSegment */
/* Dump every field of an NSegment, including its filename if the
   fnIdx refers to a valid, in-use, non-empty segnames[] entry. */
506 static void __attribute__ ((unused))
507 show_nsegment_full ( Int logLevel, NSegment* seg )
509    HChar* name = "(none)";
    /* Only dereference segnames[] when fnIdx is a valid live entry. */
510    if (seg->fnIdx >= 0 && seg->fnIdx < segnames_used
511        && segnames[seg->fnIdx].inUse
512        && segnames[seg->fnIdx].fname[0] != 0)
513       name = segnames[seg->fnIdx].fname;
515    VG_(debugLog)(logLevel, "aspacem",
516       "NSegment{%s, start=0x%llx, end=0x%llx, smode=%s, dev=%llu, "
517       "ino=%llu, offset=%llu, fnIdx=%d, hasR=%d, hasW=%d, hasX=%d, "
518       "hasT=%d, mark=%d, name=\"%s\"}\n",
519       show_SegKind(seg->kind),
522       show_ShrinkMode(seg->smode),
523       seg->dev, seg->ino, seg->offset, seg->fnIdx,
524       (Int)seg->hasR, (Int)seg->hasW, (Int)seg->hasX, (Int)seg->hasT,
531 /* Show an NSegment in a user-friendly-ish way. */
/* Show one NSegment in the compact, human-oriented listing format.
   The output shape depends on the segment kind: free segments print
   only the range; anon/shm add permission flags; file segments add
   dev/ino/offset and the filename index; reservations add the shrink
   mode; unknown kinds get an error line. */
533 static void show_nsegment ( Int logLevel, Int segNo, NSegment* seg )
    /* Length is computed in ULong to avoid Addr-width overflow on the
       end-of-address-space segment. */
536    ULong len = ((ULong)seg->end) - ((ULong)seg->start) + 1;
537    show_Addr_concisely(len_buf, len);
544            "%3d: %s %010llx-%010llx %s\n",
545            segNo, show_SegKind(seg->kind),
546            (ULong)seg->start, (ULong)seg->end, len_buf
550       case SkAnonC: case SkAnonV: case SkShmC:
553            "%3d: %s %010llx-%010llx %s %c%c%c%c%c N: %08lx\n",
554            segNo, show_SegKind(seg->kind),
555            (ULong)seg->start, (ULong)seg->end, len_buf,
556            seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
557            seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
558            seg->isCH ? 'H' : '-',
563       case SkFileC: case SkFileV:
566            "%3d: %s %010llx-%010llx %s %c%c%c%c%c d=0x%03llx "
567            "i=%-7lld o=%-7lld (%d)\n",
568            segNo, show_SegKind(seg->kind),
569            (ULong)seg->start, (ULong)seg->end, len_buf,
570            seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
571            seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
572            seg->isCH ? 'H' : '-',
573            seg->dev, seg->ino, (Long)seg->offset, seg->fnIdx
580            "%3d: %s %010llx-%010llx %s %c%c%c%c%c %s\n",
581            segNo, show_SegKind(seg->kind),
582            (ULong)seg->start, (ULong)seg->end, len_buf,
583            seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
584            seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
585            seg->isCH ? 'H' : '-',
586            show_ShrinkMode(seg->smode)
593            "%3d: ???? UNKNOWN SEGMENT KIND\n",
600 /* Print out a single segment (debugging only!).  Thin public wrapper
       around show_nsegment. */
601 void VG_(am_show_nsegment) ( Int logLevel, Int segNo, NSegment *seg )
603    show_nsegment(logLevel, segNo, seg);
605 /* Print out the segment array (debugging only!).  First lists the
       in-use filename-table entries, then every segment in order. */
606 void VG_(am_show_nsegments) ( Int logLevel, HChar* who )
609    VG_(debugLog)(logLevel, "aspacem",
610                  "<<< SHOW_SEGMENTS: %s (%d segments, %d segnames)\n",
611                  who, nsegments_used, segnames_used);
    /* Dump the filename table, skipping dead slots. */
612    for (i = 0; i < segnames_used; i++) {
613       if (!segnames[i].inUse)
615       VG_(debugLog)(logLevel, "aspacem",
616                     "(%2d) %s\n", i, segnames[i].fname);
    /* Then dump the segment array itself. */
618    for (i = 0; i < nsegments_used; i++)
619      show_nsegment( logLevel, i, &nsegments[i] );
620    VG_(debugLog)(logLevel, "aspacem",
625 /* Get the filename corresponding to this segment, if known and if it
626 has one. The returned name's storage cannot be assumed to be
627 persistent, so the caller should immediately copy the name
/* Return a pointer into the (non-persistent) filename table for this
   segment's name, or bail out early when the index is out of range or
   the entry is not in use. */
629 HChar* VG_(am_get_filename)( NSegment const * seg )
634    if (i < 0 || i >= segnames_used || !segnames[i].inUse)
637    return &segnames[i].fname[0];
640 /* Collect up the start addresses of all non-free, non-resvn segments.
641 The interface is a bit strange in order to avoid potential
642 segment-creation races caused by dynamic allocation of the result
645 The function first computes how many entries in the result
646 buffer *starts will be needed. If this number <= nStarts,
647 they are placed in starts[0..], and the number is returned.
648 If nStarts is not large enough, nothing is written to
649 starts[0..], and the negation of the size is returned.
651 Correct use of this function may mean calling it multiple times in
652 order to establish a suitably-sized buffer. */
/* Two-pass collection of start addresses of all mapped (non-free,
   non-reservation) segments: pass 1 counts them; if the caller's
   buffer is too small, the (negated) required size is returned and
   nothing is written; otherwise pass 2 fills starts[0..nSegs-1]. */
654 Int VG_(am_get_segment_starts)( Addr* starts, Int nStarts )
658    /* don't pass dumbass arguments */
659    aspacem_assert(nStarts >= 0);
    /* Pass 1: count qualifying segments. */
662    for (i = 0; i < nsegments_used; i++) {
663       if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn)
668    if (nSegs > nStarts) {
669       /* The buffer isn't big enough.  Tell the caller how big it needs
674    /* There's enough space.  So write into the result buffer. */
675    aspacem_assert(nSegs <= nStarts);
    /* Pass 2: record the start address of each qualifying segment. */
678    for (i = 0; i < nsegments_used; i++) {
679       if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn)
681       starts[j] = nsegments[i].start;
685    aspacem_assert(j == nSegs); /* this should not fail */
690 /*-----------------------------------------------------------------*/
692 /*--- Sanity checking and preening of the segment array. ---*/
694 /*-----------------------------------------------------------------*/
696 /* Check representational invariants for NSegments. */
/* Check the representational invariants of a single NSegment: non-NULL,
   non-empty and non-wrapping range, .mark clear, page-aligned start and
   end+1, then kind-specific field constraints (free/reservation segments
   must carry no file identity or permissions; anon segments no file
   identity; file segments a valid-or-absent fnIdx). */
698 static Bool sane_NSegment ( NSegment* s )
700    if (s == NULL) return False;
702    /* No zero sized segments and no wraparounds. */
703    if (s->start >= s->end) {
704      if (0) VG_(debugLog)(1,"aspacem", "s->start >=s->end\n");
708    /* .mark is used for admin purposes only. */
709    if (s->mark) return False;
711    /* require page alignment */
    /* NOTE(review): the message below says "no page aligned" -- should
       read "not page aligned". */
712    if (!VG_IS_PAGE_ALIGNED(s->start)) {
713        if (0) VG_(debugLog)(1,"aspacem", "%s: s->start is no page aligned\n", __func__);
716    if (!VG_IS_PAGE_ALIGNED(s->end+1)) {
717        if (0) VG_(debugLog)(1,"aspacem", "%s: s->end+1 is not page aligned\n", __func__);
724          if (0) VG_(debugLog)(1,"aspacem", "%s: case SkFree\n", __func__);
727             && s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
728             && !s->hasR && !s->hasW && !s->hasX && !s->hasT
731       case SkAnonC: case SkAnonV: case SkShmC:
733              VG_(debugLog)(1,"aspacem", "%s: case SkAnon\n", __func__);
736             && s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
            /* isCH (client-heap flag) is only meaningful for SkAnonC. */
737             && (s->kind==SkAnonC ? True : !s->isCH);
739       case SkFileC: case SkFileV:
740          if (0) VG_(debugLog)(1,"aspacem", "%s: case SkFile\n", __func__);
743             && (s->fnIdx == -1 ||
744                 (s->fnIdx >= 0 && s->fnIdx < segnames_used
745                                && segnames[s->fnIdx].inUse))
749          if (0) VG_(debugLog)(1,"aspacem", "%s: case SkResvn\n", __func__);
751             s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
752             && !s->hasR && !s->hasW && !s->hasX && !s->hasT
756          if (0) VG_(debugLog)(1,"aspacem", "%s: case default\n", __func__);
762 /* Try merging s2 into s1, if possible. If successful, s1 is
763 modified, and True is returned. Otherwise s1 is unchanged and
764 False is returned. */
/* Try merging s2 into s1, provided they have the same kind, are exactly
   adjacent (s1->end+1 == s2->start) and merging would not wrap around.
   Kind-specific compatibility is then required: anon segments must agree
   on permissions and isCH; file segments on permissions, dev/ino and a
   contiguous file offset; reservations must both be SmFixed.  On success
   s1 absorbs s2 (hasT is OR-ed in) and True is returned. */
766 static Bool maybe_merge_nsegments ( NSegment* s1, NSegment* s2 )
768    if (s1->kind != s2->kind)
771    if (s1->end+1 != s2->start)
774    /* reject cases which would cause wraparound */
775    if (s1->start > s2->end)
784       case SkAnonC: case SkAnonV:
785          if (s1->hasR == s2->hasR && s1->hasW == s2->hasW
786              && s1->hasX == s2->hasX && s1->isCH == s2->isCH
787 #if defined(VGO_l4re)
             /* On L4Re, anonymous mappings may only be merged when they
                are backed by the same dataspace (which possibly never
                happens in practice). */
792              && s1->dsNodePtr && s2->dsNodePtr && s1->dsNodePtr == s2->dsNodePtr
796             s1->hasT |= s2->hasT;
801       case SkFileC: case SkFileV:
802          if (s1->hasR == s2->hasR
803              && s1->hasW == s2->hasW && s1->hasX == s2->hasX
804              && s1->dev == s2->dev && s1->ino == s2->ino
            /* File contents must be contiguous: s2's offset equals s1's
               offset plus the address distance between the segments. */
805              && s2->offset == s1->offset
806                              + ((ULong)s2->start) - ((ULong)s1->start) ) {
808             s1->hasT |= s2->hasT;
817          if (s1->smode == SmFixed && s2->smode == SmFixed) {
831 /* Sanity-check and canonicalise the segment array (merge mergable
832 segments). Returns True if any segments were merged. */
/* Sanity-check and canonicalise the segment array in three passes:
   (1) assert full, gapless, sane coverage of the address space;
   (2) compact the array in place, merging each segment into its
       predecessor when maybe_merge_nsegments allows;
   (3) mark-and-sweep the filename table, releasing entries no longer
       referenced by any segment.
   Returns True iff pass 2 actually merged (shrank) anything. */
834 static Bool preen_nsegments ( void )
836    Int i, j, r, w, nsegments_used_old = nsegments_used;
838    /* Pass 1: check the segment array covers the entire address space
839       exactly once, and also that each segment is sane. */
840    aspacem_assert(nsegments_used > 0);
841    aspacem_assert(nsegments[0].start == Addr_MIN);
842    aspacem_assert(nsegments[nsegments_used-1].end == Addr_MAX);
844    aspacem_assert(sane_NSegment(&nsegments[0]));
845    for (i = 1; i < nsegments_used; i++) {
846       aspacem_assert(sane_NSegment(&nsegments[i]));
847       aspacem_assert(nsegments[i-1].end+1 == nsegments[i].start);
850    /* Pass 2: merge as much as possible, using
851       maybe_merge_segments. */
    /* 'w' is the write cursor, 'r' the read cursor of the in-place
       compaction. */
853    for (r = 1; r < nsegments_used; r++) {
854       if (maybe_merge_nsegments(&nsegments[w], &nsegments[r])) {
859          nsegments[w] = nsegments[r];
863    aspacem_assert(w > 0 && w <= nsegments_used);
866    /* Pass 3: free up unused string table slots */
867    /* clear mark bits */
868    for (i = 0; i < segnames_used; i++)
869       segnames[i].mark = False;
    /* mark those still referenced by a segment's fnIdx */
871    for (i = 0; i < nsegments_used; i++) {
872       j = nsegments[i].fnIdx;
873       aspacem_assert(j >= -1 && j < segnames_used);
875          aspacem_assert(segnames[j].inUse);
876          segnames[j].mark = True;
    /* sweep: release unmarked entries */
880    for (i = 0; i < segnames_used; i++) {
881       if (segnames[i].mark == False) {
882          segnames[i].inUse = False;
883          segnames[i].fname[0] = 0;
887    return nsegments_used != nsegments_used_old;
891 /* Check the segment array corresponds with the kernel's view of
892 memory layout. sync_check_ok returns True if no anomalies were
893 found, else False. In the latter case the mismatching segments are
896 The general idea is: we get the kernel to show us all its segments
897 and also the gaps in between. For each such interval, try and find
898 a sequence of appropriate intervals in our segment array which
899 cover or more than cover the kernel's interval, and which all have
900 suitable kinds/permissions etc.
902 Although any specific kernel interval is not matched exactly to a
903 valgrind interval or sequence thereof, eventually any disagreement
904 on mapping boundaries will be detected. This is because, if for
905 example valgrind's intervals cover a greater range than the current
906 kernel interval, it must be the case that a neighbouring free-space
907 interval belonging to valgrind cannot cover the neighbouring
908 free-space interval belonging to the kernel. So the disagreement
911 In other words, we examine each kernel interval in turn, and check
912 we do not disagree over the range of that interval. Because all of
913 the address space is examined, any disagreements must eventually be
917 static Bool sync_check_ok = False;
/* Callback invoked for each kernel-reported mapping during a sync
   check.  Finds the Valgrind segments spanning [addr, addr+len-1] and
   verifies each one is a mapped kind with matching permissions and
   (for file mappings, modulo listed exceptions) matching dev/ino and
   a consistent start/offset relationship.  On mismatch, clears the
   global sync_check_ok and logs both views. */
919 static void sync_check_mapping_callback ( Addr addr, SizeT len, UInt prot,
920                                           ULong dev, ULong ino, ULong offset,
921                                           const UChar* filename )
926    /* If a problem has already been detected, don't continue comparing
927       segments, so as to avoid flooding the output with error
935    /* The kernel should not give us wraparounds. */
936    aspacem_assert(addr <= addr + len - 1);
938    iLo = find_nsegment_idx( addr );
939    iHi = find_nsegment_idx( addr + len - 1 );
941    /* These 5 should be guaranteed by find_nsegment_idx. */
942    aspacem_assert(0 <= iLo && iLo < nsegments_used);
943    aspacem_assert(0 <= iHi && iHi < nsegments_used);
944    aspacem_assert(iLo <= iHi);
945    aspacem_assert(nsegments[iLo].start <= addr );
946    aspacem_assert(nsegments[iHi].end   >= addr + len - 1 );
948    /* x86 doesn't differentiate 'x' and 'r' (at least, all except the
949       most recent NX-bit enabled CPUs) and so recent kernels attempt
950       to provide execute protection by placing all executable mappings
951       low down in the address space and then reducing the size of the
952       code segment to prevent code at higher addresses being executed.
954       These kernels report which mappings are really executable in
955       the /proc/self/maps output rather than mirroring what was asked
956       for when each mapping was created. In order to cope with this we
957       have a sloppyXcheck mode which we enable on x86 - in this mode we
958       allow the kernel to report execute permission when we weren't
959       expecting it but not vice versa. */
960    sloppyXcheck = False;
961 #  if defined(VGA_x86)
965    /* NSegments iLo .. iHi inclusive should agree with the presented
967    for (i = iLo; i <= iHi; i++) {
969       Bool same, cmp_offsets, cmp_devino;
972       /* compare the kernel's offering against ours. */
       /* Every segment in the spanned range must be a mapped kind. */
973       same = nsegments[i].kind == SkAnonC
974              || nsegments[i].kind == SkAnonV
975              || nsegments[i].kind == SkFileC
976              || nsegments[i].kind == SkFileV
977              || nsegments[i].kind == SkShmC;
       /* Reconstruct the prot bits our records imply. */
980       if (nsegments[i].hasR) seg_prot |= VKI_PROT_READ;
981       if (nsegments[i].hasW) seg_prot |= VKI_PROT_WRITE;
982       if (nsegments[i].hasX) seg_prot |= VKI_PROT_EXEC;
985          = nsegments[i].kind == SkFileC || nsegments[i].kind == SkFileV;
988          = nsegments[i].dev != 0 || nsegments[i].ino != 0;
990       /* Consider other reasons to not compare dev/inode */
992       /* bproc does some godawful hack on /dev/zero at process
993          migration, which changes the name of it, and its dev & ino */
994       if (filename && 0==VG_(strcmp)(filename, "/dev/zero (deleted)"))
997       /* hack apparently needed on MontaVista Linux */
998       if (filename && VG_(strstr)(filename, "/.lib-ro/"))
1001       /* If we are doing sloppy execute permission checks then we
1002          allow segment to have X permission when we weren't expecting
1003          it (but not vice versa) so if the kernel reported execute
1004          permission then pretend that this segment has it regardless
1005          of what we were expecting. */
1006       if (sloppyXcheck && (prot & VKI_PROT_EXEC) != 0) {
1007          seg_prot |= VKI_PROT_EXEC;
1013            ? (nsegments[i].dev == dev && nsegments[i].ino == ino)
        /* File offset consistency: our start-offset must equal the
           kernel's addr-offset. */
1016            ? nsegments[i].start-nsegments[i].offset == addr-offset
1019          sync_check_ok = False;
1022            "sync_check_mapping_callback: segment mismatch: V's seg:\n");
1023          show_nsegment_full( 0, &nsegments[i] );
1028       /* Looks harmless.  Keep going. */
1032       VG_(debugLog)(0,"aspacem",
1033                     "sync_check_mapping_callback: "
1034                     "segment mismatch: kernel's seg:\n");
1035       VG_(debugLog)(0,"aspacem",
1036                     "start=0x%llx end=0x%llx prot=%u "
1037                     "dev=%llu ino=%llu offset=%lld name=\"%s\"\n",
1038                     (ULong)addr, ((ULong)addr) + ((ULong)len) - 1,
1039                     prot, dev, ino, offset,
1040                     filename ? (HChar*)filename : "(none)" );
/* Callback invoked for each kernel-reported gap during a sync check.
   Every Valgrind segment spanning the gap must be SkFree or SkResvn
   (reservations are unmapped from the kernel's point of view).  On
   mismatch, clears sync_check_ok and logs both views. */
1044 static void sync_check_gap_callback ( Addr addr, SizeT len )
1048    /* If a problem has already been detected, don't continue comparing
1049       segments, so as to avoid flooding the output with error
1057    /* The kernel should not give us wraparounds. */
1058    aspacem_assert(addr <= addr + len - 1);
1060    iLo = find_nsegment_idx( addr );
1061    iHi = find_nsegment_idx( addr + len - 1 );
1063    /* These 5 should be guaranteed by find_nsegment_idx. */
1064    aspacem_assert(0 <= iLo && iLo < nsegments_used);
1065    aspacem_assert(0 <= iHi && iHi < nsegments_used);
1066    aspacem_assert(iLo <= iHi);
1067    aspacem_assert(nsegments[iLo].start <= addr );
1068    aspacem_assert(nsegments[iHi].end   >= addr + len - 1 );
1070    /* NSegments iLo .. iHi inclusive should agree with the presented
1072    for (i = iLo; i <= iHi; i++) {
1076       /* compare the kernel's offering against ours. */
1077       same = nsegments[i].kind == SkFree
1078              || nsegments[i].kind == SkResvn;
1081          sync_check_ok = False;
       /* NOTE(review): the message below names
          sync_check_mapping_callback but is emitted from
          sync_check_gap_callback -- likely a copy-paste slip. */
1084            "sync_check_mapping_callback: segment mismatch: V's gap:\n");
1085          show_nsegment_full( 0, &nsegments[i] );
1090       /* Looks harmless.  Keep going. */
1094       VG_(debugLog)(0,"aspacem",
1095                     "sync_check_gap_callback: segment mismatch: kernel's gap:\n");
1096       VG_(debugLog)(0,"aspacem",
1097                     "start=0x%llx end=0x%llx\n",
1098                     (ULong)addr, ((ULong)addr) + ((ULong)len) - 1 );
1103 /* Sanity check: check that Valgrind and the kernel agree on the
1104 address space layout. Prints offending segments and call point if
1105 a discrepancy is detected, but does not abort the system. Returned
1106 Bool is False if a discrepancy was found. */
/* Run a full Valgrind-vs-kernel layout comparison (L4Re variant: walks
   the region list via parse_regionlist rather than /proc/self/maps).
   The callbacks clear sync_check_ok on any discrepancy; on failure the
   offending call site and the segment array are logged.  Returns False
   iff a discrepancy was found. */
1108 Bool VG_(am_do_sync_check) ( const HChar* fn,
1109                              const HChar* file, Int line )
    /* Assume success until a callback reports otherwise. */
1111    sync_check_ok = True;
1113      VG_(debugLog)(0,"aspacem", "do_sync_check %s:%d\n", file,line);
1114    parse_regionlist( sync_check_mapping_callback,
1115                      sync_check_gap_callback );
1116    if (!sync_check_ok) {
1117       VG_(debugLog)(0,"aspacem",
1118                     "sync check at %s:%d (%s): FAILED\n",
1120       VG_(debugLog)(0,"aspacem", "\n");
1125       VG_(am_show_nsegments)(0,"post syncheck failure");
1126       VG_(sprintf)(buf, "/bin/cat /proc/%d/maps", VG_(getpid)());
1132    return sync_check_ok;
1135 /* Hook to allow sanity checks to be done from aspacemgr-common.c. */
/* (Body not visible in this view -- presumably delegates to the sync
   checker above; confirm against the full source.) */
1136 void ML_(am_do_sanity_check)( void )
1142 /*-----------------------------------------------------------------*/
1144 /*--- Low level access / modification of the segment array. ---*/
1146 /*-----------------------------------------------------------------*/
1148 /* Binary search the interval array for a given address. Since the
1149 array covers the entire address space the search cannot fail. The
1150 _WRK function does the real work. Its caller (just below) caches
1151 the results thereof, to save time. With N_CACHE of 63 we get a hit
1152 rate exceeding 90% when running OpenOffice.
1154 Re ">> 12", it doesn't matter that the page size of some targets
1155 might be different from 12. Really "(a >> 12) % N_CACHE" is merely
1156 a hash function, and the actual cache entry is always validated
1157 correctly against the selected cache entry before use.
1159 /* Don't call find_nsegment_idx_WRK; use find_nsegment_idx instead. */
/* noinline: keep the slow path out of the inlined cache-lookup wrapper. */
1160 __attribute__((noinline))
1161 static Int find_nsegment_idx_WRK ( Addr a )
1163 Addr a_mid_lo, a_mid_hi;
1166 hi = nsegments_used-1;
1168 /* current unsearched space is from lo to hi, inclusive. */
/* The segment array tiles the whole address space, so exhausting the
   search range indicates internal corruption — abort hard. */
1170 /* Not found. This can't happen. */
1171 ML_(am_barf)("find_nsegment_idx: not found");
1173 mid = (lo + hi) / 2;
1174 a_mid_lo = nsegments[mid].start;
1175 a_mid_hi = nsegments[mid].end;
/* Classic binary search: narrow to the half that can contain 'a'. */
1177 if (a < a_mid_lo) { hi = mid-1; continue; }
1178 if (a > a_mid_hi) { lo = mid+1; continue; }
1179 aspacem_assert(a >= a_mid_lo && a <= a_mid_hi);
1180 aspacem_assert(0 <= mid && mid < nsegments_used);
/* Caching wrapper around find_nsegment_idx_WRK: a small direct-mapped
   cache keyed on (a >> 12) % N_CACHE. Entries are fully validated
   against nsegments[] before use, so a stale entry is merely a miss,
   never a wrong answer. */
1185 inline static Int find_nsegment_idx ( Addr a )
1188 static Addr cache_pageno[N_CACHE];
1189 static Int cache_segidx[N_CACHE];
1190 static Bool cache_inited = False;
/* n_q/n_m: query/miss counters, only reported by the disabled
   debug statement below. */
1192 static UWord n_q = 0;
1193 static UWord n_m = 0;
1197 if (LIKELY(cache_inited)) {
/* First call: invalidate every cache slot. */
1200 for (ix = 0; ix < N_CACHE; ix++) {
1201 cache_pageno[ix] = 0;
1202 cache_segidx[ix] = -1;
1204 cache_inited = True;
1207 ix = (a >> 12) % N_CACHE;
/* Dead debug code ("0 &&"): periodic hit-rate dump, left disabled. */
1210 if (0 && 0 == (n_q & 0xFFFF))
1211 VG_(debugLog)(0,"xxx","find_nsegment_idx: %lu %lu\n", n_q, n_m);
/* Cache hit only if the tag matches AND the cached index still
   denotes a segment that actually contains 'a'. */
1213 if ((a >> 12) == cache_pageno[ix]
1214 && cache_segidx[ix] >= 0
1215 && cache_segidx[ix] < nsegments_used
1216 && nsegments[cache_segidx[ix]].start <= a
1217 && a <= nsegments[cache_segidx[ix]].end) {
1219 /* aspacem_assert( cache_segidx[ix] == find_nsegment_idx_WRK(a) ); */
1220 return cache_segidx[ix];
/* Miss: do the real search and refill this slot. */
1224 cache_segidx[ix] = find_nsegment_idx_WRK(a);
1225 cache_pageno[ix] = a >> 12;
1226 return cache_segidx[ix];
/* NOTE(review): identifier starts with a double underscore, which is
   reserved for the implementation (C99 7.1.3) — consider renaming if
   this is project-owned. Purpose not evident from this excerpt. */
1230 int __callcount = 0;
1233 /* Finds the segment containing 'a'. Only returns file/anon/resvn
1234 segments. This returns a 'NSegment const *' - a pointer to
/* Returns NULL (on elided lines) for SkFree areas, per the comment
   above; callers must handle that. */
1236 NSegment const * VG_(am_find_nsegment) ( Addr a )
1239 Int i = find_nsegment_idx(a);
1240 aspacem_assert(i >= 0 && i < nsegments_used);
1241 aspacem_assert(nsegments[i].start <= a);
1242 aspacem_assert(a <= nsegments[i].end);
1243 if (nsegments[i].kind == SkFree)
1246 return &nsegments[i];
1250 /* Given a pointer to a seg, tries to figure out which one it is in
1251 nsegments[..]. Very paranoid. */
1252 static Int segAddr_to_index ( NSegment* seg )
/* Reject pointers outside the live portion of the array. */
1255 if (seg < &nsegments[0] || seg >= &nsegments[nsegments_used])
/* Recompute the index from the byte offset and cross-check it; only a
   correctly-aligned, in-range pointer survives all three tests. */
1257 i = ((UChar*)seg - (UChar*)(&nsegments[0])) / sizeof(NSegment);
1258 if (i < 0 || i >= nsegments_used)
1260 if (seg == &nsegments[i])
1266 /* Find the next segment along from 'here', if it is a file/anon/resvn
/* 'fwds' selects search direction; returns NULL (on elided lines) when
   'here' is invalid or no qualifying neighbour exists. */
1268 NSegment const * VG_(am_next_nsegment) ( NSegment* here, Bool fwds )
1270 Int i = segAddr_to_index(here);
1271 if (i < 0 || i >= nsegments_used)
1275 if (i >= nsegments_used)
/* Only mapped/reserved kinds are returned; SkFree is skipped. */
1282 switch (nsegments[i].kind) {
1283 case SkFileC: case SkFileV: case SkShmC:
1284 case SkAnonC: case SkAnonV: case SkResvn:
1285 return &nsegments[i];
1293 /* Trivial fn: return the total amount of space in anonymous mappings,
1294 both for V and the client. Is used for printing stats in
1295 out-of-memory messages. */
1296 ULong VG_(am_get_anonsize_total)( void )
/* Sum the sizes (end - start + 1, inclusive bounds) of every SkAnonC
   and SkAnonV segment. */
1300 for (i = 0; i < nsegments_used; i++) {
1301 if (nsegments[i].kind == SkAnonC || nsegments[i].kind == SkAnonV) {
1302 total += (ULong)nsegments[i].end
1303 - (ULong)nsegments[i].start + 1ULL;
1310 /* Test if a piece of memory is addressable by the client with at
1311 least the "prot" protection permissions by examining the underlying
1312 segments. If freeOk is True then SkFree areas are also allowed.
1315 Bool is_valid_for_client( Addr start, SizeT len, UInt prot, Bool freeOk )
1318 Bool needR, needW, needX;
/* Zero-length ranges are vacuously valid. */
1321 return True; /* somewhat dubious case */
1322 if (start + len < start)
1323 return False; /* reject wraparounds */
/* Decompose 'prot' into the individual permissions to check. */
1325 needR = toBool(prot & VKI_PROT_READ);
1326 needW = toBool(prot & VKI_PROT_WRITE);
1327 needX = toBool(prot & VKI_PROT_EXEC);
1329 iLo = find_nsegment_idx(start);
1330 aspacem_assert(start >= nsegments[iLo].start);
1332 if (start+len-1 <= nsegments[iLo].end) {
1333 /* This is a speedup hack which avoids calling find_nsegment_idx
1334 a second time when possible. It is always correct to just
1335 use the "else" clause below, but is_valid_for_client is
1336 called a lot by the leak checker, so avoiding pointless calls
1337 to find_nsegment_idx, which can be expensive, is helpful. */
1340 iHi = find_nsegment_idx(start + len - 1);
/* Every segment in [iLo, iHi] must be a client-visible kind (or
   free/reserved when freeOk) and must grant each required permission. */
1343 for (i = iLo; i <= iHi; i++) {
1344 if ( (nsegments[i].kind == SkFileC
1345 || nsegments[i].kind == SkAnonC
1346 || nsegments[i].kind == SkShmC
1347 || (nsegments[i].kind == SkFree && freeOk)
1348 || (nsegments[i].kind == SkResvn && freeOk))
1349 && (needR ? nsegments[i].hasR : True)
1350 && (needW ? nsegments[i].hasW : True)
1351 && (needX ? nsegments[i].hasX : True) ) {
1360 /* Test if a piece of memory is addressable by the client with at
1361 least the "prot" protection permissions by examining the underlying
/* Thin exported wrapper: free/reserved areas do NOT count as valid. */
1363 Bool VG_(am_is_valid_for_client)( Addr start, SizeT len,
1366 return is_valid_for_client( start, len, prot, False/*free not OK*/ );
1369 /* Variant of VG_(am_is_valid_for_client) which allows free areas to
1370 be consider part of the client's addressable space. It also
1371 considers reservations to be allowable, since from the client's
1372 point of view they don't exist. */
1373 Bool VG_(am_is_valid_for_client_or_free_or_resvn)
1374 ( Addr start, SizeT len, UInt prot )
1376 return is_valid_for_client( start, len, prot, True/*free is OK*/ );
1380 /* Test if a piece of memory is addressable by valgrind with at least
1381 PROT_NONE protection permissions by examining the underlying
1383 static Bool is_valid_for_valgrind( Addr start, SizeT len )
/* Zero-length ranges are vacuously valid. */
1388 return True; /* somewhat dubious case */
1389 if (start + len < start)
1390 return False; /* reject wraparounds */
/* Every segment covering the range must be a valgrind-owned mapping
   (SkFileV / SkAnonV); unlike the client variant, permissions bits
   are not checked here. */
1392 iLo = find_nsegment_idx(start);
1393 iHi = find_nsegment_idx(start + len - 1);
1394 for (i = iLo; i <= iHi; i++) {
1395 if (nsegments[i].kind == SkFileV || nsegments[i].kind == SkAnonV) {
1405 /* Returns True if any part of the address range is marked as having
1406 translations made from it. This is used to determine when to
1407 discard code, so if in doubt return True. */
1409 static Bool any_Ts_in_range ( Addr start, SizeT len )
/* Unlike the validity checkers above, empty/wrapping ranges are a
   caller bug here, hence the hard asserts. */
1412 aspacem_assert(len > 0);
1413 aspacem_assert(start + len > start);
/* Report True as soon as any covered segment carries the hasT
   ("translations taken from here") flag. */
1414 iLo = find_nsegment_idx(start);
1415 iHi = find_nsegment_idx(start + len - 1);
1416 for (i = iLo; i <= iHi; i++) {
1417 if (nsegments[i].hasT)
1424 /*-----------------------------------------------------------------*/
1426 /*--- Modifying the segment array, and constructing segments. ---*/
1428 /*-----------------------------------------------------------------*/
1430 /* Split the segment containing 'a' into two, so that 'a' is
1431 guaranteed to be the start of a new segment. If 'a' is already the
1432 start of a segment, do nothing. */
1434 static void split_nsegment_at ( Addr a )
1438 aspacem_assert(a > 0);
1439 aspacem_assert(VG_IS_PAGE_ALIGNED(a));
1441 i = find_nsegment_idx(a);
1442 aspacem_assert(i >= 0 && i < nsegments_used);
1444 if (nsegments[i].start == a)
1445 /* 'a' is already the start point of a segment, so nothing to be
1449 /* else we have to slide the segments upwards to make a hole */
1450 if (nsegments_used >= VG_N_SEGMENTS)
1451 ML_(am_barf_toolow)("VG_N_SEGMENTS")
1452 for (j = nsegments_used-1; j > i; j--)
1453 nsegments[j+1] = nsegments[j];
/* Duplicate segment i into the freed slot and adjust the boundary so
   segment i ends just below 'a' and segment i+1 starts at 'a'. */
1456 nsegments[i+1] = nsegments[i];
1457 nsegments[i+1].start = a;
1458 nsegments[i].end = a-1;
/* For file mappings, the upper half's file offset must advance by the
   number of bytes now covered by the lower half. */
1460 if (nsegments[i].kind == SkFileV || nsegments[i].kind == SkFileC)
1461 nsegments[i+1].offset
1462 += ((ULong)nsegments[i+1].start) - ((ULong)nsegments[i].start);
1464 aspacem_assert(sane_NSegment(&nsegments[i]));
1465 aspacem_assert(sane_NSegment(&nsegments[i+1]));
1469 /* Do the minimum amount of segment splitting necessary to ensure that
1470 sLo is the first address denoted by some segment and sHi is the
1471 highest address denoted by some other segment. Returns the indices
1472 of the lowest and highest segments in the range. */
1475 void split_nsegments_lo_and_hi ( Addr sLo, Addr sHi,
/* Both sLo and sHi+1 must be page boundaries: segments are page-granular. */
1479 aspacem_assert(sLo < sHi);
1480 aspacem_assert(VG_IS_PAGE_ALIGNED(sLo));
1481 aspacem_assert(VG_IS_PAGE_ALIGNED(sHi+1));
/* Split below the range and just above it, then look the indices up. */
1484 split_nsegment_at(sLo);
1486 split_nsegment_at(sHi+1);
1488 *iLo = find_nsegment_idx(sLo);
1489 *iHi = find_nsegment_idx(sHi);
1490 aspacem_assert(0 <= *iLo && *iLo < nsegments_used);
1491 aspacem_assert(0 <= *iHi && *iHi < nsegments_used);
1492 aspacem_assert(*iLo <= *iHi);
1493 aspacem_assert(nsegments[*iLo].start == sLo);
1494 aspacem_assert(nsegments[*iHi].end == sHi);
1495 /* Not that I'm overly paranoid or anything, definitely not :-) */
1499 /* Add SEG to the collection, deleting/truncating any it overlaps.
1500 This deals with all the tricky cases of splitting up segments as
1503 static void add_segment ( NSegment* seg )
1505 Int i, iLo, iHi, delta;
1506 Bool segment_is_sane;
1508 Addr sStart = seg->start;
1509 Addr sEnd = seg->end;
1511 aspacem_assert(sStart <= sEnd);
1512 aspacem_assert(VG_IS_PAGE_ALIGNED(sStart));
1513 aspacem_assert(VG_IS_PAGE_ALIGNED(sEnd+1));
/* Show the offending segment before asserting, to aid debugging. */
1515 segment_is_sane = sane_NSegment(seg);
1516 if (!segment_is_sane) show_nsegment_full(0,seg);
1517 aspacem_assert(segment_is_sane);
/* Make the range [sStart, sEnd] exactly segment-aligned, so seg can
   replace the covered segments wholesale. */
1519 split_nsegments_lo_and_hi( sStart, sEnd, &iLo, &iHi );
1521 /* Now iLo .. iHi inclusive is the range of segment indices which
1522 seg will replace. If we're replacing more than one segment,
1523 slide those above the range down to fill the hole. */
1525 aspacem_assert(delta >= 0);
1527 for (i = iLo; i < nsegments_used-delta; i++)
1528 nsegments[i] = nsegments[i+delta];
1529 nsegments_used -= delta;
1532 nsegments[iLo] = *seg;
/* Merging opportunities may have been created; preen to re-merge
   adjacent compatible segments. */
1534 (void)preen_nsegments();
1535 if (0) VG_(am_show_nsegments)(0,"AFTER preen (add_segment)");
/* Attach an opaque node pointer (L4Re region-manager bookkeeping,
   judging by the dsNodePtr field) to the given segment. */
1539 void VG_(am_set_nodeptr)(NSegment* const seg, Addr const ptr)
1541 seg->dsNodePtr = ptr;
1545 /* Clear out an NSegment record. */
1547 static void init_nsegment ( /*OUT*/NSegment* seg )
/* Defaults: fixed placement mode, all permission/translation/client-heap
   flags off, no region-manager node attached. Remaining fields are set
   on lines not shown in this excerpt. */
1552 seg->smode = SmFixed;
1558 seg->hasR = seg->hasW = seg->hasX = seg->hasT = seg->isCH = False;
1560 seg->dsNodePtr = (Addr) NULL;
1563 /* Make an NSegment which holds a reservation. */
1565 static void init_resvn ( /*OUT*/NSegment* seg, Addr start, Addr end )
/* [start, end] is inclusive and must be page-granular. */
1567 aspacem_assert(start < end);
1568 aspacem_assert(VG_IS_PAGE_ALIGNED(start));
1569 aspacem_assert(VG_IS_PAGE_ALIGNED(end+1));
1571 seg->kind = SkResvn;
1577 /*-----------------------------------------------------------------*/
1579 /*--- Startup, including reading /proc/self/maps. ---*/
1581 /*-----------------------------------------------------------------*/
/* Callback invoked once per mapping while parsing the initial address
   space layout; builds an NSegment from the reported attributes and
   installs it. */
1583 static void read_maps_callback ( Addr addr, SizeT len, UInt prot,
1584 ULong dev, ULong ino, ULong offset,
1585 const UChar* filename )
1588 init_nsegment( &seg );
1590 seg.end = addr+len-1;
1593 seg.offset = offset;
1594 seg.hasR = toBool(prot & VKI_PROT_READ);
1595 seg.hasW = toBool(prot & VKI_PROT_WRITE);
1596 seg.hasX = toBool(prot & VKI_PROT_EXEC);
1599 /* Don't use the presence of a filename to decide whether a segment
1600 in the initial /proc/self/maps is an AnonV
1601 or FileV segment as some systems don't report the filename. Use
1602 the device and inode numbers instead. Fixes bug #124528. */
1605 // if (seg.start == 0xaffff000)
1606 // seg.kind = SkAnonC;
/* Nonzero dev+ino indicates a file-backed mapping. */
1608 if (dev != 0 && ino != 0)
1611 seg.fnIdx = allocate_segname( filename );
1613 if (0) show_nsegment( 2,0, &seg );
1614 add_segment( &seg );
1617 /* Initialise the address space manager, setting up the initial
1618 segment list, and reading /proc/self/maps into it. This must
1619 be called before any other function.
1621 Takes a pointer to the SP at the time V gained control. This is
1622 taken to be the highest usable address (more or less). Based on
1623 that (and general consultation of tea leaves, etc) return a
1624 suggested end address for the client's stack. */
1626 Addr VG_(am_startup) ( Addr sp_at_startup )
1629 Addr suggested_clstack_top;
/* The manager assumes a flat model where Word/Addr/SizeT/SSizeT all
   match the native pointer width. */
1631 aspacem_assert(sizeof(Word) == sizeof(void*));
1632 aspacem_assert(sizeof(Addr) == sizeof(void*));
1633 aspacem_assert(sizeof(SizeT) == sizeof(void*));
1634 aspacem_assert(sizeof(SSizeT) == sizeof(void*));
1636 /* Check that we can store the largest imaginable dev, ino and
1637 offset numbers in an NSegment. */
1638 aspacem_assert(sizeof(seg.dev) == 8);
1639 aspacem_assert(sizeof(seg.ino) == 8);
1640 aspacem_assert(sizeof(seg.offset) == 8);
1641 aspacem_assert(sizeof(seg.mode) == 4);
1643 /* Add a single interval covering the entire address space. */
1644 init_nsegment(&seg);
1646 seg.start = Addr_MIN;
1651 /* Establish address limits and block out unusable parts
1654 VG_(debugLog)(2, "aspacem",
1655 " sp_at_startup = 0x%010llx (supplied)\n",
1656 (ULong)sp_at_startup );
1658 /* To determine maxAddr, we ask the region manager */
/* Skip the zero page; top of usable space comes from the (L4Re)
   region manager query whose result lands in r_ls. */
1661 aspacem_minAddr = 0x1000;
1662 aspacem_maxAddr = r_ls.max_addr;
/* Client mappings grow from the bottom; valgrind's own mappings start
   at the midpoint of the usable range (page-rounded). */
1664 aspacem_cStart = aspacem_minAddr; // 64M
1665 aspacem_vStart = VG_PGROUNDUP((aspacem_minAddr + aspacem_maxAddr + 1) / 2);
1666 # ifdef ENABLE_INNER
/* Inner-valgrind setups need extra headroom below vStart. */
1667 aspacem_vStart -= 0x10000000; // 256M
/* Leave a 16MB guard below the top of memory for the client stack. */
1670 suggested_clstack_top = aspacem_maxAddr - 16*1024*1024ULL
1673 aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_minAddr));
1674 aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_maxAddr + 1));
1675 aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_cStart));
1676 aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_vStart));
1677 aspacem_assert(VG_IS_PAGE_ALIGNED(suggested_clstack_top + 1));
1679 VG_(debugLog)(0, "aspacem",
1680 " minAddr = 0x%010llx (computed)\n",
1681 (ULong)aspacem_minAddr);
1682 VG_(debugLog)(0, "aspacem",
1683 " maxAddr = 0x%010llx (computed)\n",
1684 (ULong)aspacem_maxAddr);
1685 VG_(debugLog)(0, "aspacem",
1686 " cStart = 0x%010llx (computed)\n",
1687 (ULong)aspacem_cStart);
1688 VG_(debugLog)(0, "aspacem",
1689 " vStart = 0x%010llx (computed)\n",
1690 (ULong)aspacem_vStart);
1691 VG_(debugLog)(0, "aspacem",
1692 "suggested_clstack_top = 0x%010llx (computed)\n",
1693 (ULong)suggested_clstack_top);
/* Block out the address space outside [minAddr, maxAddr] with
   reservations so the advisor never allocates there. */
1695 if (aspacem_cStart > Addr_MIN) {
1696 init_resvn(&seg, Addr_MIN, aspacem_cStart-1);
1699 if (aspacem_maxAddr < Addr_MAX) {
1700 init_resvn(&seg, aspacem_maxAddr+1, Addr_MAX);
1704 /* Create a 1-page reservation at the notional initial
1705 client/valgrind boundary. This isn't strictly necessary, but
1706 because the advisor does first-fit and starts searches for
1707 valgrind allocations at the boundary, this is kind of necessary
1708 in order to get it to start allocating in the right place. */
1709 init_resvn(&seg, aspacem_vStart, aspacem_vStart + VKI_PAGE_SIZE - 1);
1712 VG_(am_show_nsegments)(2, "Initial layout");
1714 VG_(debugLog)(2, "aspacem", "Reading /proc/self/maps\n");
/* NULL gap callback: only mappings matter during initial population. */
1715 parse_regionlist( read_maps_callback, NULL );
1717 VG_(am_show_nsegments)(2, "With contents of /proc/self/maps");
1720 return suggested_clstack_top;
1724 /*-----------------------------------------------------------------*/
1726 /*--- The core query-notify mechanism. ---*/
1728 /*-----------------------------------------------------------------*/
1729 /* Query aspacem to ask where a mapping should go. */
1730 Addr VG_(am_get_advisory) ( MapRequest* req,
1734 /* This function implements allocation policy.
1736 The nature of the allocation request is determined by req, which
1737 specifies the start and length of the request and indicates
1738 whether the start address is mandatory, a hint, or irrelevant,
1739 and by forClient, which says whether this is for the client or
1742 Return values: the request can be vetoed (*ok is set to False),
1743 in which case the caller should not attempt to proceed with
1744 making the mapping. Otherwise, *ok is set to True, the caller
1745 may proceed, and the preferred address at which the mapping
1746 should happen is returned.
1748 Note that this is an advisory system only: the kernel can in
1749 fact do whatever it likes as far as placement goes, and we have
1750 no absolute control over it.
1752 Allocations will never be granted in a reserved area.
1754 The Default Policy is:
1756 Search the address space for two free intervals: one of them
1757 big enough to contain the request without regard to the
1758 specified address (viz, as if it was a floating request) and
1759 the other being able to contain the request at the specified
1760 address (viz, as if it were a fixed request). Then, depending on
1761 the outcome of the search and the kind of request made, decide
1762 whether the request is allowable and what address to advise.
1764 The Default Policy is overridden by Policy Exception #1:
1766 If the request is for a fixed client map, we are prepared to
1767 grant it providing all areas inside the request are either
1768 free, reservations, or mappings belonging to the client. In
1769 other words we are prepared to let the client trash its own
1770 mappings if it wants to.
1772 The Default Policy is overridden by Policy Exception #2:
1774 If the request is for a hinted client map, we are prepared to
1775 grant it providing all areas inside the request are either
1776 free or reservations. In other words we are prepared to let
1777 the client have a hinted mapping anywhere it likes provided
1778 it does not trash either any of its own mappings or any of
1779 valgrind's mappings.
1782 Addr holeStart, holeEnd, holeLen;
1783 Bool fixed_not_required;
/* Client and valgrind allocations start their first-fit search at
   different points in the address space. */
1785 Addr startPoint = forClient ? aspacem_cStart : aspacem_vStart;
1787 Addr reqStart = req->rkind==MAny ? 0 : req->start;
1788 Addr reqEnd = reqStart + req->len - 1;
1789 Addr reqLen = req->len;
1791 /* These hold indices for segments found during search, or -1 if not
1796 aspacem_assert(nsegments_used > 0);
1799 VG_(am_show_nsegments)(0,"getAdvisory");
1800 l4re_rm_show_lists();
1801 VG_(debugLog)(0,"aspacem", "getAdvisory 0x%llx %lld\n",
1802 (ULong)req->start, (ULong)req->len);
1805 /* Reject zero-length requests */
1806 if (req->len == 0) {
1808 VG_(debugLog)(0,"aspacem", "%s: reject zero-length request\n", __func__);
1814 /* Reject wraparounds */
1815 if ((req->rkind==MFixed || req->rkind==MHint)
1816 && req->start + req->len < req->start) {
1818 VG_(debugLog)(0,"aspacem", "%s: reject wraparound\n", __func__);
1824 /* ------ Implement Policy Exception #1 ------ */
1826 if (forClient && req->rkind == MFixed) {
1827 Int iLo = find_nsegment_idx(reqStart);
1828 Int iHi = find_nsegment_idx(reqEnd);
/* Every covered segment must be free, reserved, or client-owned. */
1830 for (i = iLo; i <= iHi; i++) {
1831 if (nsegments[i].kind == SkFree
1832 || nsegments[i].kind == SkFileC
1833 || nsegments[i].kind == SkAnonC
1834 || nsegments[i].kind == SkShmC
1835 || nsegments[i].kind == SkResvn) {
1843 /* Acceptable. Granted. */
1847 /* Not acceptable. Fail. */
1850 VG_(debugLog)(0,"aspacem", "%s: Policy Exception #1 not acceptable\n", __func__);
1855 /* ------ Implement Policy Exception #2 ------ */
1857 if (forClient && req->rkind == MHint) {
1858 Int iLo = find_nsegment_idx(reqStart);
1859 Int iHi = find_nsegment_idx(reqEnd);
/* For hints, only free/reserved areas may be overlaid. */
1861 for (i = iLo; i <= iHi; i++) {
1862 if (nsegments[i].kind == SkFree
1863 || nsegments[i].kind == SkResvn) {
1871 /* Acceptable. Granted. */
1875 /* Not acceptable. Fall through to the default policy. */
1878 /* ------ Implement the Default Policy ------ */
1880 /* Don't waste time looking for a fixed match if not requested to. */
1881 fixed_not_required = req->rkind == MAny;
1883 i = find_nsegment_idx(startPoint);
1885 /* Examine holes from index i back round to i-1. Record the
1886 index of the first fixed hole and the first floating hole which would
1887 satisfy the request. */
1888 for (j = 0; j < nsegments_used; j++) {
1890 if (nsegments[i].kind != SkFree) {
1892 if (i >= nsegments_used) i = 0;
1896 holeStart = nsegments[i].start;
1897 holeEnd = nsegments[i].end;
/* Free holes must lie entirely inside the usable address range. */
1900 aspacem_assert(holeStart <= holeEnd);
1901 aspacem_assert(aspacem_minAddr <= holeStart);
1902 aspacem_assert(holeEnd <= aspacem_maxAddr);
1904 /* See if it's any use to us. */
1905 holeLen = holeEnd - holeStart + 1;
1907 if (fixedIdx == -1 && holeStart <= reqStart && reqEnd <= holeEnd)
1910 if (floatIdx == -1 && holeLen >= reqLen)
1913 /* Don't waste time searching once we've found what we wanted. */
1914 if ((fixed_not_required || fixedIdx >= 0) && floatIdx >= 0)
/* Wrap around to index 0 so the whole array is scanned. */
1918 if (i >= nsegments_used) i = 0;
1921 aspacem_assert(fixedIdx >= -1 && fixedIdx < nsegments_used);
1923 aspacem_assert(nsegments[fixedIdx].kind == SkFree);
1925 aspacem_assert(floatIdx >= -1 && floatIdx < nsegments_used);
1927 aspacem_assert(nsegments[floatIdx].kind == SkFree);
1931 /* Now see if we found anything which can satisfy the request. */
1932 switch (req->rkind) {
1934 if (fixedIdx >= 0) {
1939 VG_(debugLog)(0,"aspacem", "%s: Default policy not acceptable; MFixed\n", __func__);
/* MHint: prefer the requested address, else fall back to floating. */
1946 if (fixedIdx >= 0) {
1950 if (floatIdx >= 0) {
1952 return nsegments[floatIdx].start;
/* MAny: any sufficiently large free hole will do. */
1957 if (floatIdx >= 0) {
1959 return nsegments[floatIdx].start;
1962 VG_(debugLog)(0,"aspacem", "%s: Default policy not acceptable; MAny\n", __func__);
/* Unreachable for well-formed requests. */
1971 ML_(am_barf)("getAdvisory: unknown request kind");
1976 /* Convenience wrapper for VG_(am_get_advisory) for client floating or
1977 fixed requests. If start is zero, a floating request is issued; if
1978 nonzero, a fixed request at that address is issued. Same comments
1979 about return values apply. */
1981 Addr VG_(am_get_advisory_client_simple) ( Addr start, SizeT len,
/* start == 0 means "anywhere" (MAny); otherwise a fixed placement. */
1985 mreq.rkind = start==0 ? MAny : MFixed;
1988 return VG_(am_get_advisory)( &mreq, True/*client*/, ok );
1992 /* Notifies aspacem that the client completed an mmap successfully.
1993 The segment array is updated accordingly. If the returned Bool is
1994 True, the caller should immediately discard translations from the
1995 specified address range. */
1998 VG_(am_notify_client_mmap)( Addr a, SizeT len, UInt prot, UInt flags,
1999 Int fd, Off64T offset )
2001 #define DEBUG_MYSELF 0
2002 HChar buf[VKI_PATH_MAX];
/* L4Re kernel-debugger breakpoint on the len==0 error path. */
2010 enter_kdebug("!len>0");
2012 aspacem_assert(len > 0);
2013 aspacem_assert(VG_IS_PAGE_ALIGNED(a));
2014 aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2015 aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
2017 /* Discard is needed if any of the just-trashed range had T. */
2018 needDiscard = any_Ts_in_range( a, len );
/* Build the client segment describing the new mapping. */
2020 init_nsegment( &seg );
2021 seg.kind = (flags & VKI_MAP_ANONYMOUS) ? SkAnonC : SkFileC;
2023 seg.end = a + len - 1;
2024 seg.hasR = toBool(prot & VKI_PROT_READ);
2025 seg.hasW = toBool(prot & VKI_PROT_WRITE);
2026 seg.hasX = toBool(prot & VKI_PROT_EXEC);
2027 if (!(flags & VKI_MAP_ANONYMOUS)) {
2028 // Nb: We ignore offset requests in anonymous mmaps (see bug #126722)
2029 seg.offset = offset;
/* Record identity (dev/ino/mode) and name of the backing file, when
   the fd can be resolved. */
2030 if (get_inode_for_fd(fd, &dev, &ino, &mode)) {
2035 if (get_name_for_fd(fd, buf, VKI_PATH_MAX)) {
2036 seg.fnIdx = allocate_segname( buf );
2039 add_segment( &seg );
2045 /* Notifies aspacem that the valgrind completed an mmap successfully.
2046 The segment array is updated accordingly. If the returned Bool is
2047 True, the caller should immediately discard translations from the
2048 specified address range. */
2051 VG_(am_notify_valgrind_mmap)( Addr a, SizeT len, UInt prot, UInt flags,
2052 Int fd, Off64T offset )
2054 HChar buf[VKI_PATH_MAX];
2058 aspacem_assert(len > 0);
2059 aspacem_assert(VG_IS_PAGE_ALIGNED(a));
2060 aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2061 aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
2063 /* Discard is needed if any of the just-trashed range had T. */
2064 needDiscard = any_Ts_in_range( a, len );
/* Same as the client variant above, but records a valgrind-owned
   (SkAnonV/SkFileV) segment and skips the dev/ino lookup. */
2066 init_nsegment( &seg );
2067 seg.kind = (flags & VKI_MAP_ANONYMOUS) ? SkAnonV : SkFileV;
2069 seg.end = a + len - 1;
2070 seg.hasR = toBool(prot & VKI_PROT_READ);
2071 seg.hasW = toBool(prot & VKI_PROT_WRITE);
2072 seg.hasX = toBool(prot & VKI_PROT_EXEC);
2073 if (!(flags & VKI_MAP_ANONYMOUS)) {
2074 // Nb: We ignore offset requests in anonymous mmaps (see bug #126722)
2075 seg.offset = offset;
2076 if (get_name_for_fd(fd, buf, VKI_PATH_MAX)) {
2077 seg.fnIdx = allocate_segname( buf );
2080 add_segment( &seg );
2086 /* Notifies aspacem that the client completed a shmat successfully.
2087 The segment array is updated accordingly. If the returned Bool is
2088 True, the caller should immediately discard translations from the
2089 specified address range. */
2092 VG_(am_notify_client_shmat)( Addr a, SizeT len, UInt prot )
2097 aspacem_assert(len > 0);
2098 aspacem_assert(VG_IS_PAGE_ALIGNED(a));
2099 aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2101 /* Discard is needed if any of the just-trashed range had T. */
2102 needDiscard = any_Ts_in_range( a, len );
/* Record the attached shared-memory range as a client segment (kind
   set on an elided line; presumably SkShmC). */
2104 init_nsegment( &seg );
2107 seg.end = a + len - 1;
2109 seg.hasR = toBool(prot & VKI_PROT_READ);
2110 seg.hasW = toBool(prot & VKI_PROT_WRITE);
2111 seg.hasX = toBool(prot & VKI_PROT_EXEC);
2112 add_segment( &seg );
2117 /* Notifies aspacem that an mprotect was completed successfully. The
2118 segment array is updated accordingly. Note, as with
2119 VG_(am_notify_munmap), it is not the job of this function to reject
2120 stupid mprotects, for example the client doing mprotect of
2121 non-client areas. Such requests should be intercepted earlier, by
2122 the syscall wrapper for mprotect. This function merely records
2123 whatever it is told. If the returned Bool is True, the caller
2124 should immediately discard translations from the specified address
2127 Bool VG_(am_notify_mprotect)( Addr start, SizeT len, UInt prot )
2130 Bool newR, newW, newX, needDiscard;
2132 aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2133 aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2138 newR = toBool(prot & VKI_PROT_READ);
2139 newW = toBool(prot & VKI_PROT_WRITE);
2140 newX = toBool(prot & VKI_PROT_EXEC);
2142 /* Discard is needed if we're dumping X permission */
2143 needDiscard = any_Ts_in_range( start, len ) && !newX;
/* Align segment boundaries to the affected range, then re-find the
   indices (splitting may have shifted them). */
2145 split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );
2147 iLo = find_nsegment_idx(start);
2148 iHi = find_nsegment_idx(start + len - 1);
2150 for (i = iLo; i <= iHi; i++) {
2151 /* Apply the permissions to all relevant segments. */
2152 switch (nsegments[i].kind) {
2153 case SkAnonC: case SkAnonV: case SkFileC: case SkFileV: case SkShmC:
2154 nsegments[i].hasR = newR;
2155 nsegments[i].hasW = newW;
2156 nsegments[i].hasX = newX;
2157 aspacem_assert(sane_NSegment(&nsegments[i]));
2164 /* Changing permissions could have made previously un-mergable
2165 segments mergeable. Therefore have to re-preen them. */
2166 (void)preen_nsegments();
2172 /* Notifies aspacem that an munmap completed successfully. The
2173 segment array is updated accordingly. As with
2174 VG_(am_notify_munmap), we merely record the given info, and don't
2175 check it for sensibleness. If the returned Bool is True, the
2176 caller should immediately discard translations from the specified
2179 Bool VG_(am_notify_munmap)( Addr start, SizeT len )
2183 aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2184 aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2189 needDiscard = any_Ts_in_range( start, len );
/* Overlay the unmapped range with a free (or reserved, see below)
   segment. */
2191 init_nsegment( &seg );
2193 seg.end = start + len - 1;
2195 /* The segment becomes unused (free). Segments from above
2196 aspacem_maxAddr were originally SkResvn and so we make them so
2197 again. Note, this isn't really right when the segment straddles
2198 the aspacem_maxAddr boundary - then really it should be split in
2199 two, the lower part marked as SkFree and the upper part as
2200 SkResvn. Ah well. */
2201 if (start > aspacem_maxAddr
2202 && /* check previous comparison is meaningful */
2203 aspacem_maxAddr < Addr_MAX)
2206 /* Ditto for segments from below aspacem_minAddr. */
2207 if (seg.end < aspacem_minAddr && aspacem_minAddr > 0)
2212 add_segment( &seg );
2214 /* Unmapping could create two adjacent free segments, so a preen is
2215 needed. add_segment() will do that, so no need to here. */
2221 /*-----------------------------------------------------------------*/
2223 /*--- Handling mappings which do not arise directly from the ---*/
2224 /*--- simulation of the client. ---*/
2226 /*-----------------------------------------------------------------*/
2228 /* --- --- --- map, unmap, protect --- --- --- */
2230 /* Map a file at a fixed address for the client, and update the
2231 segment array accordingly. */
2233 SysRes VG_(am_mmap_file_fixed_client)
2234 ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset )
2243 HChar buf[VKI_PATH_MAX];
2245 /* Not allowable. */
2247 || !VG_IS_PAGE_ALIGNED(start)
2248 || !VG_IS_PAGE_ALIGNED(offset))
2249 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2251 /* Ask for an advisory. If it's negative, fail immediately. */
/* A fixed request must be granted at exactly the requested address. */
2255 advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
2256 if (!ok || advised != start)
2257 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2259 /* We have been advised that the mapping is allowable at the
2260 specified address. So hand it off to the kernel, and propagate
2261 any resulting failure immediately. */
2262 sres = VG_(am_do_mmap_NO_NOTIFY)(
2263 start, length, prot,
2264 VKI_MAP_FIXED|VKI_MAP_PRIVATE,
2267 if (sr_isError(sres))
2270 if (sr_Res(sres) != start) {
2271 /* I don't think this can happen. It means the kernel made a
2272 fixed map succeed but not at the requested location. Try to
2273 repair the damage, then return saying the mapping failed. */
2274 (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2275 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2279 /* Ok, the mapping succeeded. Now notify the interval map. */
2280 init_nsegment( &seg );
2283 seg.end = seg.start + VG_PGROUNDUP(length) - 1;
2284 seg.offset = offset;
2285 seg.hasR = toBool(prot & VKI_PROT_READ);
2286 seg.hasW = toBool(prot & VKI_PROT_WRITE);
2287 seg.hasX = toBool(prot & VKI_PROT_EXEC);
/* Record identity and name of the backing file, when resolvable. */
2289 if (get_inode_for_fd(fd, &dev, &ino, &mode)) {
2295 if (get_name_for_fd(fd, buf, VKI_PATH_MAX)) {
2296 seg.fnIdx = allocate_segname( buf );
2298 add_segment( &seg );
2300 /* In L4Re the NO_NOTIFY part is actually bogus as we use the segment array
2301 * for region management and thus now already have a segment set up for this
2302 * region. Therefore we simply look it up and adapt it a bit.
/* NOTE(review): cast away of const via the non-const local 's' relies
   on VG_(am_find_nsegment) pointing into the writable nsegments[]. */
2304 NSegment *s = VG_(am_find_nsegment)(sr_Res(sres));
2307 s->hasR = toBool(prot & VKI_PROT_READ);
2308 s->hasW = toBool(prot & VKI_PROT_WRITE);
2309 s->hasX = toBool(prot & VKI_PROT_EXEC);
2311 if (get_inode_for_fd(fd, &dev, &ino, &mode)) {
2317 if (get_name_for_fd(fd, buf, VKI_PATH_MAX)) {
2318 s->fnIdx = allocate_segname( buf );
2328 /* Map anonymously at a fixed address for the client, and update
2329 the segment array accordingly. */
2331 SysRes VG_(am_mmap_anon_fixed_client) ( Addr start, SizeT length, UInt prot )
2340 /* Not allowable. */
2341 if (length == 0 || !VG_IS_PAGE_ALIGNED(start))
2342 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2344 /* Ask for an advisory. If it's negative, fail immediately. */
/* A fixed request must be granted at exactly the requested address. */
2348 advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
2349 if (!ok || advised != start)
2350 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2352 /* We have been advised that the mapping is allowable at the
2353 specified address. So hand it off to the kernel, and propagate
2354 any resulting failure immediately. */
2355 sres = VG_(am_do_mmap_NO_NOTIFY)(
2356 start, length, prot,
2357 VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2360 if (sr_isError(sres))
2363 if (sr_Res(sres) != start) {
2364 /* I don't think this can happen. It means the kernel made a
2365 fixed map succeed but not at the requested location. Try to
2366 repair the damage, then return saying the mapping failed. */
2367 (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2368 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2372 /* Ok, the mapping succeeded. Now notify the interval map. */
2373 init_nsegment( &seg );
2376 seg.end = seg.start + VG_PGROUNDUP(length) - 1;
2377 seg.hasR = toBool(prot & VKI_PROT_READ);
2378 seg.hasW = toBool(prot & VKI_PROT_WRITE);
2379 seg.hasX = toBool(prot & VKI_PROT_EXEC);
2380 add_segment( &seg );
2382 /* In L4Re the NO_NOTIFY part is actually bogus as we use the segment array
2383 * for region management and thus now already have a segment set up for this
2384 * region. Therefore we simply look it up and adapt it a bit.
2386 NSegment *seg = VG_(am_find_nsegment)(sr_Res(sres));
2387 seg->kind = SkAnonC;
2395 /* Map anonymously at an unconstrained address for the client, and
2396 update the segment array accordingly. */
/* Map anonymously at an address of aspacem's choosing for the client
   and record it in the segment array.  The advisory picks the address;
   the map is then forced there with MAP_FIXED.
   NOTE(review): declarations, braces and the #if VGO_l4re split are
   elided in this view. */
2398 SysRes VG_(am_mmap_anon_float_client) ( SizeT length, Int prot )
2408 /* Not allowable. */
2410 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2412 /* Ask for an advisory. If it's negative, fail immediately. */
2416 advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
2418 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2420 /* We have been advised that the mapping is allowable at the
2421 advised address. So hand it off to the kernel, and propagate
2422 any resulting failure immediately. */
2423 sres = VG_(am_do_mmap_NO_NOTIFY)(
2424 advised, length, prot,
2425 VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2428 if (sr_isError(sres))
/* Defensive: if the kernel honoured MAP_FIXED but at a different
   address, unmap and report failure. */
2431 if (sr_Res(sres) != advised) {
2432 /* I don't think this can happen. It means the kernel made a
2433 fixed map succeed but not at the requested location. Try to
2434 repair the damage, then return saying the mapping failed. */
2435 (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2436 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2440 /* Ok, the mapping succeeded. Now notify the interval map. */
2441 init_nsegment( &seg );
2443 seg.start = advised;
2444 seg.end = seg.start + VG_PGROUNDUP(length) - 1;
2445 seg.hasR = toBool(prot & VKI_PROT_READ);
2446 seg.hasW = toBool(prot & VKI_PROT_WRITE);
2447 seg.hasX = toBool(prot & VKI_PROT_EXEC);
2448 add_segment( &seg );
/* L4Re path: adapt the already-existing segment instead. */
2450 NSegment *seg = VG_(am_find_nsegment)(sr_Res(sres));
2451 seg->kind = SkAnonC;
2452 seg->hasR = toBool(prot & VKI_PROT_READ);
2453 seg->hasW = toBool(prot & VKI_PROT_WRITE);
2454 seg->hasX = toBool(prot & VKI_PROT_EXEC);
2462 /* Similarly, acquire new address space for the client but with
2463 considerable restrictions on what can be done with it: (1) the
2464 actual protections may exceed those stated in 'prot', (2) the
2465 area's protections cannot be later changed using any form of
2466 mprotect, and (3) the area cannot be freed using any form of
2467 munmap. On Linux this behaves the same as
2468 VG_(am_mmap_anon_float_client). On AIX5 this *may* allocate memory
2469 by using sbrk, so as to make use of large pages on AIX. */
2471 SysRes VG_(am_sbrk_anon_float_client) ( SizeT length, Int prot )
2473 return VG_(am_mmap_anon_float_client) ( length, prot );
2477 /* Map anonymously at an unconstrained address for V, and update the
2478 segment array accordingly. This is fundamentally how V allocates
2479 itself more address space when needed. */
/* Map anonymously at an aspacem-chosen address for Valgrind's own use
   (always RWX), and record it in the segment array.  This is how V
   grows its own address space.
   NOTE(review): declarations, braces and parts of the #if VGO_l4re
   branches are elided in this view. */
2481 SysRes VG_(am_mmap_anon_float_valgrind)( SizeT length )
2489 /* Not allowable. */
2491 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2493 /* Ask for an advisory. If it's negative, fail immediately. */
2497 advised = VG_(am_get_advisory)( &req, False/*valgrind*/, &ok );
2499 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2501 /* We have been advised that the mapping is allowable at the
2502 specified address. So hand it off to the kernel, and propagate
2503 any resulting failure immediately. */
2504 sres = VG_(am_do_mmap_NO_NOTIFY)(
/* V mappings are always fully accessible to V itself. */
2506 VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
2507 VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2510 if (sr_isError(sres))
2513 if (sr_Res(sres) != advised) {
2514 /* I don't think this can happen. It means the kernel made a
2515 fixed map succeed but not at the requested location. Try to
2516 repair the damage, then return saying the mapping failed. */
2517 (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2518 return VG_(mk_SysRes_Error)( VKI_EINVAL );
/* L4Re: before the virtual cap handler runs, maintain the segment
   array by hand; afterwards the region manager owns it. */
2521 #if defined(VGO_l4re)
2522 if (!vcap_running) {
2524 /* Ok, the mapping succeeded. Now notify the interval map. */
2525 init_nsegment( &seg );
2527 seg.start = advised;
2528 seg.end = seg.start + VG_PGROUNDUP(length) - 1;
2532 vrm_segment_fixup(&seg);
2533 add_segment( &seg );
2534 #if defined(VGO_l4re)
/* Sanity: the region manager must already know this segment. */
2536 NSegment *s = VG_(am_find_nsegment)(sr_Res(sres));
2538 VG_(printf)("Could not find segment that should be existing!\n");
2539 enter_kdebug("error");
2552 /* Really just a wrapper around VG_(am_mmap_anon_float_valgrind). */
2554 void* VG_(am_shadow_alloc)(SizeT size)
2556 SysRes sres = VG_(am_mmap_anon_float_valgrind)( size );
2557 return sr_isError(sres) ? NULL : (void*)sr_Res(sres);
2560 /* Same comments apply as per VG_(am_sbrk_anon_float_client). On
2561 Linux this behaves the same as VG_(am_mmap_anon_float_valgrind). */
2563 SysRes VG_(am_sbrk_anon_float_valgrind)( SizeT cszB )
2565 return VG_(am_mmap_anon_float_valgrind)( cszB );
2569 /* Map a file at an unconstrained address for V, and update the
2570 segment array accordingly. This is used by V for transiently
2571 mapping in object files to read their debug info. */
/* Map a file (fd/offset) at an aspacem-chosen address for V's own use
   — used transiently for reading object files' debug info — and record
   the mapping, including device/inode and filename when obtainable.
   NOTE(review): declarations, braces and the #if split between the
   generic and L4Re paths are elided in this view. */
2573 SysRes VG_(am_mmap_file_float_valgrind) ( SizeT length, UInt prot,
2574 Int fd, Off64T offset )
2583 HChar buf[VKI_PATH_MAX];
2585 /* Not allowable. */
2586 if (length == 0 || !VG_IS_PAGE_ALIGNED(offset))
2587 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2589 /* Ask for an advisory. If it's negative, fail immediately. */
2593 advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
2595 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2597 /* We have been advised that the mapping is allowable at the
2598 specified address. So hand it off to the kernel, and propagate
2599 any resulting failure immediately. */
2600 sres = VG_(am_do_mmap_NO_NOTIFY)(
2601 advised, length, prot,
2602 VKI_MAP_FIXED|VKI_MAP_PRIVATE,
2605 if (sr_isError(sres))
2608 if (sr_Res(sres) != advised) {
2609 /* I don't think this can happen. It means the kernel made a
2610 fixed map succeed but not at the requested location. Try to
2611 repair the damage, then return saying the mapping failed. */
2612 (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2613 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2617 /* Ok, the mapping succeeded. Now notify the interval map. */
2618 init_nsegment( &seg );
2620 seg.start = sr_Res(sres);
2621 seg.end = seg.start + VG_PGROUNDUP(length) - 1;
2622 seg.offset = offset;
2623 seg.hasR = toBool(prot & VKI_PROT_READ);
2624 seg.hasW = toBool(prot & VKI_PROT_WRITE);
2625 seg.hasX = toBool(prot & VKI_PROT_EXEC);
/* Record identity of the backing file where the OS lets us. */
2627 if (get_inode_for_fd(fd, &dev, &ino, &mode)) {
2632 if (get_name_for_fd(fd, buf, VKI_PATH_MAX)) {
2633 seg.fnIdx = allocate_segname( buf );
2635 add_segment( &seg );
2637 (void)seg; (void)dev; (void)ino; (void)buf;
/* L4Re path: adapt the segment the region manager already created. */
2639 NSegment *s = VG_(am_find_nsegment)(sr_Res(sres));
2641 VG_(printf)("error!!!\n");
2647 if (get_name_for_fd(fd, buf, VKI_PATH_MAX)) {
2648 s->fnIdx = allocate_segname( buf );
2658 /* --- --- munmap helper --- --- */
/* Shared munmap worker for both client (forClient=True) and Valgrind
   unmaps.  Validates alignment and ownership, performs the unmap, and
   updates the segment array via VG_(am_notify_munmap).  *need_discard
   reports whether the range contained cached translations.
   NOTE(review): braces and several error-branch lines are elided in
   this view. */
2661 SysRes am_munmap_both_wrk ( /*OUT*/Bool* need_discard,
2662 Addr start, SizeT len, Bool forClient )
2667 if (!VG_IS_PAGE_ALIGNED(start))
/* Zero-length unmap is trivially successful, with nothing to discard. */
2671 *need_discard = False;
2672 return VG_(mk_SysRes_Success)( 0 );
/* Reject address wraparound. */
2675 if (start + len < len)
2678 len = VG_PGROUNDUP(len);
2679 aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2680 aspacem_assert(VG_IS_PAGE_ALIGNED(len));
/* Ownership check differs by caller: client ranges may also cover
   free/reservation space; V ranges must be valid for V itself. */
2683 if (!VG_(am_is_valid_for_client_or_free_or_resvn)
2684 ( start, len, VKI_PROT_NONE ))
2687 if (!is_valid_for_valgrind( start, len ))
/* Any cached translations in the doomed range must be discarded. */
2691 d = any_Ts_in_range( start, len );
2693 sres = ML_(am_do_munmap_NO_NOTIFY)( start, len );
2694 if (sr_isError(sres))
2697 VG_(am_notify_munmap)( start, len );
2703 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2706 /* Unmap the given address range and update the segment array
2707 accordingly. This fails if the range isn't valid for the client.
2708 If *need_discard is True after a successful return, the caller
2709 should immediately discard translations from the specified address
2712 SysRes VG_(am_munmap_client)( /*OUT*/Bool* need_discard,
2713 Addr start, SizeT len )
2715 return am_munmap_both_wrk( need_discard, start, len, True/*client*/ );
2718 /* Unmap the given address range and update the segment array
2719 accordingly. This fails if the range isn't valid for valgrind. */
/* Unmap a Valgrind-owned range and update the segment array.  Fails
   if the range is not valid for V.
   NOTE(review): braces, the need_discard declaration and the final
   return are elided in this view; the assert below presumably runs
   only on the success path — confirm against the full source. */
2721 SysRes VG_(am_munmap_valgrind)( Addr start, SizeT len )
2724 SysRes r = am_munmap_both_wrk( &need_discard,
2725 start, len, False/*valgrind*/ );
2726 /* If this assertion fails, it means we allowed translations to be
2727 made from a V-owned section. Which shouldn't happen. */
2729 aspacem_assert(!need_discard);
2733 /* Let (start,len) denote an area within a single Valgrind-owned
2734 segment (anon or file). Change the ownership of [start, start+len)
2735 to the client instead. Fails if (start,len) does not denote a
2736 suitable segment. */
/* Transfer ownership of [start, start+len) from Valgrind to the
   client.  The range must lie wholly inside one SkFileV or SkAnonV
   segment; the segment is split around the range and the middle piece
   retagged SkFileC/SkAnonC.  Returns False on any precondition
   failure.  NOTE(review): braces and early-return lines are elided
   in this view. */
2738 Bool VG_(am_change_ownership_v_to_c)( Addr start, SizeT len )
/* Reject wraparound and misalignment. */
2744 if (start + len < start)
2746 if (!VG_IS_PAGE_ALIGNED(start) || !VG_IS_PAGE_ALIGNED(len))
2749 i = find_nsegment_idx(start);
2750 if (nsegments[i].kind != SkFileV && nsegments[i].kind != SkAnonV)
/* The whole range must fit in this single segment. */
2752 if (start+len-1 > nsegments[i].end)
2755 aspacem_assert(start >= nsegments[i].start);
2756 aspacem_assert(start+len-1 <= nsegments[i].end);
2758 /* This scheme is like how mprotect works: split the to-be-changed
2759 range into its own segment(s), then mess with them (it). There
2760 should be only one. */
2761 split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );
2762 aspacem_assert(iLo == iHi);
2763 switch (nsegments[iLo].kind) {
2764 case SkFileV: nsegments[iLo].kind = SkFileC; break;
2765 case SkAnonV: nsegments[iLo].kind = SkAnonC; break;
2766 default: aspacem_assert(0); /* can't happen - guarded above */
2773 /* 'seg' must be NULL or have been obtained from
2774 VG_(am_find_nsegment), and still valid. If non-NULL, and if it
2775 denotes a SkAnonC (anonymous client mapping) area, set the .isCH
2776 (is-client-heap) flag for that area. Otherwise do nothing.
2777 (Bizarre interface so that the same code works for both Linux and
2778 AIX and does not impose inefficiencies on the Linux version.) */
/* If 'seg' denotes an anonymous client (SkAnonC) segment, mark it as
   client heap (isCH).  For any other kind, do nothing except assert
   that isCH was not already set.  NOTE(review): braces and the else
   line are elided in this view. */
2779 void VG_(am_set_segment_isCH_if_SkAnonC)( NSegment* seg )
/* Translate the caller's segment pointer back to an array index;
   a failed assertion means 'seg' was bogus or stale. */
2781 Int i = segAddr_to_index( seg );
2782 aspacem_assert(i >= 0 && i < nsegments_used);
2783 if (nsegments[i].kind == SkAnonC) {
2784 nsegments[i].isCH = True;
2786 aspacem_assert(nsegments[i].isCH == False);
2790 /* Same idea as VG_(am_set_segment_isCH_if_SkAnonC), except set the
2791 segment's hasT bit (has-cached-code) if this is SkFileC or SkAnonC
/* If 'seg' denotes a client mapping (SkFileC or SkAnonC), set its
   hasT (has cached translations) flag; otherwise do nothing.
   NOTE(review): braces are elided in this view. */
2793 void VG_(am_set_segment_hasT_if_SkFileC_or_SkAnonC)( NSegment* seg )
2795 Int i = segAddr_to_index( seg );
2796 aspacem_assert(i >= 0 && i < nsegments_used);
2797 if (nsegments[i].kind == SkAnonC || nsegments[i].kind == SkFileC) {
2798 nsegments[i].hasT = True;
2803 /* --- --- --- reservations --- --- --- */
2805 /* Create a reservation from START .. START+LENGTH-1, with the given
2806 ShrinkMode. When checking whether the reservation can be created,
2807 also ensure that at least abs(EXTRA) extra free bytes will remain
2808 above (> 0) or below (< 0) the reservation.
2810 The reservation will only be created if it, plus the extra-zone,
2811 falls entirely within a single free segment. The returned Bool
2812 indicates whether the creation succeeded. */
/* Create an SkResvn reservation covering [start, start+length-1].
   The reservation plus abs(extra) guard bytes (below if extra<0,
   above if extra>0) must fall inside a single SkFree segment.
   Returns True on success.  NOTE(review): declarations, the
   startI!=endI early-return and the seg.kind/smode assignments are
   elided in this view. */
2814 Bool VG_(am_create_reservation) ( Addr start, SizeT length,
2815 ShrinkMode smode, SSizeT extra )
2820 /* start and end, not taking into account the extra space. */
2821 Addr start1 = start;
2822 Addr end1 = start + length - 1;
2824 /* start and end, taking into account the extra space. */
2825 Addr start2 = start1;
2828 if (extra < 0) start2 += extra; // this moves it down :-)
2829 if (extra > 0) end2 += extra;
2831 aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2832 aspacem_assert(VG_IS_PAGE_ALIGNED(start+length));
2833 aspacem_assert(VG_IS_PAGE_ALIGNED(start2));
2834 aspacem_assert(VG_IS_PAGE_ALIGNED(end2+1));
2836 startI = find_nsegment_idx( start2 );
2837 endI = find_nsegment_idx( end2 );
2839 /* If the start and end points don't fall within the same (free)
2840 segment, we're hosed. This does rely on the assumption that all
2841 mergeable adjacent segments can be merged, but add_segment()
2842 should ensure that. */
2846 if (nsegments[startI].kind != SkFree)
2849 /* Looks good - make the reservation. */
2850 aspacem_assert(nsegments[startI].start <= start2);
2851 aspacem_assert(end2 <= nsegments[startI].end);
2853 init_nsegment( &seg );
2856 VG_(debugLog)(0, "aspacem", "%s: setting segment.kind to SkResvn\n", __func__);
2857 show_nsegment_full(0, &seg);
2860 seg.start = start1; /* NB: extra space is not included in the
2864 add_segment( &seg );
2871 /* Let SEG be an anonymous client mapping. This fn extends the
2872 mapping by DELTA bytes, taking the space from a reservation section
2873 which must be adjacent. If DELTA is positive, the segment is
2874 extended forwards in the address space, and the reservation must be
2875 the next one along. If DELTA is negative, the segment is extended
2876 backwards in the address space and the reservation must be the
2877 previous one. DELTA must be page aligned. abs(DELTA) must not
2878 exceed the size of the reservation segment minus one page, that is,
2879 the reservation segment after the operation must be at least one
/* Extend the SkAnonC segment 'seg' by 'delta' bytes into an adjacent
   SkResvn reservation: forwards (next segment, SmLower) when delta>0,
   backwards (previous segment, SmUpper) when delta<0.  The reservation
   must retain at least one page afterwards.  Returns True on success.
   NOTE(review): declarations, braces, the delta==0 handling and the
   segR assignments are elided in this view. */
2882 Bool VG_(am_extend_into_adjacent_reservation_client) ( NSegment* seg,
2889 /* Find the segment array index for SEG. If the assertion fails it
2890 probably means you passed in a bogus SEG. */
2891 segA = segAddr_to_index( seg );
2892 aspacem_assert(segA >= 0 && segA < nsegments_used);
2894 if (nsegments[segA].kind != SkAnonC)
/* Recreate the client's protections for the newly mapped pages. */
2900 prot = (nsegments[segA].hasR ? VKI_PROT_READ : 0)
2901 | (nsegments[segA].hasW ? VKI_PROT_WRITE : 0)
2902 | (nsegments[segA].hasX ? VKI_PROT_EXEC : 0);
2904 aspacem_assert(VG_IS_PAGE_ALIGNED(delta<0 ? -delta : delta));
2908 /* Extending the segment forwards. */
/* The following checks enforce: reservation exists, lies immediately
   after segA, shrinks from the low end, and stays >= one page. */
2910 if (segR >= nsegments_used
2911 || nsegments[segR].kind != SkResvn
2912 || nsegments[segR].smode != SmLower
2913 || nsegments[segR].start != nsegments[segA].end + 1
2914 || delta + VKI_PAGE_SIZE
2915 > (nsegments[segR].end - nsegments[segR].start + 1))
2918 /* Extend the kernel's mapping. */
2919 sres = VG_(am_do_mmap_NO_NOTIFY)(
2920 nsegments[segR].start, delta,
2922 VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2925 if (sr_isError(sres))
2926 return False; /* kernel bug if this happens? */
2927 if (sr_Res(sres) != nsegments[segR].start) {
2928 /* kernel bug if this happens? */
2929 (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), delta );
2933 /* Ok, success with the kernel. Update our structures. */
2934 nsegments[segR].start += delta;
2935 nsegments[segA].end += delta;
2936 aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
2940 /* Extending the segment backwards. */
/* delta was negated above (elided); from here it is positive. */
2942 aspacem_assert(delta > 0);
2946 || nsegments[segR].kind != SkResvn
2947 || nsegments[segR].smode != SmUpper
2948 || nsegments[segR].end + 1 != nsegments[segA].start
2949 || delta + VKI_PAGE_SIZE
2950 > (nsegments[segR].end - nsegments[segR].start + 1))
2953 /* Extend the kernel's mapping. */
2954 sres = VG_(am_do_mmap_NO_NOTIFY)(
2955 nsegments[segA].start-delta, delta,
2957 VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2960 if (sr_isError(sres))
2961 return False; /* kernel bug if this happens? */
2962 if (sr_Res(sres) != nsegments[segA].start-delta) {
2963 /* kernel bug if this happens? */
2964 (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), delta );
2968 /* Ok, success with the kernel. Update our structures. */
2969 nsegments[segR].end -= delta;
2970 nsegments[segA].start -= delta;
2971 aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
2980 /* --- --- --- resizing/move a mapping --- --- --- */
2982 /* Let SEG be a client mapping (anonymous or file). This fn extends
2983 the mapping forwards only by DELTA bytes, and trashes whatever was
2984 in the new area. Fails if SEG is not a single client mapping or if
2985 the new area is not accessible to the client. Fails if DELTA is
2986 not page aligned. *seg is invalid after a successful return. If
2987 *need_discard is True after a successful return, the caller should
2988 immediately discard translations from the new area. */
/* Grow the client mapping 'seg' forwards by 'delta' bytes in place
   (mremap-style, via ML_(am_do_extend_mapping_NO_NOTIFY)), trashing
   whatever follows.  *seg is invalidated by the add_segment() call.
   *need_discard reports whether the newly covered range held cached
   translations.  NOTE(review): braces and the early 'return False'
   lines are elided in this view. */
2990 Bool VG_(am_extend_map_client)( /*OUT*/Bool* need_discard,
2991 NSegment* seg, SizeT delta )
/* Copy the descriptor first: add_segment() below may reorder/merge
   the array, invalidating 'seg'. */
2995 NSegment seg_copy = *seg;
2996 SizeT seg_old_len = seg->end + 1 - seg->start;
2999 VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) BEFORE");
3001 if (seg->kind != SkFileC && seg->kind != SkAnonC)
3004 if (delta == 0 || !VG_IS_PAGE_ALIGNED(delta))
/* The extension area starts just past the segment; reject overflow. */
3007 xStart = seg->end+1;
3008 if (xStart + delta < delta)
3011 if (!VG_(am_is_valid_for_client_or_free_or_resvn)( xStart, delta,
3016 sres = ML_(am_do_extend_mapping_NO_NOTIFY)( seg->start,
3018 seg_old_len + delta );
3019 if (sr_isError(sres)) {
3023 /* the area must not have moved */
3024 aspacem_assert(sr_Res(sres) == seg->start);
3027 *need_discard = any_Ts_in_range( seg_copy.end+1, delta );
3029 seg_copy.end += delta;
3030 add_segment( &seg_copy );
3033 VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) AFTER");
3040 /* Remap the old address range to the new address range. Fails if any
3041 parameter is not page aligned, if the either size is zero, if any
3042 wraparound is implied, if the old address range does not fall
3043 entirely within a single segment, if the new address range overlaps
3044 with the old one, or if the old address range is not a valid client
3045 mapping. If *need_discard is True after a successful return, the
3046 caller should immediately discard translations from both specified
/* Move the client mapping at [old_addr, old_addr+old_len) to
   [new_addr, new_addr+new_len) (mremap with no overlap allowed),
   updating the segment array: the new range inherits the old
   segment's attributes, the old range becomes free (or a reservation
   above aspacem_maxAddr).  *need_discard reports cached translations
   in either range.  NOTE(review): braces and the early 'return False'
   lines are elided in this view. */
3049 Bool VG_(am_relocate_nooverlap_client)( /*OUT*/Bool* need_discard,
3050 Addr old_addr, SizeT old_len,
3051 Addr new_addr, SizeT new_len )
3057 if (old_len == 0 || new_len == 0)
3060 if (!VG_IS_PAGE_ALIGNED(old_addr) || !VG_IS_PAGE_ALIGNED(old_len)
3061 || !VG_IS_PAGE_ALIGNED(new_addr) || !VG_IS_PAGE_ALIGNED(new_len))
/* Reject address wraparound in either range. */
3064 if (old_addr + old_len < old_addr
3065 || new_addr + new_len < new_addr)
/* Ranges must be disjoint (this condition is true when they are). */
3068 if (old_addr + old_len - 1 < new_addr
3069 || new_addr + new_len - 1 < old_addr) {
3074 iLo = find_nsegment_idx( old_addr );
3075 iHi = find_nsegment_idx( old_addr + old_len - 1 );
3079 if (nsegments[iLo].kind != SkFileC && nsegments[iLo].kind != SkAnonC)
3082 sres = ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)
3083 ( old_addr, old_len, new_addr, new_len );
3084 if (sr_isError(sres)) {
3088 aspacem_assert(sr_Res(sres) == new_addr);
3091 *need_discard = any_Ts_in_range( old_addr, old_len )
3092 || any_Ts_in_range( new_addr, new_len );
3094 seg = nsegments[iLo];
3096 /* Mark the new area based on the old seg. */
/* File mappings: keep the file offset consistent with the portion of
   the segment that actually moved. */
3097 if (seg.kind == SkFileC) {
3098 seg.offset += ((ULong)old_addr) - ((ULong)seg.start);
3100 aspacem_assert(seg.kind == SkAnonC);
3101 aspacem_assert(seg.offset == 0);
3103 seg.start = new_addr;
3104 seg.end = new_addr + new_len - 1;
3105 add_segment( &seg );
3107 /* Create a free hole in the old location. */
3108 init_nsegment( &seg );
3109 seg.start = old_addr;
3110 seg.end = old_addr + old_len - 1;
3111 /* See comments in VG_(am_notify_munmap) about this SkResvn vs
3113 if (old_addr > aspacem_maxAddr
3114 && /* check previous comparison is meaningful */
3115 aspacem_maxAddr < Addr_MAX)
3120 add_segment( &seg );
3127 /*-----------------------------------------------------------------*/
3129 /*--- A simple parser for /proc/self/maps on Linux 2.4.X/2.6.X. ---*/
3130 /*--- Almost completely independent of the stuff above. The ---*/
3131 /*--- only function it 'exports' to the code above this comment ---*/
3132 /*--- is parse_procselfmaps. ---*/
3134 /*-----------------------------------------------------------------*/
3136 /* Size of a smallish table used to read /proc/self/map entries. */
3137 #define M_PROCMAP_BUF 100000
3139 /* static ... to keep it out of the stack frame. */
3140 static Char procmap_buf[M_PROCMAP_BUF];
3142 /* Records length of /proc/self/maps read into procmap_buf. */
/* Set by read_procselfmaps_into_buf(); always < M_PROCMAP_BUF so the
   buffer can be NUL-terminated. */
3143 static Int buf_n_tot;
3147 static Int hexdigit ( Char c )
3149 if (c >= '0' && c <= '9') return (Int)(c - '0');
3150 if (c >= 'a' && c <= 'f') return 10 + (Int)(c - 'a');
3151 if (c >= 'A' && c <= 'F') return 10 + (Int)(c - 'A');
/* Value (0..9) of the decimal digit 'c'.  NOTE(review): the fallback
   for non-digits is elided in this view — presumably returns -1 like
   hexdigit; confirm against the full source. */
3155 static Int decdigit ( Char c )
3157 if (c >= '0' && c <= '9') return (Int)(c - '0');
/* Consume one character from 'buf' into '*ch'.  Returns the number of
   characters consumed: 0 at end of string.  NOTE(review): the body
   lines that store *buf into *ch and return 1 are elided in this
   view. */
3161 static Int readchar ( const Char* buf, Char* ch )
3163 if (*buf == 0) return 0;
/* Parse a word-sized hex number from 'buf' into '*val', returning the
   number of characters consumed.  NOTE(review): the counter/pointer
   increments and the return are elided in this view. */
3168 static Int readhex ( const Char* buf, UWord* val )
3170 /* Read a word-sized hex number. */
3173 while (hexdigit(*buf) >= 0) {
3174 *val = (*val << 4) + hexdigit(*buf);
/* Parse a (possibly 64-bit) hex number from 'buf' into '*val',
   returning the number of characters consumed.  NOTE(review): the
   counter/pointer increments and the return are elided in this
   view. */
3180 static Int readhex64 ( const Char* buf, ULong* val )
3182 /* Read a potentially 64-bit hex number. */
3185 while (hexdigit(*buf) >= 0) {
3186 *val = (*val << 4) + hexdigit(*buf);
3192 static Int readdec64 ( const Char* buf, ULong* val )
3196 while (hexdigit(*buf) >= 0) {
3197 *val = (*val * 10) + decdigit(*buf);
3204 /* Get the contents of /proc/self/maps into a static buffer. If
3205 there's a syntax error, it won't fit, or other failure, just
/* Slurp /proc/self/maps into the static procmap_buf, setting
   buf_n_tot, and NUL-terminate it.  Aborts (am_barf) on open failure,
   on a buffer too small for the file, or on read error.
   NOTE(review): braces, declarations and the buf_n_tot reset are
   elided in this view. */
3208 static void read_procselfmaps_into_buf ( void )
3213 /* Read the initial memory mapping from the /proc filesystem. */
3214 fd = ML_(am_open)( "/proc/self/maps", VKI_O_RDONLY, 0 );
3216 ML_(am_barf)("can't open /proc/self/maps");
/* Read in chunks until EOF or the buffer is full. */
3220 n_chunk = ML_(am_read)( sr_Res(fd), &procmap_buf[buf_n_tot],
3221 M_PROCMAP_BUF - buf_n_tot );
3223 buf_n_tot += n_chunk;
3224 } while ( n_chunk > 0 && buf_n_tot < M_PROCMAP_BUF );
3226 ML_(am_close)(sr_Res(fd));
/* Leave headroom so truncation is detectable rather than silent. */
3228 if (buf_n_tot >= M_PROCMAP_BUF-5)
3229 ML_(am_barf_toolow)("M_PROCMAP_BUF");
3231 ML_(am_barf)("I/O error on /proc/self/maps");
3233 procmap_buf[buf_n_tot] = 0;
3236 /* Parse /proc/self/maps. For each map entry, call
3237 record_mapping, passing it, in this order:
3239 start address in memory
3241 page protections (using the VKI_PROT_* flags)
3242 mapped file device and inode
3243 offset in file, or zero if no file
3244 filename, zero terminated, or NULL if no file
3246 So the sig of the called fn might be
3248 void (*record_mapping)( Addr start, SizeT size, UInt prot,
3249 UInt dev, UInt info,
3250 ULong foffset, UChar* filename )
3252 Note that the supplied filename is transiently stored; record_mapping
3253 should make a copy if it wants to keep it.
3255 Nb: it is important that this function does not alter the contents of
/* (L4Re) Refresh the cached region/area lists from the region mapper.
   NOTE(review): r_ls, regions, N_REGIONS and N_AREAS are declared
   outside this view — presumably file-scope statics sized for
   vrm_get_lists(); confirm against the full source. */
3259 static void fetch_regionlist(void) {
3260 VG_(memset)(&r_ls, 0, sizeof(struct vrm_region_lists));
3261 VG_(memset)(regions, 0, sizeof(struct vrm_region) * N_REGIONS);
3263 r_ls.regions = regions;
3266 vrm_get_lists(&r_ls, N_REGIONS, N_AREAS);
/* (L4Re) Walk the fetched region list (the L4Re analogue of parsing
   /proc/self/maps), invoking record_mapping for each region and
   record_gap for each hole, and notifying the region mapper about
   each segment.  Assumes fetch_regionlist() has populated r_ls.
   NOTE(review): braces and some prot-derivation lines are elided in
   this view. */
3270 static void parse_regionlist (
3271 void (*record_mapping)( Addr addr, SizeT len, UInt prot,
3272 ULong dev, ULong ino, ULong offset,
3273 const UChar* filename ),
3274 void (*record_gap)( Addr addr, SizeT len )
3278 Addr start, endPlusOne, gapStart;
3279 ULong foffset, dev, ino;
3281 UChar *filename = NULL;
3283 if (0) l4re_rm_show_lists();
3285 VG_(debugLog)(1, "main", "region_count = %d, area_count = %d\n", r_ls.region_count, r_ls.area_count);
3286 VG_(debugLog)(1, "main", "Region maping: limits [%lx-%lx]\n", r_ls.min_addr, r_ls.max_addr);
3288 gapStart = Addr_MIN;
3290 VG_(debugLog)(1, "main", " Area map:\n");
3291 for (i = 0; i < r_ls.area_count; i++)
3292 VG_(debugLog)(1, "main", " [%10lx-%10lx] -> flags=%x\n", r_ls.areas[i].start, r_ls.areas[i].end, r_ls.areas[i].flags);
3294 VG_(debugLog)(1, "main", " Region map:\n");
3296 for (i = 0; i < r_ls.region_count; i++) {
3297 start = r_ls.regions[i].start;
3298 endPlusOne = r_ls.regions[i].end + 1;
3300 // TODO offset from ds not from region
3301 foffset = 0; //r_ls.regions[i].offset;
/* Derive VKI_PROT_* bits from the region flags. */
3305 prot |= VKI_PROT_READ;
/* NOTE(review): '(flags | 1) == 1' is only true when flags is 0 or 1;
   a bit-test like '(flags & 1)' looks more plausible — confirm the
   vrm flag semantics against the full source. */
3306 if ((r_ls.regions[i].flags | 1) == 1)
3307 prot |= VKI_PROT_WRITE;
3308 prot |= VKI_PROT_EXEC;
/* The kernel info page is always treated as fully accessible. */
3310 if (start == L4RE_KIP_ADDR)
3311 prot = VKI_PROT_READ | VKI_PROT_WRITE | VKI_PROT_EXEC;
3313 VG_(debugLog)(1, "main", " [%10lx-%10lx] -> (offs=%lx, ds=%lx, flags=%x)\n",
3314 r_ls.regions[i].start, r_ls.regions[i].end,
3315 r_ls.regions[i].offset_in_ds, r_ls.regions[i].ds,
3316 r_ls.regions[i].flags);
/* Report the hole (if any) before this region, then the region. */
3318 if (record_gap && gapStart < start)
3319 (*record_gap) ( gapStart, start-gapStart );
3321 if (record_mapping && start < endPlusOne) {
3322 (*record_mapping) ( start, endPlusOne-start,
3324 foffset, filename );
3325 vrm_segment_notify(r_ls.regions[i].start, r_ls.regions[i].end, r_ls.regions[i].ds, r_ls.regions[i].offset_in_ds, r_ls.regions[i].flags);
3328 gapStart = endPlusOne;
/* Final gap from the last region to the top of the address space. */
3330 if (record_gap && gapStart < Addr_MAX)
3331 (*record_gap) ( gapStart, Addr_MAX - gapStart + 1 );
3334 /*--------------------------------------------------------------------*/
3336 /*--------------------------------------------------------------------*/