2 /*--------------------------------------------------------------------*/
3 /*--- The address space manager: segment initialisation and ---*/
4 /*--- tracking, stack operations ---*/
6 /*--- Implementation for Linux (and Darwin!) m_aspacemgr-linux.c ---*/
7 /*--------------------------------------------------------------------*/
10 This file is part of Valgrind, a dynamic binary instrumentation
13 Copyright (C) 2000-2010 Julian Seward
16 This program is free software; you can redistribute it and/or
17 modify it under the terms of the GNU General Public License as
18 published by the Free Software Foundation; either version 2 of the
19 License, or (at your option) any later version.
21 This program is distributed in the hope that it will be useful, but
22 WITHOUT ANY WARRANTY; without even the implied warranty of
23 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 General Public License for more details.
26 You should have received a copy of the GNU General Public License
27 along with this program; if not, write to the Free Software
28 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
31 The GNU General Public License is contained in the file COPYING.
34 #if defined(VGO_linux) || defined(VGO_darwin)
36 /* *************************************************************
37 DO NOT INCLUDE ANY OTHER FILES HERE.
38 ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
39 AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
40 ************************************************************* */
42 #include "priv_aspacemgr.h"
46 /* Note: many of the exported functions implemented below are
47 described more fully in comments in pub_core_aspacemgr.h.
51 /*-----------------------------------------------------------------*/
55 /*-----------------------------------------------------------------*/
59 The purpose of the address space manager (aspacem) is:
61 (1) to record the disposition of all parts of the process' address
64 (2) to the extent that it can, influence layout in ways favourable
67 It is important to appreciate that whilst it can and does attempt
68 to influence layout, and usually succeeds, it isn't possible to
69 impose absolute control: in the end, the kernel is the final
70 arbiter, and can always bounce our requests.
74 The strategy is therefore as follows:
76 * Track ownership of mappings. Each one can belong either to
77 Valgrind or to the client.
79 * Try to place the client's fixed and hinted mappings at the
80 requested addresses. Fixed mappings are allowed anywhere except
81 in areas reserved by Valgrind; the client can trash its own
82 mappings if it wants. Hinted mappings are allowed providing they
83 fall entirely in free areas; if not, they will be placed by
84 aspacem in a free area.
86 * Anonymous mappings are allocated so as to keep Valgrind and
87 client areas widely separated when possible. If address space
88 runs low, then they may become intermingled: aspacem will attempt
89 to use all possible space. But under most circumstances lack of
90 address space is not a problem and so the areas will remain far
93 Searches for client space start at aspacem_cStart and will wrap
94 around the end of the available space if needed. Searches for
95 Valgrind space start at aspacem_vStart and will also wrap around.
96 Because aspacem_cStart is approximately at the start of the
97 available space and aspacem_vStart is approximately in the
98 middle, for the most part the client anonymous mappings will be
99 clustered towards the start of available space, and Valgrind ones
102 The available space is delimited by aspacem_minAddr and
103 aspacem_maxAddr. aspacem is flexible and can operate with these
104 at any (sane) setting. For 32-bit Linux, aspacem_minAddr is set
105 to some low-ish value at startup (64M) and aspacem_maxAddr is
106 derived from the stack pointer at system startup. This seems a
107 reliable way to establish the initial boundaries.
109 64-bit Linux is similar except for the important detail that the
110 upper boundary is set to 32G. The reason is so that all
111 anonymous mappings (basically all client data areas) are kept
112 below 32G, since that is the maximum range that memcheck can
113 track shadow memory using a fast 2-level sparse array. It can go
114 beyond that but runs much more slowly. The 32G limit is
115 arbitrary and is trivially changed. So, with the current
116 settings, programs on 64-bit Linux will appear to run out of
117 address space and presumably fail at the 32G limit. Given the
118 9/8 space overhead of Memcheck, that means you should be able to
119 memcheckify programs that use up to about 14G natively.
121 Note that the aspacem_minAddr/aspacem_maxAddr limits apply only to
122 anonymous mappings. The client can still do fixed and hinted maps
123 at any addresses provided they do not overlap Valgrind's segments.
124 This makes Valgrind able to load prelinked .so's at their requested
125 addresses on 64-bit platforms, even if they are very high (eg,
128 At startup, aspacem establishes the usable limits, and advises
129 m_main to place the client stack at the top of the range, which on
130 a 32-bit machine will be just below the real initial stack. One
131 effect of this is that self-hosting sort-of works, because an inner
132 valgrind will then place its client's stack just below its own
135 The segment array and segment kinds
136 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
137 The central data structure is the segment array (segments[0
138 .. nsegments_used-1]). This covers the entire address space in
139 order, giving account of every byte of it. Free spaces are
140 represented explicitly as this makes many operations simpler.
141 Mergeable adjacent segments are aggressively merged so as to create
142 a "normalised" representation (preen_nsegments).
144 There are 7 (mutually-exclusive) segment kinds, the meaning of
147 SkFree: a free space, which may be allocated either to Valgrind (V)
150 SkAnonC: an anonymous mapping belonging to C. For these, aspacem
151 tracks a boolean indicating whether or not it is part of the
152 client's heap area (can't remember why).
154 SkFileC: a file mapping belonging to C.
156 SkShmC: a shared memory segment belonging to C.
158 SkAnonV: an anonymous mapping belonging to V. These cover all V's
159 dynamic memory needs, including non-client malloc/free areas,
160 shadow memory, and the translation cache.
162 SkFileV: a file mapping belonging to V. As far as I know these are
163 only created transiently for the purposes of reading debug info.
165 SkResvn: a reservation segment.
167 These are mostly straightforward. Reservation segments have some
170 A reservation segment is unmapped from the kernel's point of view,
171 but is an area in which aspacem will not create anonymous maps
172 (either Vs or Cs). The idea is that we will try to keep it clear
173 when the choice to do so is ours. Reservation segments are
174 'invisible' from the client's point of view: it may choose to park
175 a fixed mapping in the middle of one, and that's just tough -- we
176 can't do anything about that. From the client's perspective
177 reservations are semantically equivalent to (although
178 distinguishable from, if it makes enquiries) free areas.
180 Reservations are a primitive mechanism provided for whatever
181 purposes the rest of the system wants. Currently they are used to
182 reserve the expansion space into which a growdown stack is
183 expanded, and into which the data segment is extended. Note,
184 though, those uses are entirely external to this module, which only
185 supplies the primitives.
187 Reservations may be shrunk in order that an adjoining anonymous
188 mapping may be extended. This makes dataseg/stack expansion work.
189 A reservation may not be shrunk below one page.
191 The advise/notify concept
192 ~~~~~~~~~~~~~~~~~~~~~~~~~
193 All mmap-related calls must be routed via aspacem. Calling
194 sys_mmap directly from the rest of the system is very dangerous
195 because aspacem's data structures will become out of date.
197 The fundamental mode of operation of aspacem is to support client
198 mmaps. Here's what happens (in ML_(generic_PRE_sys_mmap)):
200 * m_syswrap intercepts the mmap call. It examines the parameters
201 and identifies the requested placement constraints. There are
202 three possibilities: no constraint (MAny), hinted (MHint, "I
203 prefer X but will accept anything"), and fixed (MFixed, "X or
206 * This request is passed to VG_(am_get_advisory). This decides on
207 a placement as described in detail in Strategy above. It may
208 also indicate that the map should fail, because it would trash
209 one of Valgrind's areas, which would probably kill the system.
211 * Control returns to the wrapper. If VG_(am_get_advisory) has
212 declared that the map should fail, then it must be made to do so.
213 Usually, though, the request is considered acceptable, in which
214 case an "advised" address is supplied. The advised address
215 replaces the original address supplied by the client, and
218 Note at this point that although aspacem has been asked for
219 advice on where to place the mapping, no commitment has yet been
220 made by either it or the kernel.
222 * The adjusted request is handed off to the kernel.
224 * The kernel's result is examined. If the map succeeded, aspacem
225 is told of the outcome (VG_(am_notify_client_mmap)), so it can
226 update its records accordingly.
228 This then is the central advise-notify idiom for handling client
229 mmap/munmap/mprotect/shmat:
231 * ask aspacem for an advised placement (or a veto)
233 * if not vetoed, hand request to kernel, using the advised placement
235 * examine result, and if successful, notify aspacem of the result.
237 There are also many convenience functions, eg
238 VG_(am_mmap_anon_fixed_client), which do both phases entirely within
241 To debug all this, a sync-checker is provided. It reads
242 /proc/self/maps, compares what it sees with aspacem's records, and
243 complains if there is a difference. --sanity-level=3 runs it before
244 and after each syscall, which is a powerful, if slow way of finding
245 buggy syscall wrappers.
249 Up to and including Valgrind 2.4.1, x86 segmentation was used to
250 enforce separation of V and C, so that wild writes by C could not
251 trash V. This got called "pointercheck". Unfortunately, the new
252 more flexible memory layout, plus the need to be portable across
253 different architectures, means doing this in hardware is no longer
254 viable, and doing it in software is expensive. So at the moment we
259 /*-----------------------------------------------------------------*/
261 /*--- The Address Space Manager's state. ---*/
263 /*-----------------------------------------------------------------*/
265 /* ------ start of STATE for the address-space manager ------ */
267 /* Max number of segments we can track. */
268 #define VG_N_SEGMENTS 5000
270 /* Max number of segment file names we can track. */
271 #define VG_N_SEGNAMES 1000
273 /* Max length of a segment file name. */
274 #define VG_MAX_SEGNAMELEN 1000
281 HChar fname[VG_MAX_SEGNAMELEN];
285 /* Filename table. _used is the high water mark; an entry is only
286 valid if its index >= 0, < _used, and its .inUse field == True.
287 The .mark field is used to garbage-collect dead entries.
289 static SegName segnames[VG_N_SEGNAMES];
290 static Int segnames_used = 0;
293 /* Array [0 .. nsegments_used-1] of all mappings. */
294 /* Sorted by .addr field. */
295 /* I: len may not be zero. */
296 /* I: overlapping segments are not allowed. */
297 /* I: the segments cover the entire address space precisely. */
298 /* Each segment can optionally hold an index into the filename table. */
300 static NSegment nsegments[VG_N_SEGMENTS];
301 static Int nsegments_used = 0;
303 #define Addr_MIN ((Addr)0)
304 #define Addr_MAX ((Addr)(-1ULL))
308 // The smallest address that aspacem will try to allocate
309 static Addr aspacem_minAddr = 0;
311 // The largest address that aspacem will try to allocate
312 static Addr aspacem_maxAddr = 0;
314 // Where aspacem will start looking for client space
315 static Addr aspacem_cStart = 0;
317 // Where aspacem will start looking for Valgrind space
318 static Addr aspacem_vStart = 0;
321 #define AM_SANITY_CHECK \
323 if (VG_(clo_sanity_level >= 3)) \
324 aspacem_assert(VG_(am_do_sync_check) \
325 (__PRETTY_FUNCTION__,__FILE__,__LINE__)); \
328 /* ------ end of STATE for the address-space manager ------ */
330 /* ------ Forwards decls ------ */
332 static Int find_nsegment_idx ( Addr a );
334 static void parse_procselfmaps (
335 void (*record_mapping)( Addr addr, SizeT len, UInt prot,
336 ULong dev, ULong ino, Off64T offset,
337 const UChar* filename ),
338 void (*record_gap)( Addr addr, SizeT len )
341 /* ----- Hacks to do with the "commpage" on arm-linux ----- */
342 /* Not that I have anything against the commpage per se. It's just
343 that it's not listed in /proc/self/maps, which is a royal PITA --
344 we have to fake it up, in parse_procselfmaps.
346 But note also bug 254556 comment #2: this is now fixed in newer
347 kernels -- it is listed as a "[vectors]" entry. Presumably the
348 fake entry made here duplicates the [vectors] entry, and so, if at
349 some point in the future, we can stop supporting buggy kernels,
350 then this kludge can be removed entirely, since the procmap parser
351 below will read that entry in the normal way. */
352 #if defined(VGP_arm_linux)
353 # define ARM_LINUX_FAKE_COMMPAGE_START 0xFFFF0000
354 # define ARM_LINUX_FAKE_COMMPAGE_END1 0xFFFF1000
358 /*-----------------------------------------------------------------*/
360 /*--- SegName array management. ---*/
362 /*-----------------------------------------------------------------*/
364 /* Searches the filename table to find an index for the given name.
365 If none is found, an index is allocated and the name stored. If no
366 space is available we just give up. If the string is too long to
/* Return an index into segnames[] for 'name', reusing an existing
   entry when the name is already present, otherwise claiming a free
   (or new) slot and copying the name in.  NOTE(review): this view is
   an elided fragment -- the early-return paths and closing braces for
   the length check / match / slot-allocation cases are not visible. */
369 static Int allocate_segname ( const HChar* name )
373 aspacem_assert(name);
375 if (0) VG_(debugLog)(0,"aspacem","allocate_segname %s\n", name);
377 len = VG_(strlen)(name);
/* Reject names too long to fit in SegName.fname[] (handling elided). */
378 if (len >= VG_MAX_SEGNAMELEN-1) {
382 /* first see if we already have the name. */
383 for (i = 0; i < segnames_used; i++) {
384 if (!segnames[i].inUse)
386 if (0 == VG_(strcmp)(name, &segnames[i].fname[0])) {
391 /* no we don't. So look for a free slot. */
392 for (i = 0; i < segnames_used; i++)
393 if (!segnames[i].inUse)
396 if (i == segnames_used) {
397 /* no free slots .. advance the high-water mark. */
398 if (segnames_used+1 < VG_N_SEGNAMES) {
/* Table full: fatal -- aspacem cannot proceed without a slot. */
402 ML_(am_barf_toolow)("VG_N_SEGNAMES");
/* Install the name in the chosen slot, NUL-terminated. */
407 segnames[i].inUse = True;
408 for (j = 0; j < len; j++)
409 segnames[i].fname[j] = name[j];
410 aspacem_assert(len < VG_MAX_SEGNAMELEN);
411 segnames[i].fname[len] = 0;
416 /*-----------------------------------------------------------------*/
418 /*--- Displaying the segment array. ---*/
420 /*-----------------------------------------------------------------*/
/* Map a SegKind to a fixed-width 4-char tag for the segment listings.
   Lower case = client-owned, UPPER case = Valgrind-owned. */
422 static HChar* show_SegKind ( SegKind sk )
425 case SkFree: return " ";
426 case SkAnonC: return "anon";
427 case SkAnonV: return "ANON";
428 case SkFileC: return "file";
429 case SkFileV: return "FILE";
430 case SkShmC: return "shm ";
431 case SkResvn: return "RSVN";
432 default: return "????";
/* Map a ShrinkMode (how a reservation may be shrunk) to its name
   for display purposes. */
436 static HChar* show_ShrinkMode ( ShrinkMode sm )
439 case SmLower: return "SmLower";
440 case SmUpper: return "SmUpper";
441 case SmFixed: return "SmFixed";
442 default: return "Sm?????";
/* Render the byte length of [start, end] into 'buf' using a compact
   human-readable unit.  The thresholds below select (per the elided
   branch bodies, presumably) bytes / MB / GB / TB style formats; the
   chosen format string 'fmt' is applied at the end. */
446 static void show_len_concisely ( /*OUT*/HChar* buf, Addr start, Addr end )
449 ULong len = ((ULong)end) - ((ULong)start) + 1;
451 if (len < 10*1000*1000ULL) {
454 else if (len < 999999ULL * (1ULL<<20)) {
458 else if (len < 999999ULL * (1ULL<<30)) {
462 else if (len < 999999ULL * (1ULL<<40)) {
470 ML_(am_sprintf)(buf, fmt, len);
474 /* Show full details of an NSegment */
/* Dump every field of *seg on one line: kind, address range, concise
   length, rwxTH permission flags, shrink mode, dev/ino/offset, filename
   index and the associated name (or "(none)").  Marked unused since it
   is only called from debug paths. */
476 static void __attribute__ ((unused))
477 show_nsegment_full ( Int logLevel, Int segNo, NSegment* seg )
480 HChar* name = "(none)";
/* Only trust fnIdx if it indexes a live, non-empty segnames entry. */
482 if (seg->fnIdx >= 0 && seg->fnIdx < segnames_used
483 && segnames[seg->fnIdx].inUse
484 && segnames[seg->fnIdx].fname[0] != 0)
485 name = segnames[seg->fnIdx].fname;
487 show_len_concisely(len_buf, seg->start, seg->end);
491 "%3d: %s %010llx-%010llx %s %c%c%c%c%c %s "
492 "d=0x%03llx i=%-7lld o=%-7lld (%d) m=%d %s\n",
493 segNo, show_SegKind(seg->kind),
494 (ULong)seg->start, (ULong)seg->end, len_buf,
495 seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
496 seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
497 seg->isCH ? 'H' : '-',
498 show_ShrinkMode(seg->smode),
499 seg->dev, seg->ino, seg->offset, seg->fnIdx,
505 /* Show an NSegment in a user-friendly-ish way. */
/* Print *seg in a condensed per-kind format (switch on seg->kind;
   the switch header and SkFree/SkResvn case labels are elided from
   this view).  Each arm prints only the fields meaningful for that
   segment kind. */
507 static void show_nsegment ( Int logLevel, Int segNo, NSegment* seg )
510 show_len_concisely(len_buf, seg->start, seg->end);
/* (presumably SkFree: just the range -- case label elided) */
517 "%3d: %s %010llx-%010llx %s\n",
518 segNo, show_SegKind(seg->kind),
519 (ULong)seg->start, (ULong)seg->end, len_buf
/* Anonymous/shm segments: range plus rwxTH flags. */
523 case SkAnonC: case SkAnonV: case SkShmC:
526 "%3d: %s %010llx-%010llx %s %c%c%c%c%c\n",
527 segNo, show_SegKind(seg->kind),
528 (ULong)seg->start, (ULong)seg->end, len_buf,
529 seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
530 seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
531 seg->isCH ? 'H' : '-'
/* File-backed segments additionally show dev/ino/offset/fnIdx. */
535 case SkFileC: case SkFileV:
538 "%3d: %s %010llx-%010llx %s %c%c%c%c%c d=0x%03llx "
539 "i=%-7lld o=%-7lld (%d)\n",
540 segNo, show_SegKind(seg->kind),
541 (ULong)seg->start, (ULong)seg->end, len_buf,
542 seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
543 seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
544 seg->isCH ? 'H' : '-',
545 seg->dev, seg->ino, seg->offset, seg->fnIdx
/* (presumably SkResvn: flags plus the shrink mode.) */
552 "%3d: %s %010llx-%010llx %s %c%c%c%c%c %s\n",
553 segNo, show_SegKind(seg->kind),
554 (ULong)seg->start, (ULong)seg->end, len_buf,
555 seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
556 seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
557 seg->isCH ? 'H' : '-',
558 show_ShrinkMode(seg->smode)
/* Defensive default for corrupted kind values. */
565 "%3d: ???? UNKNOWN SEGMENT KIND\n",
572 /* Print out the segment array (debugging only!). */
/* Debug dump of the whole aspacem state: first the in-use entries of
   the filename table, then every segment via show_nsegment. 'who'
   labels the dump so the call site can be identified in the log. */
573 void VG_(am_show_nsegments) ( Int logLevel, HChar* who )
576 VG_(debugLog)(logLevel, "aspacem",
577 "<<< SHOW_SEGMENTS: %s (%d segments, %d segnames)\n",
578 who, nsegments_used, segnames_used);
579 for (i = 0; i < segnames_used; i++) {
/* skip dead filename slots (continue elided from this view) */
580 if (!segnames[i].inUse)
582 VG_(debugLog)(logLevel, "aspacem",
583 "(%2d) %s\n", i, segnames[i].fname);
585 for (i = 0; i < nsegments_used; i++)
586 show_nsegment( logLevel, i, &nsegments[i] );
587 VG_(debugLog)(logLevel, "aspacem",
592 /* Get the filename corresponding to this segment, if known and if it
593 has one. The returned name's storage cannot be assumed to be
594 persistent, so the caller should immediately copy the name
/* Return the filename associated with 'seg', or (per the elided
   branch, presumably NULL) if the segment has no valid name.  The
   returned pointer aliases segnames[] storage and is NOT stable across
   further aspacem activity -- callers must copy it immediately. */
596 HChar* VG_(am_get_filename)( NSegment const * seg )
601 if (i < 0 || i >= segnames_used || !segnames[i].inUse)
604 return &segnames[i].fname[0];
607 /* Collect up the start addresses of all non-free, non-resvn segments.
608 The interface is a bit strange in order to avoid potential
609 segment-creation races caused by dynamic allocation of the result
612 The function first computes how many entries in the result
613 buffer *starts will be needed. If this number <= nStarts,
614 they are placed in starts[0..], and the number is returned.
615 If nStarts is not large enough, nothing is written to
616 starts[0..], and the negation of the size is returned.
618 Correct use of this function may mean calling it multiple times in
619 order to establish a suitably-sized buffer. */
/* Fill starts[0 .. nStarts-1] with the start addresses of all
   non-free, non-reservation segments.  Returns the count written; if
   the buffer is too small, writes nothing and (per the elided return,
   presumably) returns the negated required size so the caller can
   retry with a bigger buffer.  Two passes: count, then copy. */
621 Int VG_(am_get_segment_starts)( Addr* starts, Int nStarts )
625 /* don't pass dumbass arguments */
626 aspacem_assert(nStarts >= 0);
/* Pass 1: count qualifying segments (SkFree/SkResvn are skipped). */
629 for (i = 0; i < nsegments_used; i++) {
630 if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn)
635 if (nSegs > nStarts) {
636 /* The buffer isn't big enough. Tell the caller how big it needs
641 /* There's enough space. So write into the result buffer. */
642 aspacem_assert(nSegs <= nStarts);
/* Pass 2: copy the start addresses out. */
645 for (i = 0; i < nsegments_used; i++) {
646 if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn)
648 starts[j] = nsegments[i].start;
652 aspacem_assert(j == nSegs); /* this should not fail */
657 /*-----------------------------------------------------------------*/
659 /*--- Sanity checking and preening of the segment array. ---*/
661 /*-----------------------------------------------------------------*/
663 /* Check representational invariants for NSegments. */
/* Check the representational invariants of a single NSegment: non-NULL,
   non-empty, non-wrapping, page-aligned, .mark clear, and per-kind
   constraints on dev/ino/offset/fnIdx/permission/isCH fields.  Returns
   False on any violation.  (The switch header and several case labels
   are elided from this view.) */
665 static Bool sane_NSegment ( NSegment* s )
667 if (s == NULL) return False;
669 /* No zero sized segments and no wraparounds. */
670 if (s->start >= s->end) return False;
672 /* .mark is used for admin purposes only. */
673 if (s->mark) return False;
675 /* require page alignment */
676 if (!VG_IS_PAGE_ALIGNED(s->start)) return False;
677 if (!VG_IS_PAGE_ALIGNED(s->end+1)) return False;
/* (presumably SkFree: no file identity, no permissions) */
684 && s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
685 && !s->hasR && !s->hasW && !s->hasX && !s->hasT
/* Anonymous/shm: no file identity; only SkAnonC may have isCH set. */
688 case SkAnonC: case SkAnonV: case SkShmC:
691 && s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
692 && (s->kind==SkAnonC ? True : !s->isCH);
/* File-backed: fnIdx is either -1 or a live segnames index. */
694 case SkFileC: case SkFileV:
697 && (s->fnIdx == -1 ||
698 (s->fnIdx >= 0 && s->fnIdx < segnames_used
699 && segnames[s->fnIdx].inUse))
/* (presumably SkResvn: unmapped, so no identity or permissions) */
704 s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
705 && !s->hasR && !s->hasW && !s->hasX && !s->hasT
714 /* Try merging s2 into s1, if possible. If successful, s1 is
715 modified, and True is returned. Otherwise s1 is unchanged and
716 False is returned. */
/* Try merging s2 into s1 (s2 must be the same kind and immediately
   follow s1).  On success s1 absorbs s2's range and True is returned;
   otherwise s1 is unchanged and False is returned.  Per-kind merge
   conditions follow; the range-extension statements themselves are
   elided from this view. */
718 static Bool maybe_merge_nsegments ( NSegment* s1, NSegment* s2 )
720 if (s1->kind != s2->kind)
/* must be exactly adjacent */
723 if (s1->end+1 != s2->start)
726 /* reject cases which would cause wraparound */
727 if (s1->start > s2->end)
/* Anonymous: mergeable when rwx and client-heap flags all match;
   the hasT (translations-taken-from) flag is OR-merged. */
736 case SkAnonC: case SkAnonV:
737 if (s1->hasR == s2->hasR && s1->hasW == s2->hasW
738 && s1->hasX == s2->hasX && s1->isCH == s2->isCH) {
740 s1->hasT |= s2->hasT;
/* File-backed: additionally the dev/ino must match and s2's file
   offset must continue exactly where s1's mapping leaves off. */
745 case SkFileC: case SkFileV:
746 if (s1->hasR == s2->hasR
747 && s1->hasW == s2->hasW && s1->hasX == s2->hasX
748 && s1->dev == s2->dev && s1->ino == s2->ino
749 && s2->offset == s1->offset
750 + ((ULong)s2->start) - ((ULong)s1->start) ) {
752 s1->hasT |= s2->hasT;
/* (presumably SkResvn:) only fixed-mode reservations may merge. */
761 if (s1->smode == SmFixed && s2->smode == SmFixed) {
775 /* Sanity-check and canonicalise the segment array (merge mergable
776 segments). Returns True if any segments were merged. */
/* Sanity-check and normalise the segment array in three passes:
   (1) assert full, exact, sane coverage of the address space;
   (2) compact mergeable neighbours in place (read index r, write
       index w);
   (3) mark-and-sweep the filename table, releasing entries no segment
       references.
   Returns True iff pass 2 merged anything (i.e. nsegments_used
   shrank). */
778 static Bool preen_nsegments ( void )
780 Int i, j, r, w, nsegments_used_old = nsegments_used;
782 /* Pass 1: check the segment array covers the entire address space
783 exactly once, and also that each segment is sane. */
784 aspacem_assert(nsegments_used > 0);
785 aspacem_assert(nsegments[0].start == Addr_MIN);
786 aspacem_assert(nsegments[nsegments_used-1].end == Addr_MAX);
788 aspacem_assert(sane_NSegment(&nsegments[0]));
789 for (i = 1; i < nsegments_used; i++) {
790 aspacem_assert(sane_NSegment(&nsegments[i]));
/* adjacency: each segment starts right after its predecessor ends */
791 aspacem_assert(nsegments[i-1].end+1 == nsegments[i].start);
794 /* Pass 2: merge as much as possible, using
795 maybe_merge_segments. */
797 for (r = 1; r < nsegments_used; r++) {
798 if (maybe_merge_nsegments(&nsegments[w], &nsegments[r])) {
/* not mergeable: slide nsegments[r] down to the write position */
803 nsegments[w] = nsegments[r];
807 aspacem_assert(w > 0 && w <= nsegments_used);
810 /* Pass 3: free up unused string table slots */
811 /* clear mark bits */
812 for (i = 0; i < segnames_used; i++)
813 segnames[i].mark = False;
/* mark every name still referenced by some segment */
815 for (i = 0; i < nsegments_used; i++) {
816 j = nsegments[i].fnIdx;
817 aspacem_assert(j >= -1 && j < segnames_used);
819 aspacem_assert(segnames[j].inUse);
820 segnames[j].mark = True;
/* sweep: retire unmarked entries */
824 for (i = 0; i < segnames_used; i++) {
825 if (segnames[i].mark == False) {
826 segnames[i].inUse = False;
827 segnames[i].fname[0] = 0;
831 return nsegments_used != nsegments_used_old;
835 /* Check the segment array corresponds with the kernel's view of
836 memory layout. sync_check_ok returns True if no anomalies were
837 found, else False. In the latter case the mismatching segments are
840 The general idea is: we get the kernel to show us all its segments
841 and also the gaps in between. For each such interval, try and find
842 a sequence of appropriate intervals in our segment array which
843 cover or more than cover the kernel's interval, and which all have
844 suitable kinds/permissions etc.
846 Although any specific kernel interval is not matched exactly to a
847 valgrind interval or sequence thereof, eventually any disagreement
848 on mapping boundaries will be detected. This is because, if for
849 example valgrind's intervals cover a greater range than the current
850 kernel interval, it must be the case that a neighbouring free-space
851 interval belonging to valgrind cannot cover the neighbouring
852 free-space interval belonging to the kernel. So the disagreement
855 In other words, we examine each kernel interval in turn, and check
856 we do not disagree over the range of that interval. Because all of
857 the address space is examined, any disagreements must eventually be
861 static Bool sync_check_ok = False;
/* parse_procselfmaps callback for a MAPPED kernel interval
   [addr, addr+len).  Checks that every aspacem segment overlapping the
   interval is a mapped kind (anon/file/shm) with matching permissions
   and, for file mappings, matching dev/ino and a consistent offset.
   On mismatch, clears sync_check_ok and logs both views.  Several
   declarations and the 'same &= ...' combining statements are elided
   from this view. */
863 static void sync_check_mapping_callback ( Addr addr, SizeT len, UInt prot,
864 ULong dev, ULong ino, Off64T offset,
865 const UChar* filename )
870 /* If a problem has already been detected, don't continue comparing
871 segments, so as to avoid flooding the output with error
873 #if !defined(VGO_darwin)
881 /* The kernel should not give us wraparounds. */
882 aspacem_assert(addr <= addr + len - 1);
/* Find the range of our segments touched by the kernel interval. */
884 iLo = find_nsegment_idx( addr );
885 iHi = find_nsegment_idx( addr + len - 1 );
887 /* These 5 should be guaranteed by find_nsegment_idx. */
888 aspacem_assert(0 <= iLo && iLo < nsegments_used);
889 aspacem_assert(0 <= iHi && iHi < nsegments_used);
890 aspacem_assert(iLo <= iHi);
891 aspacem_assert(nsegments[iLo].start <= addr );
892 aspacem_assert(nsegments[iHi].end >= addr + len - 1 );
894 /* x86 doesn't differentiate 'x' and 'r' (at least, all except the
895 most recent NX-bit enabled CPUs) and so recent kernels attempt
896 to provide execute protection by placing all executable mappings
897 low down in the address space and then reducing the size of the
898 code segment to prevent code at higher addresses being executed.
900 These kernels report which mappings are really executable in
901 the /proc/self/maps output rather than mirroring what was asked
902 for when each mapping was created. In order to cope with this we
903 have a sloppyXcheck mode which we enable on x86 and s390 - in this
904 mode we allow the kernel to report execute permission when we weren't
905 expecting it but not vice versa. */
906 # if defined(VGA_x86) || defined (VGA_s390x)
909 sloppyXcheck = False;
912 /* NSegments iLo .. iHi inclusive should agree with the presented
914 for (i = iLo; i <= iHi; i++) {
916 Bool same, cmp_offsets, cmp_devino;
919 /* compare the kernel's offering against ours. */
/* a mapped kernel interval must correspond to a mapped kind */
920 same = nsegments[i].kind == SkAnonC
921 || nsegments[i].kind == SkAnonV
922 || nsegments[i].kind == SkFileC
923 || nsegments[i].kind == SkFileV
924 || nsegments[i].kind == SkShmC;
/* reconstruct our view of the prot bits for this segment */
927 if (nsegments[i].hasR) seg_prot |= VKI_PROT_READ;
928 if (nsegments[i].hasW) seg_prot |= VKI_PROT_WRITE;
929 if (nsegments[i].hasX) seg_prot |= VKI_PROT_EXEC;
/* offsets are only meaningful for file-backed segments */
932 = nsegments[i].kind == SkFileC || nsegments[i].kind == SkFileV;
/* only compare dev/ino when we actually recorded one */
935 = nsegments[i].dev != 0 || nsegments[i].ino != 0;
937 /* Consider other reasons to not compare dev/inode */
938 #if defined(VGO_linux)
939 /* bproc does some godawful hack on /dev/zero at process
940 migration, which changes the name of it, and its dev & ino */
941 if (filename && 0==VG_(strcmp)(filename, "/dev/zero (deleted)"))
944 /* hack apparently needed on MontaVista Linux */
945 if (filename && VG_(strstr)(filename, "/.lib-ro/"))
949 #if defined(VGO_darwin)
950 // GrP fixme kernel info doesn't have dev/inode
953 // GrP fixme V and kernel don't agree on offsets
957 /* If we are doing sloppy execute permission checks then we
958 allow segment to have X permission when we weren't expecting
959 it (but not vice versa) so if the kernel reported execute
960 permission then pretend that this segment has it regardless
961 of what we were expecting. */
962 if (sloppyXcheck && (prot & VKI_PROT_EXEC) != 0) {
963 seg_prot |= VKI_PROT_EXEC;
/* dev/ino must match when comparable */
969 ? (nsegments[i].dev == dev && nsegments[i].ino == ino)
/* file offset must be consistent with the address delta */
972 ? nsegments[i].start-nsegments[i].offset == addr-offset
/* mismatch: report both our segment and the kernel's interval */
976 Addr end = start + len - 1;
978 show_len_concisely(len_buf, start, end);
980 sync_check_ok = False;
984 "segment mismatch: V's seg 1st, kernel's 2nd:\n");
985 show_nsegment_full( 0, i, &nsegments[i] );
986 VG_(debugLog)(0,"aspacem",
987 "...: .... %010llx-%010llx %s %c%c%c.. ....... "
988 "d=0x%03llx i=%-7lld o=%-7lld (.) m=. %s\n",
989 (ULong)start, (ULong)end, len_buf,
990 prot & VKI_PROT_READ ? 'r' : '-',
991 prot & VKI_PROT_WRITE ? 'w' : '-',
992 prot & VKI_PROT_EXEC ? 'x' : '-',
993 dev, ino, offset, filename ? (HChar*)filename : "(none)" );
999 /* Looks harmless. Keep going. */
/* parse_procselfmaps callback for an UNMAPPED kernel interval (gap)
   [addr, addr+len).  Checks that every aspacem segment overlapping the
   gap is SkFree or SkResvn (both unmapped from the kernel's point of
   view).  On mismatch, clears sync_check_ok and logs both views. */
1003 static void sync_check_gap_callback ( Addr addr, SizeT len )
1007 /* If a problem has already been detected, don't continue comparing
1008 segments, so as to avoid flooding the output with error
1010 #if !defined(VGO_darwin)
1018 /* The kernel should not give us wraparounds. */
1019 aspacem_assert(addr <= addr + len - 1);
/* Find the range of our segments touched by the kernel gap. */
1021 iLo = find_nsegment_idx( addr );
1022 iHi = find_nsegment_idx( addr + len - 1 );
1024 /* These 5 should be guaranteed by find_nsegment_idx. */
1025 aspacem_assert(0 <= iLo && iLo < nsegments_used);
1026 aspacem_assert(0 <= iHi && iHi < nsegments_used);
1027 aspacem_assert(iLo <= iHi);
1028 aspacem_assert(nsegments[iLo].start <= addr );
1029 aspacem_assert(nsegments[iHi].end >= addr + len - 1 );
1031 /* NSegments iLo .. iHi inclusive should agree with the presented
1033 for (i = iLo; i <= iHi; i++) {
1037 /* compare the kernel's offering against ours. */
/* a kernel gap must correspond to a free or reserved segment */
1038 same = nsegments[i].kind == SkFree
1039 || nsegments[i].kind == SkResvn;
/* mismatch: report both our segment and the kernel's gap */
1043 Addr end = start + len - 1;
1045 show_len_concisely(len_buf, start, end);
1047 sync_check_ok = False;
1051 "segment mismatch: V's gap 1st, kernel's 2nd:\n");
1052 show_nsegment_full( 0, i, &nsegments[i] );
1053 VG_(debugLog)(0,"aspacem",
1054 " : .... %010llx-%010llx %s",
1055 (ULong)start, (ULong)end, len_buf);
1060 /* Looks harmless. Keep going. */
1065 /* Sanity check: check that Valgrind and the kernel agree on the
1066 address space layout. Prints offending segments and call point if
1067 a discrepancy is detected, but does not abort the system. Returned
1068 Bool is False if a discrepancy was found. */
/* Verify that aspacem's records agree with the kernel's layout by
   re-parsing /proc/self/maps through the two sync_check callbacks
   above.  fn/file/line identify the call point for the failure log.
   Prints diagnostics on mismatch but never aborts; returns False iff
   a discrepancy was found. */
1070 Bool VG_(am_do_sync_check) ( const HChar* fn,
1071 const HChar* file, Int line )
/* callbacks clear this global on any mismatch */
1073 sync_check_ok = True;
1075 VG_(debugLog)(0,"aspacem", "do_sync_check %s:%d\n", file,line);
1076 parse_procselfmaps( sync_check_mapping_callback,
1077 sync_check_gap_callback );
1078 if (!sync_check_ok) {
1079 VG_(debugLog)(0,"aspacem",
1080 "sync check at %s:%d (%s): FAILED\n",
1082 VG_(debugLog)(0,"aspacem", "\n");
/* extra context for debugging the failure */
1087 VG_(am_show_nsegments)(0,"post syncheck failure");
1088 VG_(sprintf)(buf, "/bin/cat /proc/%d/maps", VG_(getpid)());
1094 return sync_check_ok;
1097 /* Hook to allow sanity checks to be done from aspacemgr-common.c. */
1098 void ML_(am_do_sanity_check)( void )
1104 /*-----------------------------------------------------------------*/
1106 /*--- Low level access / modification of the segment array. ---*/
1108 /*-----------------------------------------------------------------*/
1110 /* Binary search the interval array for a given address. Since the
1111 array covers the entire address space the search cannot fail. The
1112 _WRK function does the real work. Its caller (just below) caches
1113 the results thereof, to save time. With N_CACHE of 63 we get a hit
1114 rate exceeding 90% when running OpenOffice.
1116 Re ">> 12", it doesn't matter that the page size of some targets
1117 might be different from 12. Really "(a >> 12) % N_CACHE" is merely
1118 a hash function, and the actual cache entry is always validated
1119 correctly against the selected cache entry before use.
1121 /* Don't call find_nsegment_idx_WRK; use find_nsegment_idx instead. */
/* Binary search nsegments[] for the segment containing address 'a'.
   Cannot fail because the segments tile the entire address space;
   if the search range empties out, something is badly broken and we
   abort.  NOTE(review): the loop header, local declarations and the
   final return appear elided from this listing. */
1122 __attribute__((noinline))
1123 static Int find_nsegment_idx_WRK ( Addr a )
1125 Addr a_mid_lo, a_mid_hi;
1128 hi = nsegments_used-1;
1130 /* current unsearched space is from lo to hi, inclusive. */
1132 /* Not found. This can't happen. */
1133 ML_(am_barf)("find_nsegment_idx: not found");
1135 mid = (lo + hi) / 2;
/* Interval [a_mid_lo, a_mid_hi] covered by the candidate segment. */
1136 a_mid_lo = nsegments[mid].start;
1137 a_mid_hi = nsegments[mid].end;
1139 if (a < a_mid_lo) { hi = mid-1; continue; }
1140 if (a > a_mid_hi) { lo = mid+1; continue; }
/* Neither branch taken: 'a' lies inside segment 'mid'. */
1141 aspacem_assert(a >= a_mid_lo && a <= a_mid_hi);
1142 aspacem_assert(0 <= mid && mid < nsegments_used);
/* Caching front-end for find_nsegment_idx_WRK.  Uses a direct-mapped
   cache keyed on (a >> 12) % N_CACHE; entries are validated against
   the current segment bounds before use, so stale entries are merely
   misses, never wrong answers. */
1147 inline static Int find_nsegment_idx ( Addr a )
1150 static Addr cache_pageno[N_CACHE];
1151 static Int cache_segidx[N_CACHE];
1152 static Bool cache_inited = False;
/* Query/miss counters, only reported by the disabled debug below. */
1154 static UWord n_q = 0;
1155 static UWord n_m = 0;
1159 if (LIKELY(cache_inited)) {
/* First call: mark every entry invalid (segidx -1 never validates). */
1162 for (ix = 0; ix < N_CACHE; ix++) {
1163 cache_pageno[ix] = 0;
1164 cache_segidx[ix] = -1;
1166 cache_inited = True;
1169 ix = (a >> 12) % N_CACHE;
1172 if (0 && 0 == (n_q & 0xFFFF))
1173 VG_(debugLog)(0,"xxx","find_nsegment_idx: %lu %lu\n", n_q, n_m);
/* Cache hit only if the page tag matches AND the cached index still
   denotes a segment that actually contains 'a'. */
1175 if ((a >> 12) == cache_pageno[ix]
1176 && cache_segidx[ix] >= 0
1177 && cache_segidx[ix] < nsegments_used
1178 && nsegments[cache_segidx[ix]].start <= a
1179 && a <= nsegments[cache_segidx[ix]].end) {
1181 /* aspacem_assert( cache_segidx[ix] == find_nsegment_idx_WRK(a) ); */
1182 return cache_segidx[ix];
/* Miss: do the real search and refill this cache slot. */
1186 cache_segidx[ix] = find_nsegment_idx_WRK(a);
1187 cache_pageno[ix] = a >> 12;
1188 return cache_segidx[ix];
1194 /* Finds the segment containing 'a'. Only returns file/anon/resvn
1195 segments. This returns a 'NSegment const *' - a pointer to
/* Public lookup: segment containing 'a', or (presumably NULL -- the
   SkFree branch body is elided from this listing; verify) when the
   address falls in a free area. */
1197 NSegment const * VG_(am_find_nsegment) ( Addr a )
1199 Int i = find_nsegment_idx(a);
/* The array tiles the address space, so the index must be in range
   and the segment must contain 'a'. */
1200 aspacem_assert(i >= 0 && i < nsegments_used);
1201 aspacem_assert(nsegments[i].start <= a);
1202 aspacem_assert(a <= nsegments[i].end);
1203 if (nsegments[i].kind == SkFree)
1206 return &nsegments[i];
1210 /* Given a pointer to a seg, tries to figure out which one it is in
1211 nsegments[..]. Very paranoid. */
/* Map a pointer into nsegments[] back to its index, with triple
   validation (range of pointer, range of computed index, round-trip
   identity).  NOTE(review): the failure-case returns are elided from
   this listing -- presumably they yield -1; verify. */
1212 static Int segAddr_to_index ( NSegment* seg )
1215 if (seg < &nsegments[0] || seg >= &nsegments[nsegments_used])
/* Byte-offset division recovers the array index. */
1217 i = ((UChar*)seg - (UChar*)(&nsegments[0])) / sizeof(NSegment);
1218 if (i < 0 || i >= nsegments_used)
1220 if (seg == &nsegments[i])
1226 /* Find the next segment along from 'here', if it is a file/anon/resvn
/* Neighbouring segment of 'here' (forwards if fwds, else backwards),
   but only if it is a mapped/reserved kind; the direction step and
   the not-found returns are elided from this listing. */
1228 NSegment const * VG_(am_next_nsegment) ( NSegment* here, Bool fwds )
1230 Int i = segAddr_to_index(here);
1231 if (i < 0 || i >= nsegments_used)
1235 if (i >= nsegments_used)
/* Only file/anon/shm/reservation segments are reported; SkFree is not. */
1242 switch (nsegments[i].kind) {
1243 case SkFileC: case SkFileV: case SkShmC:
1244 case SkAnonC: case SkAnonV: case SkResvn:
1245 return &nsegments[i];
1253 /* Trivial fn: return the total amount of space in anonymous mappings,
1254 both for V and the client. Is used for printing stats in
1255 out-of-memory messages. */
/* Sum the sizes of all anonymous segments (client and valgrind);
   used for out-of-memory diagnostics.  The accumulator declaration
   and final return are elided from this listing. */
1256 ULong VG_(am_get_anonsize_total)( void )
1260 for (i = 0; i < nsegments_used; i++) {
1261 if (nsegments[i].kind == SkAnonC || nsegments[i].kind == SkAnonV) {
/* end/start are inclusive bounds, hence the +1. */
1262 total += (ULong)nsegments[i].end
1263 - (ULong)nsegments[i].start + 1ULL;
1270 /* Test if a piece of memory is addressable by the client with at
1271 least the "prot" protection permissions by examining the underlying
1272 segments. If freeOk is True then SkFree areas are also allowed.
/* Common checker behind the two public am_is_valid_for_client*
   entry points.  Every segment overlapping [start, start+len) must
   be a client kind (or SkFree/SkResvn if freeOk) and must carry each
   permission requested in 'prot'. */
1275 Bool is_valid_for_client( Addr start, SizeT len, UInt prot, Bool freeOk )
1278 Bool needR, needW, needX;
/* Zero length: vacuously valid. */
1281 return True; /* somewhat dubious case */
1282 if (start + len < start)
1283 return False; /* reject wraparounds */
1285 needR = toBool(prot & VKI_PROT_READ);
1286 needW = toBool(prot & VKI_PROT_WRITE);
1287 needX = toBool(prot & VKI_PROT_EXEC);
1289 iLo = find_nsegment_idx(start);
1290 aspacem_assert(start >= nsegments[iLo].start);
1292 if (start+len-1 <= nsegments[iLo].end) {
1293 /* This is a speedup hack which avoids calling find_nsegment_idx
1294 a second time when possible. It is always correct to just
1295 use the "else" clause below, but is_valid_for_client is
1296 called a lot by the leak checker, so avoiding pointless calls
1297 to find_nsegment_idx, which can be expensive, is helpful. */
1300 iHi = find_nsegment_idx(start + len - 1);
/* Every overlapped segment must pass both the kind and permission
   tests; the else/False path is elided from this listing. */
1303 for (i = iLo; i <= iHi; i++) {
1304 if ( (nsegments[i].kind == SkFileC
1305 || nsegments[i].kind == SkAnonC
1306 || nsegments[i].kind == SkShmC
1307 || (nsegments[i].kind == SkFree && freeOk)
1308 || (nsegments[i].kind == SkResvn && freeOk))
1309 && (needR ? nsegments[i].hasR : True)
1310 && (needW ? nsegments[i].hasW : True)
1311 && (needX ? nsegments[i].hasX : True) ) {
1320 /* Test if a piece of memory is addressable by the client with at
1321 least the "prot" protection permissions by examining the underlying
1323 Bool VG_(am_is_valid_for_client)( Addr start, SizeT len,
1326 return is_valid_for_client( start, len, prot, False/*free not OK*/ );
1329 /* Variant of VG_(am_is_valid_for_client) which allows free areas to
1330 be consider part of the client's addressable space. It also
1331 considers reservations to be allowable, since from the client's
1332 point of view they don't exist. */
1333 Bool VG_(am_is_valid_for_client_or_free_or_resvn)
1334 ( Addr start, SizeT len, UInt prot )
1336 return is_valid_for_client( start, len, prot, True/*free is OK*/ );
1340 /* Test if a piece of memory is addressable by valgrind with at least
1341 PROT_NONE protection permissions by examining the underlying
/* True iff every segment overlapping [start, start+len) belongs to
   Valgrind itself (SkFileV or SkAnonV).  The failing-else path is
   elided from this listing. */
1343 static Bool is_valid_for_valgrind( Addr start, SizeT len )
1348 return True; /* somewhat dubious case */
1349 if (start + len < start)
1350 return False; /* reject wraparounds */
1352 iLo = find_nsegment_idx(start);
1353 iHi = find_nsegment_idx(start + len - 1);
1354 for (i = iLo; i <= iHi; i++) {
1355 if (nsegments[i].kind == SkFileV || nsegments[i].kind == SkAnonV) {
1365 /* Returns True if any part of the address range is marked as having
1366 translations made from it. This is used to determine when to
1367 discard code, so if in doubt return True. */
/* True iff any segment overlapping [start, start+len) has the hasT
   flag (translations were made from it) -- used to decide when cached
   translations must be discarded.  The True/False returns are elided
   from this listing. */
1369 static Bool any_Ts_in_range ( Addr start, SizeT len )
1372 aspacem_assert(len > 0);
1373 aspacem_assert(start + len > start);
1374 iLo = find_nsegment_idx(start);
1375 iHi = find_nsegment_idx(start + len - 1);
1376 for (i = iLo; i <= iHi; i++) {
1377 if (nsegments[i].hasT)
1384 /*-----------------------------------------------------------------*/
1386 /*--- Modifying the segment array, and constructing segments. ---*/
1388 /*-----------------------------------------------------------------*/
1390 /* Split the segment containing 'a' into two, so that 'a' is
1391 guaranteed to be the start of a new segment. If 'a' is already the
1392 start of a segment, do nothing. */
/* Ensure that page-aligned address 'a' is the start of a segment,
   splitting the segment containing it if necessary. */
1394 static void split_nsegment_at ( Addr a )
1398 aspacem_assert(a > 0);
1399 aspacem_assert(VG_IS_PAGE_ALIGNED(a));
1401 i = find_nsegment_idx(a);
1402 aspacem_assert(i >= 0 && i < nsegments_used);
1404 if (nsegments[i].start == a)
1405 /* 'a' is already the start point of a segment, so nothing to be
1409 /* else we have to slide the segments upwards to make a hole */
1410 if (nsegments_used >= VG_N_SEGMENTS)
1411 ML_(am_barf_toolow)("VG_N_SEGMENTS")
1429 /* Do the minimum amount of segment splitting necessary to ensure that
1430 sLo is the first address denoted by some segment and sHi is the
1431 highest address denoted by some other segment. Returns the indices
1432 of the lowest and highest segments in the range. */
/* Split so that sLo starts a segment and sHi ends one, returning the
   index range [*iLo, *iHi] covering exactly [sLo, sHi]. */
1435 void split_nsegments_lo_and_hi ( Addr sLo, Addr sHi,
1439 aspacem_assert(sLo < sHi);
1440 aspacem_assert(VG_IS_PAGE_ALIGNED(sLo));
1441 aspacem_assert(VG_IS_PAGE_ALIGNED(sHi+1));
1444 split_nsegment_at(sLo);
/* Split at the first address above the range so sHi is an end. */
1446 split_nsegment_at(sHi+1);
1448 *iLo = find_nsegment_idx(sLo);
1449 *iHi = find_nsegment_idx(sHi);
/* Postcondition checks: the indices must bracket the range exactly. */
1450 aspacem_assert(0 <= *iLo && *iLo < nsegments_used);
1451 aspacem_assert(0 <= *iHi && *iHi < nsegments_used);
1452 aspacem_assert(*iLo <= *iHi);
1453 aspacem_assert(nsegments[*iLo].start == sLo);
1454 aspacem_assert(nsegments[*iHi].end == sHi);
1455 /* Not that I'm overly paranoid or anything, definitely not :-) */
1459 /* Add SEG to the collection, deleting/truncating any it overlaps.
1460 This deals with all the tricky cases of splitting up segments as
/* Insert 'seg' into nsegments[], replacing whatever it overlaps.
   Splits boundary segments first, then collapses the replaced run
   down to a single slot and preens (merges adjacent mergeable
   segments) afterwards. */
1463 static void add_segment ( NSegment* seg )
1465 Int i, iLo, iHi, delta;
1466 Bool segment_is_sane;
1468 Addr sStart = seg->start;
1469 Addr sEnd = seg->end;
1471 aspacem_assert(sStart <= sEnd);
1472 aspacem_assert(VG_IS_PAGE_ALIGNED(sStart));
1473 aspacem_assert(VG_IS_PAGE_ALIGNED(sEnd+1));
/* Show the offending segment before asserting, to aid debugging. */
1475 segment_is_sane = sane_NSegment(seg);
1476 if (!segment_is_sane) show_nsegment_full(0,-1,seg);
1477 aspacem_assert(segment_is_sane);
1479 split_nsegments_lo_and_hi( sStart, sEnd, &iLo, &iHi );
1481 /* Now iLo .. iHi inclusive is the range of segment indices which
1482 seg will replace. If we're replacing more than one segment,
1483 slide those above the range down to fill the hole. */
1485 aspacem_assert(delta >= 0);
1487 for (i = iLo; i < nsegments_used-delta; i++)
1488 nsegments[i] = nsegments[i+delta];
1489 nsegments_used -= delta;
/* The whole replaced run now collapses into slot iLo. */
1492 nsegments[iLo] = *seg;
1494 (void)preen_nsegments();
1495 if (0) VG_(am_show_nsegments)(0,"AFTER preen (add_segment)");
1499 /* Clear out an NSegment record. */
/* Clear out an NSegment record to neutral defaults.
   NOTE(review): several field initialisations are elided from this
   listing. */
1501 static void init_nsegment ( /*OUT*/NSegment* seg )
1506 seg->smode = SmFixed;
1512 seg->hasR = seg->hasW = seg->hasX = seg->hasT = seg->isCH = False;
1516 /* Make an NSegment which holds a reservation. */
/* Initialise 'seg' as a reservation covering [start, end]
   (page-aligned, inclusive bounds). */
1518 static void init_resvn ( /*OUT*/NSegment* seg, Addr start, Addr end )
1520 aspacem_assert(start < end);
1521 aspacem_assert(VG_IS_PAGE_ALIGNED(start));
1522 aspacem_assert(VG_IS_PAGE_ALIGNED(end+1));
1524 seg->kind = SkResvn;
1530 /*-----------------------------------------------------------------*/
1532 /*--- Startup, including reading /proc/self/maps. ---*/
1534 /*-----------------------------------------------------------------*/
/* Callback for parse_procselfmaps at startup: turn each
   /proc/self/maps entry into an NSegment and add it to the array. */
1536 static void read_maps_callback ( Addr addr, SizeT len, UInt prot,
1537 ULong dev, ULong ino, Off64T offset,
1538 const UChar* filename )
1541 init_nsegment( &seg );
/* 'end' is inclusive. */
1543 seg.end = addr+len-1;
1546 seg.offset = offset;
1547 seg.hasR = toBool(prot & VKI_PROT_READ);
1548 seg.hasW = toBool(prot & VKI_PROT_WRITE);
1549 seg.hasX = toBool(prot & VKI_PROT_EXEC);
1552 /* Don't use the presence of a filename to decide if a segment in
1553 the initial /proc/self/maps to decide if the segment is an AnonV
1554 or FileV segment as some systems don't report the filename. Use
1555 the device and inode numbers instead. Fixes bug #124528. */
1557 if (dev != 0 && ino != 0)
1560 # if defined(VGO_darwin)
1561 // GrP fixme no dev/ino on darwin
1564 # endif // defined(VGO_darwin)
1566 # if defined(VGP_arm_linux)
1567 /* The standard handling of entries read from /proc/self/maps will
1568 cause the faked up commpage segment to have type SkAnonV, which
1569 is a problem because it contains code we want the client to
1570 execute, and so later m_translate will segfault the client when
1571 it tries to go in there. Hence change the ownership of it here
1572 to the client (SkAnonC). The least-worst kludge I could think
1574 if (addr == ARM_LINUX_FAKE_COMMPAGE_START
1575 && addr + len == ARM_LINUX_FAKE_COMMPAGE_END1
1576 && seg.kind == SkAnonV)
1578 # endif // defined(VGP_arm_linux)
/* Record the filename, if any (guard condition elided in listing). */
1581 seg.fnIdx = allocate_segname( filename );
1583 if (0) show_nsegment( 2,0, &seg );
1584 add_segment( &seg );
1587 /* Initialise the address space manager, setting up the initial
1588 segment list, and reading /proc/self/maps into it. This must
1589 be called before any other function.
1591 Takes a pointer to the SP at the time V gained control. This is
1592 taken to be the highest usable address (more or less). Based on
1593 that (and general consultation of tea leaves, etc) return a
1594 suggested end address for the client's stack. */
/* One-time initialisation: establish address-space limits, seed the
   segment array with one all-covering free interval plus boundary
   reservations, then read /proc/self/maps in.  Returns a suggested
   top address for the client's stack.  NOTE(review): several lines
   (locals, #else branches, closing braces) are elided from this
   listing -- verify against the full source. */
1596 Addr VG_(am_startup) ( Addr sp_at_startup )
1599 Addr suggested_clstack_top;
/* Basic invariants the rest of aspacem relies on. */
1601 aspacem_assert(sizeof(Word) == sizeof(void*));
1602 aspacem_assert(sizeof(Addr) == sizeof(void*));
1603 aspacem_assert(sizeof(SizeT) == sizeof(void*));
1604 aspacem_assert(sizeof(SSizeT) == sizeof(void*));
1606 /* Check that we can store the largest imaginable dev, ino and
1607 offset numbers in an NSegment. */
1608 aspacem_assert(sizeof(seg.dev) == 8);
1609 aspacem_assert(sizeof(seg.ino) == 8);
1610 aspacem_assert(sizeof(seg.offset) == 8);
1611 aspacem_assert(sizeof(seg.mode) == 4);
1613 /* Add a single interval covering the entire address space. */
1614 init_nsegment(&seg);
1616 seg.start = Addr_MIN;
/* Darwin uses hard-wired layout constants per word size ... */
1621 #if defined(VGO_darwin)
1623 # if VG_WORDSIZE == 4
1624 aspacem_minAddr = (Addr) 0x00001000;
1625 aspacem_maxAddr = (Addr) 0xffffffff;
1627 aspacem_cStart = aspacem_minAddr;
1628 aspacem_vStart = 0xf0000000; // 0xc0000000..0xf0000000 available
1630 aspacem_minAddr = (Addr) 0x100000000; // 4GB page zero
1631 aspacem_maxAddr = (Addr) 0x7fffffffffff;
1633 aspacem_cStart = aspacem_minAddr;
1634 aspacem_vStart = 0x700000000000; // 0x7000:00000000..0x7fff:5c000000 avail
1635 // 0x7fff:5c000000..0x7fff:ffe00000? is stack, dyld, shared cache
1638 suggested_clstack_top = -1; // ignored; Mach-O specifies its stack
/* ... whereas Linux derives the limits from the startup SP. */
1642 /* Establish address limits and block out unusable parts
1645 VG_(debugLog)(2, "aspacem",
1646 " sp_at_startup = 0x%010llx (supplied)\n",
1647 (ULong)sp_at_startup );
1649 aspacem_minAddr = (Addr) 0x04000000; // 64M
1651 # if VG_WORDSIZE == 8
1652 aspacem_maxAddr = (Addr)0x800000000 - 1; // 32G
1653 # ifdef ENABLE_INNER
/* Inner setup: clamp maxAddr below the outer Valgrind's stack. */
1654 { Addr cse = VG_PGROUNDDN( sp_at_startup ) - 1;
1655 if (aspacem_maxAddr > cse)
1656 aspacem_maxAddr = cse;
1660 aspacem_maxAddr = VG_PGROUNDDN( sp_at_startup ) - 1;
/* Client space starts at minAddr; Valgrind space at the midpoint. */
1663 aspacem_cStart = aspacem_minAddr; // 64M
1664 aspacem_vStart = VG_PGROUNDUP((aspacem_minAddr + aspacem_maxAddr + 1) / 2);
1665 # ifdef ENABLE_INNER
1666 aspacem_vStart -= 0x10000000; // 256M
/* Leave 16M headroom at the very top for the client stack. */
1669 suggested_clstack_top = aspacem_maxAddr - 16*1024*1024ULL
1674 aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_minAddr));
1675 aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_maxAddr + 1));
1676 aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_cStart));
1677 aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_vStart));
1678 aspacem_assert(VG_IS_PAGE_ALIGNED(suggested_clstack_top + 1));
1680 VG_(debugLog)(2, "aspacem",
1681 " minAddr = 0x%010llx (computed)\n",
1682 (ULong)aspacem_minAddr);
1683 VG_(debugLog)(2, "aspacem",
1684 " maxAddr = 0x%010llx (computed)\n",
1685 (ULong)aspacem_maxAddr);
1686 VG_(debugLog)(2, "aspacem",
1687 " cStart = 0x%010llx (computed)\n",
1688 (ULong)aspacem_cStart);
1689 VG_(debugLog)(2, "aspacem",
1690 " vStart = 0x%010llx (computed)\n",
1691 (ULong)aspacem_vStart);
1692 VG_(debugLog)(2, "aspacem",
1693 "suggested_clstack_top = 0x%010llx (computed)\n",
1694 (ULong)suggested_clstack_top);
/* Block out the unusable zones below cStart and above maxAddr with
   reservations. */
1696 if (aspacem_cStart > Addr_MIN) {
1697 init_resvn(&seg, Addr_MIN, aspacem_cStart-1);
1700 if (aspacem_maxAddr < Addr_MAX) {
1701 init_resvn(&seg, aspacem_maxAddr+1, Addr_MAX);
1705 /* Create a 1-page reservation at the notional initial
1706 client/valgrind boundary. This isn't strictly necessary, but
1707 because the advisor does first-fit and starts searches for
1708 valgrind allocations at the boundary, this is kind of necessary
1709 in order to get it to start allocating in the right place. */
1710 init_resvn(&seg, aspacem_vStart, aspacem_vStart + VKI_PAGE_SIZE - 1);
1713 VG_(am_show_nsegments)(2, "Initial layout");
1715 VG_(debugLog)(2, "aspacem", "Reading /proc/self/maps\n");
1716 parse_procselfmaps( read_maps_callback, NULL );
1717 /* NB: on arm-linux, parse_procselfmaps automagically kludges up
1718 (iow, hands to its callbacks) a description of the ARM Commpage,
1719 since that's not listed in /proc/self/maps (kernel bug IMO). We
1720 have to fake up its existence in parse_procselfmaps and not
1721 merely add it here as an extra segment, because doing the latter
1722 causes sync checking to fail: we see we have an extra segment in
1723 the segments array, which isn't listed in /proc/self/maps.
1724 Hence we must make it appear that /proc/self/maps contained this
1725 segment all along. Sigh. */
1727 VG_(am_show_nsegments)(2, "With contents of /proc/self/maps");
1730 return suggested_clstack_top;
1734 /*-----------------------------------------------------------------*/
1736 /*--- The core query-notify mechanism. ---*/
1738 /*-----------------------------------------------------------------*/
1740 /* Query aspacem to ask where a mapping should go. */
/* Placement advisor: given a MapRequest, decide whether a mapping may
   proceed and, if so, at what address.  See the policy description in
   the big comment below.  NOTE(review): loop bodies, index
   assignments and several returns are elided from this listing. */
1742 Addr VG_(am_get_advisory) ( MapRequest* req,
1746 /* This function implements allocation policy.
1748 The nature of the allocation request is determined by req, which
1749 specifies the start and length of the request and indicates
1750 whether the start address is mandatory, a hint, or irrelevant,
1751 and by forClient, which says whether this is for the client or
1754 Return values: the request can be vetoed (*ok is set to False),
1755 in which case the caller should not attempt to proceed with
1756 making the mapping. Otherwise, *ok is set to True, the caller
1757 may proceed, and the preferred address at which the mapping
1758 should happen is returned.
1760 Note that this is an advisory system only: the kernel can in
1761 fact do whatever it likes as far as placement goes, and we have
1762 no absolute control over it.
1764 Allocations will never be granted in a reserved area.
1766 The Default Policy is:
1768 Search the address space for two free intervals: one of them
1769 big enough to contain the request without regard to the
1770 specified address (viz, as if it was a floating request) and
1771 the other being able to contain the request at the specified
1772 address (viz, as if were a fixed request). Then, depending on
1773 the outcome of the search and the kind of request made, decide
1774 whether the request is allowable and what address to advise.
1776 The Default Policy is overriden by Policy Exception #1:
1778 If the request is for a fixed client map, we are prepared to
1779 grant it providing all areas inside the request are either
1780 free, reservations, or mappings belonging to the client. In
1781 other words we are prepared to let the client trash its own
1782 mappings if it wants to.
1784 The Default Policy is overriden by Policy Exception #2:
1786 If the request is for a hinted client map, we are prepared to
1787 grant it providing all areas inside the request are either
1788 free or reservations. In other words we are prepared to let
1789 the client have a hinted mapping anywhere it likes provided
1790 it does not trash either any of its own mappings or any of
1791 valgrind's mappings.
1794 Addr holeStart, holeEnd, holeLen;
1795 Bool fixed_not_required;
/* Search origin differs for client vs valgrind allocations. */
1797 Addr startPoint = forClient ? aspacem_cStart : aspacem_vStart;
1799 Addr reqStart = req->rkind==MAny ? 0 : req->start;
1800 Addr reqEnd = reqStart + req->len - 1;
1801 Addr reqLen = req->len;
1803 /* These hold indices for segments found during search, or -1 if not
1808 aspacem_assert(nsegments_used > 0);
1811 VG_(am_show_nsegments)(0,"getAdvisory");
1812 VG_(debugLog)(0,"aspacem", "getAdvisory 0x%llx %lld\n",
1813 (ULong)req->start, (ULong)req->len);
1816 /* Reject zero-length requests */
1817 if (req->len == 0) {
1822 /* Reject wraparounds */
1823 if ((req->rkind==MFixed || req->rkind==MHint)
1824 && req->start + req->len < req->start) {
1829 /* ------ Implement Policy Exception #1 ------ */
1831 if (forClient && req->rkind == MFixed) {
1832 Int iLo = find_nsegment_idx(reqStart);
1833 Int iHi = find_nsegment_idx(reqEnd);
/* Every overlapped segment must be free, client-owned or a
   reservation; the failure path is elided in this listing. */
1835 for (i = iLo; i <= iHi; i++) {
1836 if (nsegments[i].kind == SkFree
1837 || nsegments[i].kind == SkFileC
1838 || nsegments[i].kind == SkAnonC
1839 || nsegments[i].kind == SkShmC
1840 || nsegments[i].kind == SkResvn) {
1848 /* Acceptable. Granted. */
1852 /* Not acceptable. Fail. */
1857 /* ------ Implement Policy Exception #2 ------ */
1859 if (forClient && req->rkind == MHint) {
1860 Int iLo = find_nsegment_idx(reqStart);
1861 Int iHi = find_nsegment_idx(reqEnd);
1863 for (i = iLo; i <= iHi; i++) {
1864 if (nsegments[i].kind == SkFree
1865 || nsegments[i].kind == SkResvn) {
1873 /* Acceptable. Granted. */
1877 /* Not acceptable. Fall through to the default policy. */
1880 /* ------ Implement the Default Policy ------ */
1882 /* Don't waste time looking for a fixed match if not requested to. */
1883 fixed_not_required = req->rkind == MAny;
1885 i = find_nsegment_idx(startPoint);
1887 /* Examine holes from index i back round to i-1. Record the
1888 index first fixed hole and the first floating hole which would
1889 satisfy the request. */
1890 for (j = 0; j < nsegments_used; j++) {
1892 if (nsegments[i].kind != SkFree) {
1894 if (i >= nsegments_used) i = 0;
1898 holeStart = nsegments[i].start;
1899 holeEnd = nsegments[i].end;
/* Holes must lie entirely within the usable limits. */
1902 aspacem_assert(holeStart <= holeEnd);
1903 aspacem_assert(aspacem_minAddr <= holeStart);
1904 aspacem_assert(holeEnd <= aspacem_maxAddr);
1906 /* See if it's any use to us. */
1907 holeLen = holeEnd - holeStart + 1;
1909 if (fixedIdx == -1 && holeStart <= reqStart && reqEnd <= holeEnd)
1912 if (floatIdx == -1 && holeLen >= reqLen)
1915 /* Don't waste time searching once we've found what we wanted. */
1916 if ((fixed_not_required || fixedIdx >= 0) && floatIdx >= 0)
/* Wrap-around scan of the circular segment list. */
1920 if (i >= nsegments_used) i = 0;
1923 aspacem_assert(fixedIdx >= -1 && fixedIdx < nsegments_used);
1925 aspacem_assert(nsegments[fixedIdx].kind == SkFree);
1927 aspacem_assert(floatIdx >= -1 && floatIdx < nsegments_used);
1929 aspacem_assert(nsegments[floatIdx].kind == SkFree);
1933 /* Now see if we found anything which can satisfy the request. */
1934 switch (req->rkind) {
1936 if (fixedIdx >= 0) {
1945 if (fixedIdx >= 0) {
1949 if (floatIdx >= 0) {
1951 return nsegments[floatIdx].start;
1956 if (floatIdx >= 0) {
1958 return nsegments[floatIdx].start;
/* Unknown request kind: cannot happen, so abort. */
1967 ML_(am_barf)("getAdvisory: unknown request kind");
1972 /* Convenience wrapper for VG_(am_get_advisory) for client floating or
1973 fixed requests. If start is zero, a floating request is issued; if
1974 nonzero, a fixed request at that address is issued. Same comments
1975 about return values apply. */
/* Convenience wrapper: start==0 means "anywhere" (MAny), nonzero
   means a fixed request at that address.  The remaining mreq field
   assignments are elided from this listing. */
1977 Addr VG_(am_get_advisory_client_simple) ( Addr start, SizeT len,
1981 mreq.rkind = start==0 ? MAny : MFixed;
1984 return VG_(am_get_advisory)( &mreq, True/*client*/, ok );
1988 /* Notifies aspacem that the client completed an mmap successfully.
1989 The segment array is updated accordingly. If the returned Bool is
1990 True, the caller should immediately discard translations from the
1991 specified address range. */
/* Record a successful client mmap in the segment array.  Returns
   (per the comment above) True if the caller must discard
   translations for the range; the return statement itself is elided
   from this listing. */
1994 VG_(am_notify_client_mmap)( Addr a, SizeT len, UInt prot, UInt flags,
1995 Int fd, Off64T offset )
1997 HChar buf[VKI_PATH_MAX];
2003 aspacem_assert(len > 0);
2004 aspacem_assert(VG_IS_PAGE_ALIGNED(a));
2005 aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2006 aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
2008 /* Discard is needed if any of the just-trashed range had T. */
2009 needDiscard = any_Ts_in_range( a, len );
2011 init_nsegment( &seg );
2012 seg.kind = (flags & VKI_MAP_ANONYMOUS) ? SkAnonC : SkFileC;
2014 seg.end = a + len - 1;
2015 seg.hasR = toBool(prot & VKI_PROT_READ);
2016 seg.hasW = toBool(prot & VKI_PROT_WRITE);
2017 seg.hasX = toBool(prot & VKI_PROT_EXEC);
2018 if (!(flags & VKI_MAP_ANONYMOUS)) {
2019 // Nb: We ignore offset requests in anonymous mmaps (see bug #126722)
2020 seg.offset = offset;
/* Record dev/ino/mode and filename of the backing file, if we can
   resolve them from the fd. */
2021 if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
2026 if (ML_(am_resolve_filename)(fd, buf, VKI_PATH_MAX)) {
2027 seg.fnIdx = allocate_segname( buf );
2030 add_segment( &seg );
2035 /* Notifies aspacem that the client completed a shmat successfully.
2036 The segment array is updated accordingly. If the returned Bool is
2037 True, the caller should immediately discard translations from the
2038 specified address range. */
/* Record a successful client shmat as an SkShmC segment.  The kind
   assignment and the needDiscard return are elided from this
   listing. */
2041 VG_(am_notify_client_shmat)( Addr a, SizeT len, UInt prot )
2046 aspacem_assert(len > 0);
2047 aspacem_assert(VG_IS_PAGE_ALIGNED(a));
2048 aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2050 /* Discard is needed if any of the just-trashed range had T. */
2051 needDiscard = any_Ts_in_range( a, len );
2053 init_nsegment( &seg );
2056 seg.end = a + len - 1;
2058 seg.hasR = toBool(prot & VKI_PROT_READ);
2059 seg.hasW = toBool(prot & VKI_PROT_WRITE);
2060 seg.hasX = toBool(prot & VKI_PROT_EXEC);
2061 add_segment( &seg );
2066 /* Notifies aspacem that an mprotect was completed successfully. The
2067 segment array is updated accordingly. Note, as with
2068 VG_(am_notify_munmap), it is not the job of this function to reject
2069 stupid mprotects, for example the client doing mprotect of
2070 non-client areas. Such requests should be intercepted earlier, by
2071 the syscall wrapper for mprotect. This function merely records
2072 whatever it is told. If the returned Bool is True, the caller
2073 should immediately discard translations from the specified address
/* Record a successful mprotect: split at the range boundaries, apply
   the new permission bits to every mapped segment in range, then
   re-preen.  Returns True if translations must be discarded (X bit
   being removed from a range with translations). */
2076 Bool VG_(am_notify_mprotect)( Addr start, SizeT len, UInt prot )
2079 Bool newR, newW, newX, needDiscard;
2081 aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2082 aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2087 newR = toBool(prot & VKI_PROT_READ);
2088 newW = toBool(prot & VKI_PROT_WRITE);
2089 newX = toBool(prot & VKI_PROT_EXEC);
2091 /* Discard is needed if we're dumping X permission */
2092 needDiscard = any_Ts_in_range( start, len ) && !newX;
2094 split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );
2096 iLo = find_nsegment_idx(start);
2097 iHi = find_nsegment_idx(start + len - 1);
2099 for (i = iLo; i <= iHi; i++) {
2100 /* Apply the permissions to all relevant segments. */
2101 switch (nsegments[i].kind) {
2102 case SkAnonC: case SkAnonV: case SkFileC: case SkFileV: case SkShmC:
2103 nsegments[i].hasR = newR;
2104 nsegments[i].hasW = newW;
2105 nsegments[i].hasX = newX;
2106 aspacem_assert(sane_NSegment(&nsegments[i]));
2113 /* Changing permissions could have made previously un-mergable
2114 segments mergeable. Therefore have to re-preen them. */
2115 (void)preen_nsegments();
2121 /* Notifies aspacem that an munmap completed successfully. The
2122 segment array is updated accordingly. As with
2123 VG_(am_notify_munmap), we merely record the given info, and don't
2124 check it for sensibleness. If the returned Bool is True, the
2125 caller should immediately discard translations from the specified
/* Record a successful munmap: overwrite the range with a free (or,
   outside [minAddr, maxAddr], reserved) segment.  Returns True if
   translations must be discarded; that return is elided from this
   listing. */
2128 Bool VG_(am_notify_munmap)( Addr start, SizeT len )
2132 aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2133 aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2138 needDiscard = any_Ts_in_range( start, len );
2140 init_nsegment( &seg );
2142 seg.end = start + len - 1;
2144 /* The segment becomes unused (free). Segments from above
2145 aspacem_maxAddr were originally SkResvn and so we make them so
2146 again. Note, this isn't really right when the segment straddles
2147 the aspacem_maxAddr boundary - then really it should be split in
2148 two, the lower part marked as SkFree and the upper part as
2149 SkResvn. Ah well. */
2150 if (start > aspacem_maxAddr
2151 && /* check previous comparison is meaningful */
2152 aspacem_maxAddr < Addr_MAX)
2155 /* Ditto for segments from below aspacem_minAddr. */
2156 if (seg.end < aspacem_minAddr && aspacem_minAddr > 0)
2161 add_segment( &seg );
2163 /* Unmapping could create two adjacent free segments, so a preen is
2164 needed. add_segment() will do that, so no need to here. */
2170 /*-----------------------------------------------------------------*/
2172 /*--- Handling mappings which do not arise directly from the ---*/
2173 /*--- simulation of the client. ---*/
2175 /*-----------------------------------------------------------------*/
2177 /* --- --- --- map, unmap, protect --- --- --- */
2179 /* Map a file at a fixed address for the client, and update the
2180 segment array accordingly. */
2182 SysRes VG_(am_mmap_file_fixed_client)
2183 ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset )
2185 return VG_(am_mmap_named_file_fixed_client)(start, length, prot, fd, offset, NULL);
/* Map a file at a fixed client address: validate args, consult the
   advisor, do the kernel mmap, undo it if the kernel placed it
   elsewhere, and record the new segment.  NOTE(review): several lines
   (locals, MapRequest setup, error returns, fd arguments to the mmap
   call) are elided from this listing. */
2188 SysRes VG_(am_mmap_named_file_fixed_client)
2189 ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset, const HChar *name )
2198 HChar buf[VKI_PATH_MAX];
2200 /* Not allowable. */
2202 || !VG_IS_PAGE_ALIGNED(start)
2203 || !VG_IS_PAGE_ALIGNED(offset))
2204 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2206 /* Ask for an advisory. If it's negative, fail immediately. */
/* For a fixed request the advisory must be exactly 'start'. */
2210 advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
2211 if (!ok || advised != start)
2212 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2214 /* We have been advised that the mapping is allowable at the
2215 specified address. So hand it off to the kernel, and propagate
2216 any resulting failure immediately. */
2217 // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2218 sres = VG_(am_do_mmap_NO_NOTIFY)(
2219 start, length, prot,
2220 VKI_MAP_FIXED|VKI_MAP_PRIVATE,
2223 if (sr_isError(sres))
2226 if (sr_Res(sres) != start) {
2227 /* I don't think this can happen. It means the kernel made a
2228 fixed map succeed but not at the requested location. Try to
2229 repair the damage, then return saying the mapping failed. */
2230 (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2231 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2234 /* Ok, the mapping succeeded. Now notify the interval map. */
2235 init_nsegment( &seg );
/* 'end' is inclusive and rounded to a whole number of pages. */
2238 seg.end = seg.start + VG_PGROUNDUP(length) - 1;
2239 seg.offset = offset;
2240 seg.hasR = toBool(prot & VKI_PROT_READ);
2241 seg.hasW = toBool(prot & VKI_PROT_WRITE);
2242 seg.hasX = toBool(prot & VKI_PROT_EXEC);
2243 if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
/* Prefer the caller-supplied name; fall back to resolving the fd. */
2249 seg.fnIdx = allocate_segname( name );
2250 } else if (ML_(am_resolve_filename)(fd, buf, VKI_PATH_MAX)) {
2251 seg.fnIdx = allocate_segname( buf );
2253 add_segment( &seg );
2260 /* Map anonymously at a fixed address for the client, and update
2261 the segment array accordingly. */
/* Map anonymous memory at a fixed client address: same structure as
   the file-backed variant above, minus the file bookkeeping.
   NOTE(review): locals, MapRequest setup and some returns are elided
   from this listing. */
2263 SysRes VG_(am_mmap_anon_fixed_client) ( Addr start, SizeT length, UInt prot )
2271 /* Not allowable. */
2272 if (length == 0 || !VG_IS_PAGE_ALIGNED(start))
2273 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2275 /* Ask for an advisory. If it's negative, fail immediately. */
2279 advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
2280 if (!ok || advised != start)
2281 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2283 /* We have been advised that the mapping is allowable at the
2284 specified address. So hand it off to the kernel, and propagate
2285 any resulting failure immediately. */
2286 // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2287 sres = VG_(am_do_mmap_NO_NOTIFY)(
2288 start, length, prot,
2289 VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2292 if (sr_isError(sres))
2295 if (sr_Res(sres) != start) {
2296 /* I don't think this can happen. It means the kernel made a
2297 fixed map succeed but not at the requested location. Try to
2298 repair the damage, then return saying the mapping failed. */
2299 (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2300 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2303 /* Ok, the mapping succeeded. Now notify the interval map. */
2304 init_nsegment( &seg );
2307 seg.end = seg.start + VG_PGROUNDUP(length) - 1;
2308 seg.hasR = toBool(prot & VKI_PROT_READ);
2309 seg.hasW = toBool(prot & VKI_PROT_WRITE);
2310 seg.hasX = toBool(prot & VKI_PROT_EXEC);
2311 add_segment( &seg );
2318 /* Map anonymously at an unconstrained address for the client, and
2319 update the segment array accordingly. */
/* Map anonymous client memory at an address of aspacem's choosing:
   let the advisor pick a hole, then MAP_FIXED onto the advised
   address.  NOTE(review): locals, MapRequest setup and the guard
   conditions on the two early error returns are elided from this
   listing. */
2321 SysRes VG_(am_mmap_anon_float_client) ( SizeT length, Int prot )
2329 /* Not allowable. */
2331 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2333 /* Ask for an advisory. If it's negative, fail immediately. */
2337 advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
2339 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2341 /* We have been advised that the mapping is allowable at the
2342 advised address. So hand it off to the kernel, and propagate
2343 any resulting failure immediately. */
2344 // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2345 sres = VG_(am_do_mmap_NO_NOTIFY)(
2346 advised, length, prot,
2347 VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2350 if (sr_isError(sres))
2353 if (sr_Res(sres) != advised) {
2354 /* I don't think this can happen. It means the kernel made a
2355 fixed map succeed but not at the requested location. Try to
2356 repair the damage, then return saying the mapping failed. */
2357 (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2358 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2361 /* Ok, the mapping succeeded. Now notify the interval map. */
2362 init_nsegment( &seg );
2364 seg.start = advised;
2365 seg.end = seg.start + VG_PGROUNDUP(length) - 1;
2366 seg.hasR = toBool(prot & VKI_PROT_READ);
2367 seg.hasW = toBool(prot & VKI_PROT_WRITE);
2368 seg.hasX = toBool(prot & VKI_PROT_EXEC);
2369 add_segment( &seg );
2376 /* Similarly, acquire new address space for the client but with
2377 considerable restrictions on what can be done with it: (1) the
2378 actual protections may exceed those stated in 'prot', (2) the
2379 area's protections cannot be later changed using any form of
2380 mprotect, and (3) the area cannot be freed using any form of
2381 munmap. On Linux this behaves the same as
2382 VG_(am_mmap_anon_float_client). On AIX5 this *may* allocate memory
2383 by using sbrk, so as to make use of large pages on AIX. */
/* On this (Linux/Darwin) build the function is a pure pass-through. */
2385 SysRes VG_(am_sbrk_anon_float_client) ( SizeT length, Int prot )
2387 return VG_(am_mmap_anon_float_client) ( length, prot );
2391 /* Map anonymously at an unconstrained address for V, and update the
2392 segment array accordingly. This is fundamentally how V allocates
2393 itself more address space when needed. */
/* The mapping is always RWX (see the protections passed below); the
   segment recorded is a Valgrind-owned (SkAnonV) area.
   NOTE(review): elided listing -- the zero-length check (2403/2405),
   the advisory-failure test (2413), and the mmap argument lists are
   only partly visible here. */
2395 SysRes VG_(am_mmap_anon_float_valgrind)( SizeT length )
2403 /* Not allowable. */
2405 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2407 /* Ask for an advisory. If it's negative, fail immediately. */
2411 advised = VG_(am_get_advisory)( &req, False/*valgrind*/, &ok );
2413 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2415 // On Darwin, for anonymous maps you can pass in a tag which is used by
2416 // programs like vmmap for statistical purposes.
2417 #ifndef VM_TAG_VALGRIND
2418 # define VM_TAG_VALGRIND 0
2421 /* We have been advised that the mapping is allowable at the
2422 specified address. So hand it off to the kernel, and propagate
2423 any resulting failure immediately. */
2424 /* GrP fixme darwin: use advisory as a hint only, otherwise syscall in
2425 another thread can pre-empt our spot. [At one point on the DARWIN
2426 branch the VKI_MAP_FIXED was commented out; unclear if this is
2427 necessary or not given the second Darwin-only call that immediately
2428 follows if this one fails. --njn] */
2429 sres = VG_(am_do_mmap_NO_NOTIFY)(
2431 VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
2432 VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2435 #if defined(VGO_darwin)
2436 if (sr_isError(sres)) {
2437 /* try again, ignoring the advisory */
/* Darwin-only fallback: retry without MAP_FIXED so the kernel picks
   any free address. */
2438 sres = VG_(am_do_mmap_NO_NOTIFY)(
2440 VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
2441 /*VKI_MAP_FIXED|*/VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2446 if (sr_isError(sres))
2449 #if defined(VGO_linux)
/* Linux only: with MAP_FIXED, a success at a different address is a
   kernel anomaly -- undo it and report failure. */
2450 if (sr_Res(sres) != advised) {
2451 /* I don't think this can happen. It means the kernel made a
2452 fixed map succeed but not at the requested location. Try to
2453 repair the damage, then return saying the mapping failed. */
2454 (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2455 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2459 /* Ok, the mapping succeeded. Now notify the interval map. */
2460 init_nsegment( &seg );
2462 seg.start = sr_Res(sres);
2463 seg.end = seg.start + VG_PGROUNDUP(length) - 1;
2467 add_segment( &seg );
2473 /* Really just a wrapper around VG_(am_mmap_anon_float_valgrind). */
/* Convenience allocator used by tools for shadow memory: returns a
   plain pointer, or NULL on failure instead of a SysRes error. */
2475 void* VG_(am_shadow_alloc)(SizeT size)
2477 SysRes sres = VG_(am_mmap_anon_float_valgrind)( size );
2478 return sr_isError(sres) ? NULL : (void*)sr_Res(sres);
2481 /* Same comments apply as per VG_(am_sbrk_anon_float_client). On
2482 Linux this behaves the same as VG_(am_mmap_anon_float_valgrind). */
/* Pure pass-through on this build; 'cszB' is the requested size in bytes. */
2484 SysRes VG_(am_sbrk_anon_float_valgrind)( SizeT cszB )
2486 return VG_(am_mmap_anon_float_valgrind)( cszB );
2490 /* Map a file at an unconstrained address for V, and update the
2491 segment array accordingly. Use the provided flags */
/* Shared worker for the two public file-mapping wrappers below.
   Records the new area as a Valgrind file segment, including device,
   inode and (when resolvable) filename metadata.
   NOTE(review): elided listing -- the 'flags' parameter named in the
   wrappers is not visible in this signature fragment, and the
   advisory-failure branch at 2516 lacks its condition here.
   NOTE(review): despite the comment at 2514 saying True/client, the
   advisory is requested with the client flag while the mapping is
   recorded for V -- confirm intent against the full source. */
2493 static SysRes VG_(am_mmap_file_float_valgrind_flags) ( SizeT length, UInt prot,
2495 Int fd, Off64T offset )
2504 HChar buf[VKI_PATH_MAX];
2506 /* Not allowable. */
2507 if (length == 0 || !VG_IS_PAGE_ALIGNED(offset))
2508 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2510 /* Ask for an advisory. If it's negative, fail immediately. */
2514 advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
2516 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2518 /* We have been advised that the mapping is allowable at the
2519 specified address. So hand it off to the kernel, and propagate
2520 any resulting failure immediately. */
2521 sres = VG_(am_do_mmap_NO_NOTIFY)(
2522 advised, length, prot,
2526 if (sr_isError(sres))
2529 if (sr_Res(sres) != advised) {
2530 /* I don't think this can happen. It means the kernel made a
2531 fixed map succeed but not at the requested location. Try to
2532 repair the damage, then return saying the mapping failed. */
2533 (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2534 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2537 /* Ok, the mapping succeeded. Now notify the interval map. */
2538 init_nsegment( &seg );
2540 seg.start = sr_Res(sres);
2541 seg.end = seg.start + VG_PGROUNDUP(length) - 1;
2542 seg.offset = offset;
2543 seg.hasR = toBool(prot & VKI_PROT_READ);
2544 seg.hasW = toBool(prot & VKI_PROT_WRITE);
2545 seg.hasX = toBool(prot & VKI_PROT_EXEC);
/* Best-effort metadata: stat the fd for dev/ino/mode, and try to
   recover the path for the segment-name table. */
2546 if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
2551 if (ML_(am_resolve_filename)(fd, buf, VKI_PATH_MAX)) {
2552 seg.fnIdx = allocate_segname( buf );
2554 add_segment( &seg );
2559 /* Map privately a file at an unconstrained address for V, and update the
2560 segment array accordingly. This is used by V for transiently
2561 mapping in object files to read their debug info. */
/* Private (copy-on-write) variant; see the _flags worker above. */
2563 SysRes VG_(am_mmap_file_float_valgrind) ( SizeT length, UInt prot,
2564 Int fd, Off64T offset )
2566 return VG_(am_mmap_file_float_valgrind_flags) (length, prot,
2567 VKI_MAP_FIXED|VKI_MAP_PRIVATE,
/* Shared-mapping variant: writes through to the underlying file.
   Otherwise identical to VG_(am_mmap_file_float_valgrind). */
2571 extern SysRes VG_(am_shared_mmap_file_float_valgrind)
2572 ( SizeT length, UInt prot, Int fd, Off64T offset )
2574 return VG_(am_mmap_file_float_valgrind_flags) (length, prot,
2575 VKI_MAP_FIXED|VKI_MAP_SHARED,
2579 /* --- --- munmap helper --- --- */
/* Common worker for client and Valgrind unmaps.  Validates the range
   (alignment, wraparound, ownership as selected by 'forClient'),
   performs the kernel munmap, then updates the segment array.
   *need_discard is set if the range held translated code.
   NOTE(review): elided listing -- the len==0 early-out and several
   error branches between the visible lines are missing from view. */
2582 SysRes am_munmap_both_wrk ( /*OUT*/Bool* need_discard,
2583 Addr start, SizeT len, Bool forClient )
2588 if (!VG_IS_PAGE_ALIGNED(start))
2592 *need_discard = False;
2593 return VG_(mk_SysRes_Success)( 0 );
/* Reject address-space wraparound. */
2596 if (start + len < len)
2599 len = VG_PGROUNDUP(len);
2600 aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2601 aspacem_assert(VG_IS_PAGE_ALIGNED(len));
/* Ownership check differs by caller: client ranges may also cover
   free space and reservations; V ranges must be V-owned. */
2604 if (!VG_(am_is_valid_for_client_or_free_or_resvn)
2605 ( start, len, VKI_PROT_NONE ))
2608 if (!is_valid_for_valgrind( start, len ))
2612 d = any_Ts_in_range( start, len );
2614 sres = ML_(am_do_munmap_NO_NOTIFY)( start, len );
2615 if (sr_isError(sres))
2618 VG_(am_notify_munmap)( start, len );
2624 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2627 /* Unmap the given address range and update the segment array
2628 accordingly. This fails if the range isn't valid for the client.
2629 If *need_discard is True after a successful return, the caller
2630 should immediately discard translations from the specified address
/* Thin wrapper selecting the client path of am_munmap_both_wrk. */
2633 SysRes VG_(am_munmap_client)( /*OUT*/Bool* need_discard,
2634 Addr start, SizeT len )
2636 return am_munmap_both_wrk( need_discard, start, len, True/*client*/ );
2639 /* Unmap the given address range and update the segment array
2640 accordingly. This fails if the range isn't valid for valgrind. */
/* V-owned areas must never contain cached translations, hence the
   assertion on need_discard after the call. */
2642 SysRes VG_(am_munmap_valgrind)( Addr start, SizeT len )
2645 SysRes r = am_munmap_both_wrk( &need_discard,
2646 start, len, False/*valgrind*/ );
2647 /* If this assertion fails, it means we allowed translations to be
2648 made from a V-owned section. Which shouldn't happen. */
2650 aspacem_assert(!need_discard);
2654 /* Let (start,len) denote an area within a single Valgrind-owned
2655 segment (anon or file). Change the ownership of [start, start+len)
2656 to the client instead. Fails if (start,len) does not denote a
2657 suitable segment. */
/* Returns False on any validation failure (wraparound, misalignment,
   wrong segment kind, range crossing a segment boundary); True after a
   successful kind flip SkFileV->SkFileC / SkAnonV->SkAnonC. */
2659 Bool VG_(am_change_ownership_v_to_c)( Addr start, SizeT len )
2665 if (start + len < start)
2667 if (!VG_IS_PAGE_ALIGNED(start) || !VG_IS_PAGE_ALIGNED(len))
2670 i = find_nsegment_idx(start);
2671 if (nsegments[i].kind != SkFileV && nsegments[i].kind != SkAnonV)
2673 if (start+len-1 > nsegments[i].end)
2676 aspacem_assert(start >= nsegments[i].start);
2677 aspacem_assert(start+len-1 <= nsegments[i].end);
2679 /* This scheme is like how mprotect works: split the to-be-changed
2680 range into its own segment(s), then mess with them (it). There
2681 should be only one. */
2682 split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );
2683 aspacem_assert(iLo == iHi);
2684 switch (nsegments[iLo].kind) {
2685 case SkFileV: nsegments[iLo].kind = SkFileC; break;
2686 case SkAnonV: nsegments[iLo].kind = SkAnonC; break;
2687 default: aspacem_assert(0); /* can't happen - guarded above */
2694 /* 'seg' must be NULL or have been obtained from
2695 VG_(am_find_nsegment), and still valid. If non-NULL, and if it
2696 denotes a SkAnonC (anonymous client mapping) area, set the .isCH
2697 (is-client-heap) flag for that area. Otherwise do nothing.
2698 (Bizarre interface so that the same code works for both Linux and
2699 AIX and does not impose inefficiencies on the Linux version.) */
2700 void VG_(am_set_segment_isCH_if_SkAnonC)( NSegment* seg )
2702 Int i = segAddr_to_index( seg );
2703 aspacem_assert(i >= 0 && i < nsegments_used);
2704 if (nsegments[i].kind == SkAnonC) {
2705 nsegments[i].isCH = True;
/* Non-SkAnonC segments must never already carry the client-heap flag. */
2707 aspacem_assert(nsegments[i].isCH == False);
2711 /* Same idea as VG_(am_set_segment_isCH_if_SkAnonC), except set the
2712 segment's hasT bit (has-cached-code) if this is SkFileC or SkAnonC
/* Marks the segment as containing cached translations, so later unmap
   or permission changes know to discard them. */
2714 void VG_(am_set_segment_hasT_if_SkFileC_or_SkAnonC)( NSegment* seg )
2716 Int i = segAddr_to_index( seg );
2717 aspacem_assert(i >= 0 && i < nsegments_used);
2718 if (nsegments[i].kind == SkAnonC || nsegments[i].kind == SkFileC) {
2719 nsegments[i].hasT = True;
2724 /* --- --- --- reservations --- --- --- */
2726 /* Create a reservation from START .. START+LENGTH-1, with the given
2727 ShrinkMode. When checking whether the reservation can be created,
2728 also ensure that at least abs(EXTRA) extra free bytes will remain
2729 above (> 0) or below (< 0) the reservation.
2731 The reservation will only be created if it, plus the extra-zone,
2732 falls entirely within a single free segment. The returned Bool
2733 indicates whether the creation succeeded. */
/* NOTE(review): elided listing -- the end2 initialisation (presumably
   end2 = end1) between 2746 and 2749, the startI!=endI failure test
   around 2764-2766, and the seg.kind/smode assignments near 2775-2779
   are not visible here; confirm against the full source. */
2735 Bool VG_(am_create_reservation) ( Addr start, SizeT length,
2736 ShrinkMode smode, SSizeT extra )
2741 /* start and end, not taking into account the extra space. */
2742 Addr start1 = start;
2743 Addr end1 = start + length - 1;
2745 /* start and end, taking into account the extra space. */
2746 Addr start2 = start1;
2749 if (extra < 0) start2 += extra; // this moves it down :-)
2750 if (extra > 0) end2 += extra;
2752 aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2753 aspacem_assert(VG_IS_PAGE_ALIGNED(start+length));
2754 aspacem_assert(VG_IS_PAGE_ALIGNED(start2));
2755 aspacem_assert(VG_IS_PAGE_ALIGNED(end2+1));
2757 startI = find_nsegment_idx( start2 );
2758 endI = find_nsegment_idx( end2 );
2760 /* If the start and end points don't fall within the same (free)
2761 segment, we're hosed. This does rely on the assumption that all
2762 mergeable adjacent segments can be merged, but add_segment()
2763 should ensure that. */
2767 if (nsegments[startI].kind != SkFree)
2770 /* Looks good - make the reservation. */
2771 aspacem_assert(nsegments[startI].start <= start2);
2772 aspacem_assert(end2 <= nsegments[startI].end);
2774 init_nsegment( &seg );
2776 seg.start = start1; /* NB: extra space is not included in the
2780 add_segment( &seg );
2787 /* Let SEG be an anonymous client mapping. This fn extends the
2788 mapping by DELTA bytes, taking the space from a reservation section
2789 which must be adjacent. If DELTA is positive, the segment is
2790 extended forwards in the address space, and the reservation must be
2791 the next one along. If DELTA is negative, the segment is extended
2792 backwards in the address space and the reservation must be the
2793 previous one. DELTA must be page aligned. abs(DELTA) must not
2794 exceed the size of the reservation segment minus one page, that is,
2795 the reservation segment after the operation must be at least one
/* Returns True on success.  The reservation segment always retains at
   least one page, so it never vanishes from the segment array.
   NOTE(review): elided listing -- the delta==0 early-out, the segR
   index computations (presumably segA+1 / segA-1), and the negation
   of delta on the backwards path are not visible here. */
2798 Bool VG_(am_extend_into_adjacent_reservation_client) ( NSegment* seg,
2805 /* Find the segment array index for SEG. If the assertion fails it
2806 probably means you passed in a bogus SEG. */
2807 segA = segAddr_to_index( seg );
2808 aspacem_assert(segA >= 0 && segA < nsegments_used);
2810 if (nsegments[segA].kind != SkAnonC)
/* Reproduce the existing client protections for the newly added pages. */
2816 prot = (nsegments[segA].hasR ? VKI_PROT_READ : 0)
2817 | (nsegments[segA].hasW ? VKI_PROT_WRITE : 0)
2818 | (nsegments[segA].hasX ? VKI_PROT_EXEC : 0);
2820 aspacem_assert(VG_IS_PAGE_ALIGNED(delta<0 ? -delta : delta));
2824 /* Extending the segment forwards. */
/* The following checks also enforce the at-least-one-page-left rule
   via 'delta + VKI_PAGE_SIZE <= reservation size'. */
2826 if (segR >= nsegments_used
2827 || nsegments[segR].kind != SkResvn
2828 || nsegments[segR].smode != SmLower
2829 || nsegments[segR].start != nsegments[segA].end + 1
2830 || delta + VKI_PAGE_SIZE
2831 > (nsegments[segR].end - nsegments[segR].start + 1))
2834 /* Extend the kernel's mapping. */
2835 // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2836 sres = VG_(am_do_mmap_NO_NOTIFY)(
2837 nsegments[segR].start, delta,
2839 VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2842 if (sr_isError(sres))
2843 return False; /* kernel bug if this happens? */
2844 if (sr_Res(sres) != nsegments[segR].start) {
2845 /* kernel bug if this happens? */
2846 (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), delta );
2850 /* Ok, success with the kernel. Update our structures. */
2851 nsegments[segR].start += delta;
2852 nsegments[segA].end += delta;
2853 aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
2857 /* Extending the segment backwards. */
/* By here delta has been negated (elided), so it is a positive count
   of bytes to grow downwards -- confirm against the full source. */
2859 aspacem_assert(delta > 0);
2863 || nsegments[segR].kind != SkResvn
2864 || nsegments[segR].smode != SmUpper
2865 || nsegments[segR].end + 1 != nsegments[segA].start
2866 || delta + VKI_PAGE_SIZE
2867 > (nsegments[segR].end - nsegments[segR].start + 1))
2870 /* Extend the kernel's mapping. */
2871 // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2872 sres = VG_(am_do_mmap_NO_NOTIFY)(
2873 nsegments[segA].start-delta, delta,
2875 VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2878 if (sr_isError(sres))
2879 return False; /* kernel bug if this happens? */
2880 if (sr_Res(sres) != nsegments[segA].start-delta) {
2881 /* kernel bug if this happens? */
2882 (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), delta );
2886 /* Ok, success with the kernel. Update our structures. */
2887 nsegments[segR].end -= delta;
2888 nsegments[segA].start -= delta;
2889 aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
2898 /* --- --- --- resizing/move a mapping --- --- --- */
2902 /* Let SEG be a client mapping (anonymous or file). This fn extends
2903 the mapping forwards only by DELTA bytes, and trashes whatever was
2904 in the new area. Fails if SEG is not a single client mapping or if
2905 the new area is not accessible to the client. Fails if DELTA is
2906 not page aligned. *seg is invalid after a successful return. If
2907 *need_discard is True after a successful return, the caller should
2908 immediately discard translations from the new area. */
/* NOTE(review): this block appears to sit inside an elided
   '#if HAVE_MREMAP' region (the matching #endif is visible further
   down); the extension itself is done via mremap-style
   ML_(am_do_extend_mapping_NO_NOTIFY). */
2910 Bool VG_(am_extend_map_client)( /*OUT*/Bool* need_discard,
2911 NSegment* seg, SizeT delta )
/* Copy the segment record first: add_segment() below may invalidate
   the caller's 'seg' pointer (hence the "*seg is invalid" contract). */
2915 NSegment seg_copy = *seg;
2916 SizeT seg_old_len = seg->end + 1 - seg->start;
2919 VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) BEFORE");
2921 if (seg->kind != SkFileC && seg->kind != SkAnonC)
2924 if (delta == 0 || !VG_IS_PAGE_ALIGNED(delta))
2927 xStart = seg->end+1;
/* Overflow guard on the extension range. */
2928 if (xStart + delta < delta)
2931 if (!VG_(am_is_valid_for_client_or_free_or_resvn)( xStart, delta,
2936 sres = ML_(am_do_extend_mapping_NO_NOTIFY)( seg->start,
2938 seg_old_len + delta );
2939 if (sr_isError(sres)) {
2943 /* the area must not have moved */
2944 aspacem_assert(sr_Res(sres) == seg->start);
2947 *need_discard = any_Ts_in_range( seg_copy.end+1, delta );
2949 seg_copy.end += delta;
2950 add_segment( &seg_copy );
2953 VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) AFTER");
2960 /* Remap the old address range to the new address range. Fails if any
2961 parameter is not page aligned, if the either size is zero, if any
2962 wraparound is implied, if the old address range does not fall
2963 entirely within a single segment, if the new address range overlaps
2964 with the old one, or if the old address range is not a valid client
2965 mapping. If *need_discard is True after a successful return, the
2966 caller should immediately discard translations from both specified
/* NOTE(review): elided listing -- the iLo!=iHi single-segment test
   around 2996-2998 and the function's early 'return False' bodies are
   not visible here. */
2969 Bool VG_(am_relocate_nooverlap_client)( /*OUT*/Bool* need_discard,
2970 Addr old_addr, SizeT old_len,
2971 Addr new_addr, SizeT new_len )
2977 if (old_len == 0 || new_len == 0)
2980 if (!VG_IS_PAGE_ALIGNED(old_addr) || !VG_IS_PAGE_ALIGNED(old_len)
2981 || !VG_IS_PAGE_ALIGNED(new_addr) || !VG_IS_PAGE_ALIGNED(new_len))
2984 if (old_addr + old_len < old_addr
2985 || new_addr + new_len < new_addr)
/* Non-overlap requirement: one range must end strictly before the
   other begins. */
2988 if (old_addr + old_len - 1 < new_addr
2989 || new_addr + new_len - 1 < old_addr) {
2994 iLo = find_nsegment_idx( old_addr );
2995 iHi = find_nsegment_idx( old_addr + old_len - 1 );
2999 if (nsegments[iLo].kind != SkFileC && nsegments[iLo].kind != SkAnonC)
3002 sres = ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)
3003 ( old_addr, old_len, new_addr, new_len );
3004 if (sr_isError(sres)) {
3008 aspacem_assert(sr_Res(sres) == new_addr);
3011 *need_discard = any_Ts_in_range( old_addr, old_len )
3012 || any_Ts_in_range( new_addr, new_len );
3014 seg = nsegments[iLo];
3016 /* Mark the new area based on the old seg. */
/* For file mappings, advance the file offset by however far into the
   old segment old_addr sat. */
3017 if (seg.kind == SkFileC) {
3018 seg.offset += ((ULong)old_addr) - ((ULong)seg.start);
3020 aspacem_assert(seg.kind == SkAnonC);
3021 aspacem_assert(seg.offset == 0);
3023 seg.start = new_addr;
3024 seg.end = new_addr + new_len - 1;
3025 add_segment( &seg );
3027 /* Create a free hole in the old location. */
3028 init_nsegment( &seg );
3029 seg.start = old_addr;
3030 seg.end = old_addr + old_len - 1;
3031 /* See comments in VG_(am_notify_munmap) about this SkResvn vs
3033 if (old_addr > aspacem_maxAddr
3034 && /* check previous comparison is meaningful */
3035 aspacem_maxAddr < Addr_MAX)
3040 add_segment( &seg );
3046 #endif // HAVE_MREMAP
3049 #if defined(VGO_linux)
3051 /*-----------------------------------------------------------------*/
3053 /*--- A simple parser for /proc/self/maps on Linux 2.4.X/2.6.X. ---*/
3054 /*--- Almost completely independent of the stuff above. The ---*/
3055 /*--- only function it 'exports' to the code above this comment ---*/
3056 /*--- is parse_procselfmaps. ---*/
3058 /*-----------------------------------------------------------------*/
3060 /*------BEGIN-procmaps-parser-for-Linux--------------------------*/
3062 /* Size of a smallish table used to read /proc/self/map entries. */
3063 #define M_PROCMAP_BUF 100000
3065 /* static ... to keep it out of the stack frame. */
3066 static Char procmap_buf[M_PROCMAP_BUF];
3068 /* Records length of /proc/self/maps read into procmap_buf. */
/* Invariant maintained by read_procselfmaps_into_buf(): the buffer is
   NUL-terminated at index buf_n_tot. */
3069 static Int buf_n_tot;
/* Hex digit -> value (0..15), accepting both cases.  The (elided)
   fall-through presumably returns -1 for non-hex chars; the read*
   loops below depend on that sentinel to terminate. */
3073 static Int hexdigit ( Char c )
3075 if (c >= '0' && c <= '9') return (Int)(c - '0');
3076 if (c >= 'a' && c <= 'f') return 10 + (Int)(c - 'a');
3077 if (c >= 'A' && c <= 'F') return 10 + (Int)(c - 'A');
/* Decimal digit -> value (0..9); the (elided) tail presumably
   returns -1 for non-digits. */
3081 static Int decdigit ( Char c )
3083 if (c >= '0' && c <= '9') return (Int)(c - '0');
/* Copy one char from buf into *ch; returns the number of chars
   consumed (0 at end of buffer, otherwise 1). */
3087 static Int readchar ( const Char* buf, Char* ch )
3089 if (*buf == 0) return 0;
/* Parse a hex number into *val; returns chars consumed (0 if the
   first char is not a hex digit).  No overflow check: digits beyond
   the width of UWord silently shift out the top bits. */
3094 static Int readhex ( const Char* buf, UWord* val )
3096 /* Read a word-sized hex number. */
3099 while (hexdigit(*buf) >= 0) {
3100 *val = (*val << 4) + hexdigit(*buf);
/* Same as readhex but accumulates into a 64-bit value, for fields
   (e.g. file offsets) that can exceed the native word on 32-bit. */
3106 static Int readhex64 ( const Char* buf, ULong* val )
3108 /* Read a potentially 64-bit hex number. */
3111 while (hexdigit(*buf) >= 0) {
3112 *val = (*val << 4) + hexdigit(*buf);
/* Parse a decimal number into *val; returns chars consumed.
   NOTE(review): the loop condition uses hexdigit() but the body uses
   decdigit(), so 'a'-'f'/'A'-'F' would be consumed with decdigit
   returning -1 and corrupt *val.  Harmless in practice because this
   only parses the (decimal) inode field of /proc/self/maps, but
   decdigit() in the condition would be strictly correct. */
3118 static Int readdec64 ( const Char* buf, ULong* val )
3122 while (hexdigit(*buf) >= 0) {
3123 *val = (*val * 10) + decdigit(*buf);
3130 /* Get the contents of /proc/self/maps into a static buffer. If
3131 there's a syntax error, it won't fit, or other failure, just
/* Aborts (ML_(am_barf)) on open failure, short buffer, or read error;
   on success leaves the text NUL-terminated in procmap_buf with its
   length in buf_n_tot.  Reads in a loop because /proc files may
   return short reads. */
3134 static void read_procselfmaps_into_buf ( void )
3139 /* Read the initial memory mapping from the /proc filesystem. */
3140 fd = ML_(am_open)( "/proc/self/maps", VKI_O_RDONLY, 0 );
3142 ML_(am_barf)("can't open /proc/self/maps");
3146 n_chunk = ML_(am_read)( sr_Res(fd), &procmap_buf[buf_n_tot],
3147 M_PROCMAP_BUF - buf_n_tot );
3149 buf_n_tot += n_chunk;
3150 } while ( n_chunk > 0 && buf_n_tot < M_PROCMAP_BUF );
3152 ML_(am_close)(sr_Res(fd));
/* -5 leaves slack for the terminating NUL plus a safety margin. */
3154 if (buf_n_tot >= M_PROCMAP_BUF-5)
3155 ML_(am_barf_toolow)("M_PROCMAP_BUF");
3157 ML_(am_barf)("I/O error on /proc/self/maps");
3159 procmap_buf[buf_n_tot] = 0;
3162 /* Parse /proc/self/maps. For each map entry, call
3163 record_mapping, passing it, in this order:
3165 start address in memory
3167 page protections (using the VKI_PROT_* flags)
3168 mapped file device and inode
3169 offset in file, or zero if no file
3170 filename, zero terminated, or NULL if no file
3172 So the sig of the called fn might be
3174 void (*record_mapping)( Addr start, SizeT size, UInt prot,
3175 UInt dev, UInt info,
3176 ULong foffset, UChar* filename )
3178 Note that the supplied filename is transiently stored; record_mapping
3179 should make a copy if it wants to keep it.
3181 Nb: it is important that this function does not alter the contents of
/* Linux implementation: hand-rolled parser over procmap_buf.  Each
   maps line is decoded field by field; record_mapping is invoked for
   every entry and record_gap for every hole between entries (either
   callback may be NULL).  Any malformed line is fatal (syntaxerror).
   NOTE(review): listing is elided -- the main 'while (True)' loop
   header, several closing braces, and the i = i_eol+1 line-advance
   are not visible here. */
3184 static void parse_procselfmaps (
3185 void (*record_mapping)( Addr addr, SizeT len, UInt prot,
3186 ULong dev, ULong ino, Off64T offset,
3187 const UChar* filename ),
3188 void (*record_gap)( Addr addr, SizeT len )
3192 Addr start, endPlusOne, gapStart;
3194 UChar rr, ww, xx, pp, ch, tmp;
3197 ULong foffset, dev, ino;
3199 foffset = ino = 0; /* keep gcc-4.1.0 happy */
3201 read_procselfmaps_into_buf();
3203 aspacem_assert('\0' != procmap_buf[0] && 0 != buf_n_tot);
3206 VG_(debugLog)(0, "procselfmaps", "raw:\n%s\n", procmap_buf);
3208 /* Ok, it's safely aboard. Parse the entries. */
3210 gapStart = Addr_MIN;
3212 if (i >= buf_n_tot) break;
3214 /* Read (without fscanf :) the pattern %16x-%16x %c%c%c%c %16x %2x:%2x %d */
3215 j = readhex(&procmap_buf[i], &start);
3216 if (j > 0) i += j; else goto syntaxerror;
3217 j = readchar(&procmap_buf[i], &ch);
3218 if (j == 1 && ch == '-') i += j; else goto syntaxerror;
3219 j = readhex(&procmap_buf[i], &endPlusOne);
3220 if (j > 0) i += j; else goto syntaxerror;
3222 j = readchar(&procmap_buf[i], &ch);
3223 if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
3225 j = readchar(&procmap_buf[i], &rr);
3226 if (j == 1 && (rr == 'r' || rr == '-')) i += j; else goto syntaxerror;
3227 j = readchar(&procmap_buf[i], &ww);
3228 if (j == 1 && (ww == 'w' || ww == '-')) i += j; else goto syntaxerror;
3229 j = readchar(&procmap_buf[i], &xx);
3230 if (j == 1 && (xx == 'x' || xx == '-')) i += j; else goto syntaxerror;
3231 /* This field is the shared/private flag */
3232 j = readchar(&procmap_buf[i], &pp);
3233 if (j == 1 && (pp == 'p' || pp == '-' || pp == 's'))
3234 i += j; else goto syntaxerror;
3236 j = readchar(&procmap_buf[i], &ch);
3237 if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
3239 j = readhex64(&procmap_buf[i], &foffset);
3240 if (j > 0) i += j; else goto syntaxerror;
3242 j = readchar(&procmap_buf[i], &ch);
3243 if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
3245 j = readhex(&procmap_buf[i], &maj);
3246 if (j > 0) i += j; else goto syntaxerror;
3247 j = readchar(&procmap_buf[i], &ch);
3248 if (j == 1 && ch == ':') i += j; else goto syntaxerror;
3249 j = readhex(&procmap_buf[i], &min);
3250 if (j > 0) i += j; else goto syntaxerror;
3252 j = readchar(&procmap_buf[i], &ch);
3253 if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
3255 j = readdec64(&procmap_buf[i], &ino);
3256 if (j > 0) i += j; else goto syntaxerror;
/* Fatal-diagnostics path: dump the last 50 chars before the failure
   point, then (elided) abort. */
3261 VG_(debugLog)(0, "Valgrind:",
3262 "FATAL: syntax error reading /proc/self/maps\n");
3269 for (; k <= i; k++) {
3270 buf50[m] = procmap_buf[k];
3274 VG_(debugLog)(0, "procselfmaps", "Last 50 chars: '%s'\n", buf50);
3280 /* Try and find the name of the file mapped to this segment, if
3281 it exists. Note that files can contains spaces. */
3283 // Move i to the next non-space char, which should be either a '/' or
/* NOTE(review): the character is examined before the bound check;
   safe only because procmap_buf is NUL-terminated at buf_n_tot. */
3285 while (procmap_buf[i] == ' ' && i < buf_n_tot-1) i++;
3287 // Move i_eol to the end of the line.
3289 while (procmap_buf[i_eol] != '\n' && i_eol < buf_n_tot-1) i_eol++;
3291 // If there's a filename...
3292 if (i < i_eol-1 && procmap_buf[i] == '/') {
3293 /* Minor hack: put a '\0' at the filename end for the call to
3294 'record_mapping', then restore the old char with 'tmp'. */
3295 filename = &procmap_buf[i];
3296 tmp = filename[i_eol - i];
3297 filename[i_eol - i] = '\0';
3305 if (rr == 'r') prot |= VKI_PROT_READ;
3306 if (ww == 'w') prot |= VKI_PROT_WRITE;
3307 if (xx == 'x') prot |= VKI_PROT_EXEC;
3309 /* Linux has two ways to encode a device number when it
3310 is exposed to user space (via fstat etc). The old way
3311 is the traditional unix scheme that produces a 16 bit
3312 device number with the top 8 being the major number and
3313 the bottom 8 the minor number.
3315 The new scheme allows for a 12 bit major number and
3316 a 20 bit minor number by using a 32 bit device number
3317 and putting the top 12 bits of the minor number into
3318 the top 12 bits of the device number thus leaving an
3319 extra 4 bits for the major number.
3321 If the minor and major number are both single byte
3322 values then both schemes give the same result so we
3323 use the new scheme here in case either number is
3324 outside the 0-255 range and then use fstat64 when
3325 available (or fstat on 64 bit systems) so that we
3326 should always have a new style device number and
3327 everything should match. */
3328 dev = (min & 0xff) | (maj << 8) | ((min & ~0xff) << 12);
3330 if (record_gap && gapStart < start)
3331 (*record_gap) ( gapStart, start-gapStart );
3333 if (record_mapping && start < endPlusOne)
3334 (*record_mapping) ( start, endPlusOne-start,
3336 foffset, filename );
/* Undo the temporary NUL-termination hack from 3297. */
3339 filename[i_eol - i] = tmp;
3343 gapStart = endPlusOne;
3346 # if defined(VGP_arm_linux)
3347 /* ARM puts code at the end of memory that contains processor
3348 specific stuff (cmpxchg, getting the thread local storage, etc.)
3349 This isn't specified in /proc/self/maps, so do it here. This
3350 kludgery causes the view of memory, as presented to
3351 record_gap/record_mapping, to actually reflect reality. IMO
3352 (JRS, 2010-Jan-03) the fact that /proc/.../maps does not list
3353 the commpage should be regarded as a bug in the kernel. */
3354 { const Addr commpage_start = ARM_LINUX_FAKE_COMMPAGE_START;
3355 const Addr commpage_end1 = ARM_LINUX_FAKE_COMMPAGE_END1;
3356 if (gapStart < commpage_start) {
3358 (*record_gap)( gapStart, commpage_start - gapStart );
3360 (*record_mapping)( commpage_start, commpage_end1 - commpage_start,
3361 VKI_PROT_READ|VKI_PROT_EXEC,
3362 0/*dev*/, 0/*ino*/, 0/*foffset*/,
3364 gapStart = commpage_end1;
/* Final gap: from the last mapping's end to the top of the address
   space. */
3369 if (record_gap && gapStart < Addr_MAX)
3370 (*record_gap) ( gapStart, Addr_MAX - gapStart + 1 );
3373 /*------END-procmaps-parser-for-Linux----------------------------*/
3375 /*------BEGIN-procmaps-parser-for-Darwin-------------------------*/
3377 #elif defined(VGO_darwin)
3378 #include <mach/mach.h>
3379 #include <mach/mach_vm.h>
/* Translate a Mach VM_PROT_* protection mask into the VKI_PROT_*
   encoding used by the rest of aspacem. */
3381 static unsigned int mach2vki(unsigned int vm_prot)
3384 ((vm_prot & VM_PROT_READ) ? VKI_PROT_READ : 0) |
3385 ((vm_prot & VM_PROT_WRITE) ? VKI_PROT_WRITE : 0) |
3386 ((vm_prot & VM_PROT_EXECUTE) ? VKI_PROT_EXEC : 0) ;
/* Counts mach_vm_region_recurse calls, reported by
   VG_(get_changed_segments) below. */
3389 static UInt stats_machcalls = 0;
/* Darwin implementation of the same interface as the Linux parser
   above: walks the task's VM regions via mach_vm_region_recurse and
   reports mappings/gaps through the callbacks.  Device, inode and
   filename are not available from this API, so zeros/NULL are passed.
   NOTE(review): elided listing -- the enclosing loop, the 'last' and
   'iter' bookkeeping, and the submap-descend logic are only partly
   visible here. */
3391 static void parse_procselfmaps (
3392 void (*record_mapping)( Addr addr, SizeT len, UInt prot,
3393 ULong dev, ULong ino, Off64T offset,
3394 const UChar* filename ),
3395 void (*record_gap)( Addr addr, SizeT len )
3406 mach_vm_address_t addr = iter;
3407 mach_vm_size_t size;
3408 vm_region_submap_short_info_data_64_t info;
3412 mach_msg_type_number_t info_count
3413 = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
3415 kr = mach_vm_region_recurse(mach_task_self(), &addr, &size, &depth,
3416 (vm_region_info_t)&info, &info_count);
3419 if (info.is_submap) {
3427 if (addr > last && record_gap) {
3428 (*record_gap)(last, addr - last);
3430 if (record_mapping) {
3431 (*record_mapping)(addr, size, mach2vki(info.protection),
3432 0, 0, info.offset, NULL);
/* Trailing gap up to the top of the address space ((Addr)-1). */
3437 if ((Addr)-1 > last && record_gap)
3438 (*record_gap)(last, (Addr)-1 - last);
/* Scratch state shared between VG_(get_changed_segments) and the two
   callbacks below; valid only for the duration of one call.
   NOTE(review): the elided neighbouring lines presumably declare the
   companion css_size_local / css_used_local counters used below. */
3441 Bool css_overflowed;
3442 ChangedSeg* css_local;
/* Darwin sync callback: called for each kernel mapping.  For ranges V
   thinks are free/reserved, queue an 'added' ChangedSeg; for client
   ranges, cross-check permissions; V's own ranges are ignored. */
3446 static void add_mapping_callback(Addr addr, SizeT len, UInt prot,
3447 ULong dev, ULong ino, Off64T offset,
3448 const UChar *filename)
3450 // derived from sync_check_mapping_callback()
3454 if (len == 0) return;
3456 /* The kernel should not give us wraparounds. */
3457 aspacem_assert(addr <= addr + len - 1);
3459 iLo = find_nsegment_idx( addr );
3460 iHi = find_nsegment_idx( addr + len - 1 );
3463 /* NSegments iLo .. iHi inclusive should agree with the presented
3465 for (i = iLo; i <= iHi; i++) {
3469 if (nsegments[i].kind == SkAnonV || nsegments[i].kind == SkFileV) {
3470 /* Ignore V regions */
3473 else if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn) {
3474 /* Add mapping for SkResvn regions */
/* NOTE(review): &css_local[css_used_local] is computed before the
   bounds test below; it is only dereferenced inside the guard, but
   forming the address is already suspect once css_used_local exceeds
   the array -- consider moving the test first. */
3475 ChangedSeg* cs = &css_local[css_used_local];
3476 if (css_used_local < css_size_local) {
3477 cs->is_added = True;
3479 cs->end = addr + len - 1;
3481 cs->offset = offset;
3484 css_overflowed = True;
3488 } else if (nsegments[i].kind == SkAnonC ||
3489 nsegments[i].kind == SkFileC ||
3490 nsegments[i].kind == SkShmC)
3492 /* Check permissions on client regions */
3495 if (nsegments[i].hasR) seg_prot |= VKI_PROT_READ;
3496 if (nsegments[i].hasW) seg_prot |= VKI_PROT_WRITE;
3497 # if defined(VGA_x86)
3498 // GrP fixme sloppyXcheck
3499 // darwin: kernel X ignored and spuriously changes? (vm_copy)
/* On x86 Darwin, take the kernel's word for X rather than V's. */
3500 seg_prot |= (prot & VKI_PROT_EXEC);
3502 if (nsegments[i].hasX) seg_prot |= VKI_PROT_EXEC;
3504 if (seg_prot != prot) {
3505 if (VG_(clo_trace_syscalls))
3506 VG_(debugLog)(0,"aspacem","region %p..%p permission "
3507 "mismatch (kernel %x, V %x)\n",
3508 (void*)nsegments[i].start,
3509 (void*)(nsegments[i].end+1), prot, seg_prot);
/* Darwin sync callback: called for each kernel gap.  Any segment V
   still believes is mapped inside the gap is queued as a 'removed'
   ChangedSeg. */
3518 static void remove_mapping_callback(Addr addr, SizeT len)
3520 // derived from sync_check_gap_callback()
3527 /* The kernel should not give us wraparounds. */
3528 aspacem_assert(addr <= addr + len - 1);
3530 iLo = find_nsegment_idx( addr );
3531 iHi = find_nsegment_idx( addr + len - 1 );
3533 /* NSegments iLo .. iHi inclusive should agree with the presented data. */
3534 for (i = iLo; i <= iHi; i++) {
3535 if (nsegments[i].kind != SkFree && nsegments[i].kind != SkResvn) {
3536 // V has a mapping, kernel doesn't
/* NOTE(review): same pattern as add_mapping_callback -- the element
   address is formed before the capacity check below. */
3537 ChangedSeg* cs = &css_local[css_used_local];
3538 if (css_used_local < css_size_local) {
3539 cs->is_added = False;
3540 cs->start = nsegments[i].start;
3541 cs->end = nsegments[i].end;
3546 css_overflowed = True;
3554 // Returns False if 'css' wasn't big enough.
/* Darwin resync entry point: fills css[0..css_size-1] with the
   segments that must be added/removed to bring V's view into line
   with the kernel's, via the two callbacks above.  'when'/'where' are
   purely diagnostic labels.  Not reentrant: uses the file-scope
   css_* scratch state. */
3555 Bool VG_(get_changed_segments)(
3556 const HChar* when, const HChar* where, /*OUT*/ChangedSeg* css,
3557 Int css_size, /*OUT*/Int* css_used)
3559 static UInt stats_synccalls = 1;
3560 aspacem_assert(when && where);
3563 VG_(debugLog)(0,"aspacem",
3564 "[%u,%u] VG_(get_changed_segments)(%s, %s)\n",
3565 stats_synccalls++, stats_machcalls, when, where
3568 css_overflowed = False;
3570 css_size_local = css_size;
3573 // Get the list of segs that need to be added/removed.
3574 parse_procselfmaps(&add_mapping_callback, &remove_mapping_callback);
3576 *css_used = css_used_local;
3578 if (css_overflowed) {
3579 aspacem_assert(css_used_local == css_size_local);
3582 return !css_overflowed;
3585 #endif // defined(VGO_darwin)
3587 /*------END-procmaps-parser-for-Darwin---------------------------*/
3589 #endif // defined(VGO_linux) || defined(VGO_darwin)
3591 /*--------------------------------------------------------------------*/
3593 /*--------------------------------------------------------------------*/