2 /*--------------------------------------------------------------------*/
3 /*--- The address space manager: segment initialisation and ---*/
4 /*--- tracking, stack operations ---*/
6 /*--- Implementation for Linux (and Darwin!) m_aspacemgr-linux.c ---*/
7 /*--------------------------------------------------------------------*/
10 This file is part of Valgrind, a dynamic binary instrumentation
13 Copyright (C) 2000-2010 Julian Seward
16 This program is free software; you can redistribute it and/or
17 modify it under the terms of the GNU General Public License as
18 published by the Free Software Foundation; either version 2 of the
19 License, or (at your option) any later version.
21 This program is distributed in the hope that it will be useful, but
22 WITHOUT ANY WARRANTY; without even the implied warranty of
23 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 General Public License for more details.
26 You should have received a copy of the GNU General Public License
27 along with this program; if not, write to the Free Software
28 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
31 The GNU General Public License is contained in the file COPYING.
34 #if defined(VGO_linux) || defined(VGO_darwin)
36 /* *************************************************************
37 DO NOT INCLUDE ANY OTHER FILES HERE.
38 ADD NEW INCLUDES ONLY TO priv_aspacemgr.h
39 AND THEN ONLY AFTER READING DIRE WARNINGS THERE TOO.
40 ************************************************************* */
42 #include "priv_aspacemgr.h"
46 /* Note: many of the exported functions implemented below are
47 described more fully in comments in pub_core_aspacemgr.h.
51 /*-----------------------------------------------------------------*/
55 /*-----------------------------------------------------------------*/
59 The purpose of the address space manager (aspacem) is:
61 (1) to record the disposition of all parts of the process' address
64 (2) to the extent that it can, influence layout in ways favourable
67 It is important to appreciate that whilst it can and does attempt
68 to influence layout, and usually succeeds, it isn't possible to
69 impose absolute control: in the end, the kernel is the final
70 arbiter, and can always bounce our requests.
74 The strategy is therefore as follows:
76 * Track ownership of mappings. Each one can belong either to
77 Valgrind or to the client.
79 * Try to place the client's fixed and hinted mappings at the
80 requested addresses. Fixed mappings are allowed anywhere except
81 in areas reserved by Valgrind; the client can trash its own
82 mappings if it wants. Hinted mappings are allowed providing they
83 fall entirely in free areas; if not, they will be placed by
84 aspacem in a free area.
86 * Anonymous mappings are allocated so as to keep Valgrind and
87 client areas widely separated when possible. If address space
88 runs low, then they may become intermingled: aspacem will attempt
89 to use all possible space. But under most circumstances lack of
90 address space is not a problem and so the areas will remain far
93 Searches for client space start at aspacem_cStart and will wrap
94 around the end of the available space if needed. Searches for
95 Valgrind space start at aspacem_vStart and will also wrap around.
96 Because aspacem_cStart is approximately at the start of the
97 available space and aspacem_vStart is approximately in the
98 middle, for the most part the client anonymous mappings will be
99 clustered towards the start of available space, and Valgrind ones
102 The available space is delimited by aspacem_minAddr and
103 aspacem_maxAddr. aspacem is flexible and can operate with these
104 at any (sane) setting. For 32-bit Linux, aspacem_minAddr is set
105 to some low-ish value at startup (64M) and aspacem_maxAddr is
106 derived from the stack pointer at system startup. This seems a
107 reliable way to establish the initial boundaries.
109 64-bit Linux is similar except for the important detail that the
110 upper boundary is set to 32G. The reason is so that all
111 anonymous mappings (basically all client data areas) are kept
112 below 32G, since that is the maximum range that memcheck can
113 track shadow memory using a fast 2-level sparse array. It can go
114 beyond that but runs much more slowly. The 32G limit is
115 arbitrary and is trivially changed. So, with the current
116 settings, programs on 64-bit Linux will appear to run out of
117 address space and presumably fail at the 32G limit. Given the
118 9/8 space overhead of Memcheck, that means you should be able to
119 memcheckify programs that use up to about 14G natively.
121 Note that the aspacem_minAddr/aspacem_maxAddr limits apply only to
122 anonymous mappings. The client can still do fixed and hinted maps
123 at any addresses provided they do not overlap Valgrind's segments.
124 This makes Valgrind able to load prelinked .so's at their requested
125 addresses on 64-bit platforms, even if they are very high (eg,
128 At startup, aspacem establishes the usable limits, and advises
129 m_main to place the client stack at the top of the range, which on
130 a 32-bit machine will be just below the real initial stack. One
131 effect of this is that self-hosting sort-of works, because an inner
132 valgrind will then place its client's stack just below its own
135 The segment array and segment kinds
136 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
137 The central data structure is the segment array (segments[0
138 .. nsegments_used-1]). This covers the entire address space in
139 order, giving account of every byte of it. Free spaces are
140 represented explicitly as this makes many operations simpler.
141 Mergeable adjacent segments are aggressively merged so as to create
142 a "normalised" representation (preen_nsegments).
144 There are 7 (mutually-exclusive) segment kinds, the meaning of
147 SkFree: a free space, which may be allocated either to Valgrind (V)
150 SkAnonC: an anonymous mapping belonging to C. For these, aspacem
151 tracks a boolean indicating whether or not it is part of the
152 client's heap area (can't remember why).
154 SkFileC: a file mapping belonging to C.
156 SkShmC: a shared memory segment belonging to C.
158 SkAnonV: an anonymous mapping belonging to V. These cover all V's
159 dynamic memory needs, including non-client malloc/free areas,
160 shadow memory, and the translation cache.
162 SkFileV: a file mapping belonging to V. As far as I know these are
163 only created transiently for the purposes of reading debug info.
165 SkResvn: a reservation segment.
167 These are mostly straightforward. Reservation segments have some
170 A reservation segment is unmapped from the kernel's point of view,
171 but is an area in which aspacem will not create anonymous maps
172 (either Vs or Cs). The idea is that we will try to keep it clear
173 when the choice to do so is ours. Reservation segments are
174 'invisible' from the client's point of view: it may choose to park
175 a fixed mapping in the middle of one, and that's just tough -- we
176 can't do anything about that. From the client's perspective
177 reservations are semantically equivalent to (although
178 distinguishable from, if it makes enquiries) free areas.
180 Reservations are a primitive mechanism provided for whatever
181 purposes the rest of the system wants. Currently they are used to
182 reserve the expansion space into which a growdown stack is
183 expanded, and into which the data segment is extended. Note,
184 though, those uses are entirely external to this module, which only
185 supplies the primitives.
187 Reservations may be shrunk in order that an adjoining anonymous
188 mapping may be extended. This makes dataseg/stack expansion work.
189 A reservation may not be shrunk below one page.
191 The advise/notify concept
192 ~~~~~~~~~~~~~~~~~~~~~~~~~
193 All mmap-related calls must be routed via aspacem. Calling
194 sys_mmap directly from the rest of the system is very dangerous
195 because aspacem's data structures will become out of date.
197 The fundamental mode of operation of aspacem is to support client
198 mmaps. Here's what happens (in ML_(generic_PRE_sys_mmap)):
200 * m_syswrap intercepts the mmap call. It examines the parameters
201 and identifies the requested placement constraints. There are
202 three possibilities: no constraint (MAny), hinted (MHint, "I
203 prefer X but will accept anything"), and fixed (MFixed, "X or
206 * This request is passed to VG_(am_get_advisory). This decides on
207 a placement as described in detail in Strategy above. It may
208 also indicate that the map should fail, because it would trash
209 one of Valgrind's areas, which would probably kill the system.
211 * Control returns to the wrapper. If VG_(am_get_advisory) has
212 declared that the map should fail, then it must be made to do so.
213 Usually, though, the request is considered acceptable, in which
214 case an "advised" address is supplied. The advised address
215 replaces the original address supplied by the client, and
218 Note at this point that although aspacem has been asked for
219 advice on where to place the mapping, no commitment has yet been
220 made by either it or the kernel.
222 * The adjusted request is handed off to the kernel.
224 * The kernel's result is examined. If the map succeeded, aspacem
225 is told of the outcome (VG_(am_notify_client_mmap)), so it can
226 update its records accordingly.
228 This then is the central advise-notify idiom for handling client
229 mmap/munmap/mprotect/shmat:
231 * ask aspacem for an advised placement (or a veto)
233 * if not vetoed, hand request to kernel, using the advised placement
235 * examine result, and if successful, notify aspacem of the result.
237 There are also many convenience functions, eg
238 VG_(am_mmap_anon_fixed_client), which do both phases entirely within
241 To debug all this, a sync-checker is provided. It reads
242 /proc/self/maps, compares what it sees with aspacem's records, and
243 complains if there is a difference. --sanity-level=3 runs it before
244 and after each syscall, which is a powerful, if slow way of finding
245 buggy syscall wrappers.
249 Up to and including Valgrind 2.4.1, x86 segmentation was used to
250 enforce separation of V and C, so that wild writes by C could not
251 trash V. This got called "pointercheck". Unfortunately, the new
252 more flexible memory layout, plus the need to be portable across
253 different architectures, means doing this in hardware is no longer
254 viable, and doing it in software is expensive. So at the moment we
259 /*-----------------------------------------------------------------*/
261 /*--- The Address Space Manager's state. ---*/
263 /*-----------------------------------------------------------------*/
265 /* ------ start of STATE for the address-space manager ------ */
267 /* Max number of segments we can track. */
268 #define VG_N_SEGMENTS 5000
270 /* Max number of segment file names we can track. */
271 #define VG_N_SEGNAMES 1000
273 /* Max length of a segment file name. */
274 #define VG_MAX_SEGNAMELEN 1000
/* Fixed-size name buffer inside each SegName table entry.
   (Excerpt: the rest of the SegName struct declaration is elided here.) */
281 HChar fname[VG_MAX_SEGNAMELEN];
285 /* Filename table. _used is the high water mark; an entry is only
286 valid if its index >= 0, < _used, and its .inUse field == True.
287 The .mark field is used to garbage-collect dead entries.
289 static SegName segnames[VG_N_SEGNAMES];
290 static Int segnames_used = 0;
293 /* Array [0 .. nsegments_used-1] of all mappings. */
294 /* Sorted by .addr field. */
295 /* I: len may not be zero. */
296 /* I: overlapping segments are not allowed. */
297 /* I: the segments cover the entire address space precisely. */
298 /* Each segment can optionally hold an index into the filename table. */
300 static NSegment nsegments[VG_N_SEGMENTS];
301 static Int nsegments_used = 0;
/* Inclusive bounds of the whole address space the segment array covers. */
303 #define Addr_MIN ((Addr)0)
304 #define Addr_MAX ((Addr)(-1ULL))
308 // The smallest address that aspacem will try to allocate
309 static Addr aspacem_minAddr = 0;
311 // The largest address that aspacem will try to allocate
312 static Addr aspacem_maxAddr = 0;
314 // Where aspacem will start looking for client space
315 static Addr aspacem_cStart = 0;
317 // Where aspacem will start looking for Valgrind space
318 static Addr aspacem_vStart = 0;
/* Run the kernel-vs-aspacem sync checker when --sanity-level >= 3.
   NOTE(review): the ">= 3" living inside VG_() looks odd but appears to
   rely on VG_()'s token-pasting only touching the first token --
   confirm against pub_tool_basics.h before "fixing". */
321 #define AM_SANITY_CHECK \
323 if (VG_(clo_sanity_level >= 3)) \
324 aspacem_assert(VG_(am_do_sync_check) \
325 (__PRETTY_FUNCTION__,__FILE__,__LINE__)); \
328 /* ------ end of STATE for the address-space manager ------ */
330 /* ------ Forwards decls ------ */
/* Cached binary search over nsegments[]; defined later in this file. */
332 static Int find_nsegment_idx ( Addr a );
/* Walk /proc/self/maps, invoking record_mapping for each mapped
   interval and record_gap for each unmapped interval. */
334 static void parse_procselfmaps (
335 void (*record_mapping)( Addr addr, SizeT len, UInt prot,
336 ULong dev, ULong ino, Off64T offset,
337 const UChar* filename ),
338 void (*record_gap)( Addr addr, SizeT len )
341 /* ----- Hacks to do with the "commpage" on arm-linux ----- */
342 /* Not that I have anything against the commpage per se. It's just
343 that it's not listed in /proc/self/maps, which is a royal PITA --
344 we have to fake it up, in parse_procselfmaps. */
345 #if defined(VGP_arm_linux)
346 # define ARM_LINUX_FAKE_COMMPAGE_START 0xFFFF0000
347 # define ARM_LINUX_FAKE_COMMPAGE_END1 0xFFFFF000
351 /*-----------------------------------------------------------------*/
353 /*--- SegName array management. ---*/
355 /*-----------------------------------------------------------------*/
357 /* Searches the filename table to find an index for the given name.
358 If none is found, an index is allocated and the name stored. If no
359 space is available we just give up. If the string is too long to
/* Return an index into segnames[] for 'name': reuse an existing
   in-use entry holding the same string, otherwise store the name in a
   free slot, advancing the high-water mark if none is free.  Barfs
   via ML_(am_barf_toolow) when the table is exhausted.
   NOTE(review): several original lines (early-return/error paths) are
   elided in this excerpt, so the visible control flow is incomplete. */
362 static Int allocate_segname ( const HChar* name )
366 aspacem_assert(name);
368 if (0) VG_(debugLog)(0,"aspacem","allocate_segname %s\n", name);
/* reject names too long for the fixed-size fname[] buffer */
370 len = VG_(strlen)(name);
371 if (len >= VG_MAX_SEGNAMELEN-1) {
375 /* first see if we already have the name. */
376 for (i = 0; i < segnames_used; i++) {
377 if (!segnames[i].inUse)
379 if (0 == VG_(strcmp)(name, &segnames[i].fname[0])) {
384 /* no we don't. So look for a free slot. */
385 for (i = 0; i < segnames_used; i++)
386 if (!segnames[i].inUse)
389 if (i == segnames_used) {
390 /* no free slots .. advance the high-water mark. */
391 if (segnames_used+1 < VG_N_SEGNAMES) {
395 ML_(am_barf_toolow)("VG_N_SEGNAMES");
/* claim slot i and copy the name in, NUL-terminated */
400 segnames[i].inUse = True;
401 for (j = 0; j < len; j++)
402 segnames[i].fname[j] = name[j];
403 aspacem_assert(len < VG_MAX_SEGNAMELEN);
404 segnames[i].fname[len] = 0;
409 /*-----------------------------------------------------------------*/
411 /*--- Displaying the segment array. ---*/
413 /*-----------------------------------------------------------------*/
/* Map a SegKind to a fixed-width 4-char display tag: client-owned
   kinds in lower case, Valgrind-owned in upper case, "????" for
   anything unrecognised.  (Excerpt: switch header/braces elided.) */
415 static HChar* show_SegKind ( SegKind sk )
418 case SkFree: return " ";
419 case SkAnonC: return "anon";
420 case SkAnonV: return "ANON";
421 case SkFileC: return "file";
422 case SkFileV: return "FILE";
423 case SkShmC: return "shm ";
424 case SkResvn: return "RSVN";
425 default: return "????";
/* Map a ShrinkMode (used by reservation segments) to a printable
   name.  (Excerpt: switch header/braces elided.) */
429 static HChar* show_ShrinkMode ( ShrinkMode sm )
432 case SmLower: return "SmLower";
433 case SmUpper: return "SmUpper";
434 case SmFixed: return "SmFixed";
435 default: return "Sm?????";
/* Format the inclusive length of [start, end] into 'buf' using a
   unit scaled to the magnitude (the tiers step at 1<<20, 1<<30,
   1<<40).  The chosen format string ('fmt') is elided from this
   excerpt along with the branch bodies. */
439 static void show_len_concisely ( /*OUT*/HChar* buf, Addr start, Addr end )
/* inclusive length, computed in 64 bits to avoid Addr overflow */
442 ULong len = ((ULong)end) - ((ULong)start) + 1;
444 if (len < 10*1000*1000ULL) {
447 else if (len < 999999ULL * (1ULL<<20)) {
451 else if (len < 999999ULL * (1ULL<<30)) {
455 else if (len < 999999ULL * (1ULL<<40)) {
463 ML_(am_sprintf)(buf, fmt, len);
467 /* Show full details of an NSegment */
/* Print every field of one NSegment on one line: kind tag, address
   range, concise length, rwxTH permission flags, shrink mode,
   dev/ino/offset, filename index, mark, and the resolved filename
   (or "(none)" when fnIdx doesn't reference a valid table entry). */
469 static void __attribute__ ((unused))
470 show_nsegment_full ( Int logLevel, Int segNo, NSegment* seg )
473 HChar* name = "(none)";
/* only trust fnIdx if it indexes a live, non-empty table entry */
475 if (seg->fnIdx >= 0 && seg->fnIdx < segnames_used
476 && segnames[seg->fnIdx].inUse
477 && segnames[seg->fnIdx].fname[0] != 0)
478 name = segnames[seg->fnIdx].fname;
480 show_len_concisely(len_buf, seg->start, seg->end);
484 "%3d: %s %010llx-%010llx %s %c%c%c%c%c %s "
485 "d=0x%03llx i=%-7lld o=%-7lld (%d) m=%d %s\n",
486 segNo, show_SegKind(seg->kind),
487 (ULong)seg->start, (ULong)seg->end, len_buf,
488 seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
489 seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
490 seg->isCH ? 'H' : '-',
491 show_ShrinkMode(seg->smode),
492 seg->dev, seg->ino, seg->offset, seg->fnIdx,
498 /* Show an NSegment in a user-friendly-ish way. */
/* Print one NSegment in a condensed, kind-specific format:
   SkFree shows only the range; anon/shm add the rwxTH flags;
   file-backed segments add dev/ino/offset/fnIdx; the remaining case
   (reservations) adds the shrink mode.  (Excerpt: the switch header
   and the VG_(debugLog) call heads are elided.) */
500 static void show_nsegment ( Int logLevel, Int segNo, NSegment* seg )
503 show_len_concisely(len_buf, seg->start, seg->end);
/* bare range only -- free segments carry no other state */
510 "%3d: %s %010llx-%010llx %s\n",
511 segNo, show_SegKind(seg->kind),
512 (ULong)seg->start, (ULong)seg->end, len_buf
516 case SkAnonC: case SkAnonV: case SkShmC:
519 "%3d: %s %010llx-%010llx %s %c%c%c%c%c\n",
520 segNo, show_SegKind(seg->kind),
521 (ULong)seg->start, (ULong)seg->end, len_buf,
522 seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
523 seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
524 seg->isCH ? 'H' : '-'
528 case SkFileC: case SkFileV:
531 "%3d: %s %010llx-%010llx %s %c%c%c%c%c d=0x%03llx "
532 "i=%-7lld o=%-7lld (%d)\n",
533 segNo, show_SegKind(seg->kind),
534 (ULong)seg->start, (ULong)seg->end, len_buf,
535 seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
536 seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
537 seg->isCH ? 'H' : '-',
538 seg->dev, seg->ino, seg->offset, seg->fnIdx
/* presumably the SkResvn case -- confirm against full source */
545 "%3d: %s %010llx-%010llx %s %c%c%c%c%c %s\n",
546 segNo, show_SegKind(seg->kind),
547 (ULong)seg->start, (ULong)seg->end, len_buf,
548 seg->hasR ? 'r' : '-', seg->hasW ? 'w' : '-',
549 seg->hasX ? 'x' : '-', seg->hasT ? 'T' : '-',
550 seg->isCH ? 'H' : '-',
551 show_ShrinkMode(seg->smode)
558 "%3d: ???? UNKNOWN SEGMENT KIND\n",
565 /* Print out the segment array (debugging only!). */
/* Dump the whole aspacem state at 'logLevel': first the in-use
   entries of the filename table, then every segment via
   show_nsegment.  Debugging aid only; 'who' tags the dump. */
566 void VG_(am_show_nsegments) ( Int logLevel, HChar* who )
569 VG_(debugLog)(logLevel, "aspacem",
570 "<<< SHOW_SEGMENTS: %s (%d segments, %d segnames)\n",
571 who, nsegments_used, segnames_used);
572 for (i = 0; i < segnames_used; i++) {
/* skip dead filename-table slots */
573 if (!segnames[i].inUse)
575 VG_(debugLog)(logLevel, "aspacem",
576 "(%2d) %s\n", i, segnames[i].fname);
578 for (i = 0; i < nsegments_used; i++)
579 show_nsegment( logLevel, i, &nsegments[i] );
580 VG_(debugLog)(logLevel, "aspacem",
585 /* Get the filename corresponding to this segment, if known and if it
586 has one. The returned name's storage cannot be assumed to be
587 persistent, so the caller should immediately copy the name
/* Return the filename recorded for 'seg', or (per the elided branch)
   presumably NULL when seg->fnIdx does not reference a live table
   entry.  The returned storage is the table's own buffer, so callers
   must copy it immediately (see comment above). */
589 HChar* VG_(am_get_filename)( NSegment const * seg )
594 if (i < 0 || i >= segnames_used || !segnames[i].inUse)
597 return &segnames[i].fname[0];
600 /* Collect up the start addresses of all non-free, non-resvn segments.
601 The interface is a bit strange in order to avoid potential
602 segment-creation races caused by dynamic allocation of the result
605 The function first computes how many entries in the result
606 buffer *starts will be needed. If this number <= nStarts,
607 they are placed in starts[0..], and the number is returned.
608 If nStarts is not large enough, nothing is written to
609 starts[0..], and the negation of the size is returned.
611 Correct use of this function may mean calling it multiple times in
612 order to establish a suitably-sized buffer. */
/* Two-pass fill of starts[]: pass 1 counts the non-free, non-resvn
   segments (nSegs); if that exceeds nStarts the function bails out
   telling the caller how much space it needs (negated size, per the
   header comment above); otherwise pass 2 copies the start addresses
   into starts[0..nSegs-1] and returns nSegs. */
614 Int VG_(am_get_segment_starts)( Addr* starts, Int nStarts )
618 /* don't pass dumbass arguments */
619 aspacem_assert(nStarts >= 0);
/* pass 1: count qualifying segments */
622 for (i = 0; i < nsegments_used; i++) {
623 if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn)
628 if (nSegs > nStarts) {
629 /* The buffer isn't big enough. Tell the caller how big it needs
634 /* There's enough space. So write into the result buffer. */
635 aspacem_assert(nSegs <= nStarts);
/* pass 2: record the start address of each qualifying segment */
638 for (i = 0; i < nsegments_used; i++) {
639 if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn)
641 starts[j] = nsegments[i].start;
645 aspacem_assert(j == nSegs); /* this should not fail */
650 /*-----------------------------------------------------------------*/
652 /*--- Sanity checking and preening of the segment array. ---*/
654 /*-----------------------------------------------------------------*/
656 /* Check representational invariants for NSegments. */
/* Representation check for one NSegment: non-NULL, non-empty and
   non-wrapping range, page-aligned bounds, .mark clear, and then
   per-kind constraints on dev/ino/offset/fnIdx/permissions (unmapped
   kinds must have all of those cleared; file kinds may carry a valid
   fnIdx).  (Excerpt: the switch header and some case heads are
   elided, so case boundaries below are approximate.) */
658 static Bool sane_NSegment ( NSegment* s )
660 if (s == NULL) return False;
662 /* No zero sized segments and no wraparounds. */
663 if (s->start >= s->end) return False;
665 /* .mark is used for admin purposes only. */
666 if (s->mark) return False;
668 /* require page alignment */
669 if (!VG_IS_PAGE_ALIGNED(s->start)) return False;
670 if (!VG_IS_PAGE_ALIGNED(s->end+1)) return False;
/* presumably the SkFree case: everything must be cleared */
677 && s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
678 && !s->hasR && !s->hasW && !s->hasX && !s->hasT
681 case SkAnonC: case SkAnonV: case SkShmC:
684 && s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
/* only client anon segments may be marked as client-heap (isCH) */
685 && (s->kind==SkAnonC ? True : !s->isCH);
687 case SkFileC: case SkFileV:
690 && (s->fnIdx == -1 ||
691 (s->fnIdx >= 0 && s->fnIdx < segnames_used
692 && segnames[s->fnIdx].inUse))
/* presumably the SkResvn case -- confirm against full source */
697 s->dev == 0 && s->ino == 0 && s->offset == 0 && s->fnIdx == -1
698 && !s->hasR && !s->hasW && !s->hasX && !s->hasT
707 /* Try merging s2 into s1, if possible. If successful, s1 is
708 modified, and True is returned. Otherwise s1 is unchanged and
709 False is returned. */
/* Try merging s2 into s1 (s2 must be the same kind and immediately
   follow s1 in address order).  On success s1 is widened to cover
   both and True is returned; otherwise s1 is untouched and False is
   returned.  Mergability is kind-specific: anon segments need equal
   rwx/isCH; file segments additionally need matching dev/ino and a
   contiguous file offset; reservations (per the last visible line)
   require both to be SmFixed. */
711 static Bool maybe_merge_nsegments ( NSegment* s1, NSegment* s2 )
712 if (s1->kind != s2->kind)
716 if (s1->end+1 != s2->start)
719 /* reject cases which would cause wraparound */
720 if (s1->start > s2->end)
729 case SkAnonC: case SkAnonV:
730 if (s1->hasR == s2->hasR && s1->hasW == s2->hasW
731 && s1->hasX == s2->hasX && s1->isCH == s2->isCH) {
/* merged segment is "translated-from" if either part was */
733 s1->hasT |= s2->hasT;
738 case SkFileC: case SkFileV:
739 if (s1->hasR == s2->hasR
740 && s1->hasW == s2->hasW && s1->hasX == s2->hasX
741 && s1->dev == s2->dev && s1->ino == s2->ino
742 && s2->offset == s1->offset
743 + ((ULong)s2->start) - ((ULong)s1->start) ) {
745 s1->hasT |= s2->hasT;
754 if (s1->smode == SmFixed && s2->smode == SmFixed) {
768 /* Sanity-check and canonicalise the segment array (merge mergable
769 segments). Returns True if any segments were merged. */
/* Sanity-check and canonicalise nsegments[] in three passes:
   (1) assert full, gapless, sane coverage of [Addr_MIN, Addr_MAX];
   (2) compact in place, merging adjacent mergeable segments via
       maybe_merge_nsegments (r reads, w writes);
   (3) mark-and-sweep the filename table, freeing entries no segment
       references any more.
   Returns True iff pass 2 reduced the segment count. */
771 static Bool preen_nsegments ( void )
773 Int i, j, r, w, nsegments_used_old = nsegments_used;
775 /* Pass 1: check the segment array covers the entire address space
776 exactly once, and also that each segment is sane. */
777 aspacem_assert(nsegments_used > 0);
778 aspacem_assert(nsegments[0].start == Addr_MIN);
779 aspacem_assert(nsegments[nsegments_used-1].end == Addr_MAX);
781 aspacem_assert(sane_NSegment(&nsegments[0]));
782 for (i = 1; i < nsegments_used; i++) {
783 aspacem_assert(sane_NSegment(&nsegments[i]));
/* adjacent segments must abut exactly: no gaps, no overlap */
784 aspacem_assert(nsegments[i-1].end+1 == nsegments[i].start);
787 /* Pass 2: merge as much as possible, using
788 maybe_merge_segments. */
790 for (r = 1; r < nsegments_used; r++) {
791 if (maybe_merge_nsegments(&nsegments[w], &nsegments[r])) {
/* not mergeable: slide nsegments[r] down to the write cursor */
796 nsegments[w] = nsegments[r];
800 aspacem_assert(w > 0 && w <= nsegments_used);
803 /* Pass 3: free up unused string table slots */
804 /* clear mark bits */
805 for (i = 0; i < segnames_used; i++)
806 segnames[i].mark = False;
/* mark every filename still referenced by some segment */
808 for (i = 0; i < nsegments_used; i++) {
809 j = nsegments[i].fnIdx;
810 aspacem_assert(j >= -1 && j < segnames_used);
812 aspacem_assert(segnames[j].inUse);
813 segnames[j].mark = True;
/* sweep: retire unmarked entries */
817 for (i = 0; i < segnames_used; i++) {
818 if (segnames[i].mark == False) {
819 segnames[i].inUse = False;
820 segnames[i].fname[0] = 0;
824 return nsegments_used != nsegments_used_old;
828 /* Check the segment array corresponds with the kernel's view of
829 memory layout. sync_check_ok returns True if no anomalies were
830 found, else False. In the latter case the mismatching segments are
833 The general idea is: we get the kernel to show us all its segments
834 and also the gaps in between. For each such interval, try and find
835 a sequence of appropriate intervals in our segment array which
836 cover or more than cover the kernel's interval, and which all have
837 suitable kinds/permissions etc.
839 Although any specific kernel interval is not matched exactly to a
840 valgrind interval or sequence thereof, eventually any disagreement
841 on mapping boundaries will be detected. This is because, if for
842 example valgrind's intervals cover a greater range than the current
843 kernel interval, it must be the case that a neighbouring free-space
844 interval belonging to valgrind cannot cover the neighbouring
845 free-space interval belonging to the kernel. So the disagreement
848 In other words, we examine each kernel interval in turn, and check
849 we do not disagree over the range of that interval. Because all of
850 the address space is examined, any disagreements must eventually be
/* Running verdict of the sync checker; set True at the start of
   VG_(am_do_sync_check) and cleared by either callback below on the
   first mismatch. */
854 static Bool sync_check_ok = False;
/* parse_procselfmaps callback for one *mapped* kernel interval.
   Checks that every aspacem segment overlapping [addr, addr+len-1]
   is a mapped kind (anon/file/shm) with matching permissions, and --
   where applicable and not excluded by the platform hacks below --
   matching dev/ino and consistent file offset.  On mismatch, clears
   sync_check_ok and prints both views.  (Excerpt: several original
   lines, including variable declarations and some closing braces,
   are elided.) */
856 static void sync_check_mapping_callback ( Addr addr, SizeT len, UInt prot,
857 ULong dev, ULong ino, Off64T offset,
858 const UChar* filename )
863 /* If a problem has already been detected, don't continue comparing
864 segments, so as to avoid flooding the output with error
866 #if !defined(VGO_darwin)
874 /* The kernel should not give us wraparounds. */
875 aspacem_assert(addr <= addr + len - 1);
877 iLo = find_nsegment_idx( addr );
878 iHi = find_nsegment_idx( addr + len - 1 );
880 /* These 5 should be guaranteed by find_nsegment_idx. */
881 aspacem_assert(0 <= iLo && iLo < nsegments_used);
882 aspacem_assert(0 <= iHi && iHi < nsegments_used);
883 aspacem_assert(iLo <= iHi);
884 aspacem_assert(nsegments[iLo].start <= addr );
885 aspacem_assert(nsegments[iHi].end >= addr + len - 1 );
887 /* x86 doesn't differentiate 'x' and 'r' (at least, all except the
888 most recent NX-bit enabled CPUs) and so recent kernels attempt
889 to provide execute protection by placing all executable mappings
890 low down in the address space and then reducing the size of the
891 code segment to prevent code at higher addresses being executed.
893 These kernels report which mappings are really executable in
894 the /proc/self/maps output rather than mirroring what was asked
895 for when each mapping was created. In order to cope with this we
896 have a sloppyXcheck mode which we enable on x86 - in this mode we
897 allow the kernel to report execute permission when we weren't
898 expecting it but not vice versa. */
899 # if defined(VGA_x86)
902 sloppyXcheck = False;
905 /* NSegments iLo .. iHi inclusive should agree with the presented
907 for (i = iLo; i <= iHi; i++) {
909 Bool same, cmp_offsets, cmp_devino;
912 /* compare the kernel's offering against ours. */
/* kernel says mapped, so our record must be a mapped kind too */
913 same = nsegments[i].kind == SkAnonC
914 || nsegments[i].kind == SkAnonV
915 || nsegments[i].kind == SkFileC
916 || nsegments[i].kind == SkFileV
917 || nsegments[i].kind == SkShmC;
/* rebuild a VKI_PROT_* mask from our segment's permission bits */
920 if (nsegments[i].hasR) seg_prot |= VKI_PROT_READ;
921 if (nsegments[i].hasW) seg_prot |= VKI_PROT_WRITE;
922 if (nsegments[i].hasX) seg_prot |= VKI_PROT_EXEC;
/* only file-backed segments have meaningful offsets/dev/ino */
925 = nsegments[i].kind == SkFileC || nsegments[i].kind == SkFileV;
928 = nsegments[i].dev != 0 || nsegments[i].ino != 0;
930 /* Consider other reasons to not compare dev/inode */
931 #if defined(VGO_linux)
932 /* bproc does some godawful hack on /dev/zero at process
933 migration, which changes the name of it, and its dev & ino */
934 if (filename && 0==VG_(strcmp)(filename, "/dev/zero (deleted)"))
937 /* hack apparently needed on MontaVista Linux */
938 if (filename && VG_(strstr)(filename, "/.lib-ro/"))
942 #if defined(VGO_darwin)
943 // GrP fixme kernel info doesn't have dev/inode
946 // GrP fixme V and kernel don't agree on offsets
950 /* If we are doing sloppy execute permission checks then we
951 allow segment to have X permission when we weren't expecting
952 it (but not vice versa) so if the kernel reported execute
953 permission then pretend that this segment has it regardless
954 of what we were expecting. */
955 if (sloppyXcheck && (prot & VKI_PROT_EXEC) != 0) {
956 seg_prot |= VKI_PROT_EXEC;
/* dev/ino and offset comparisons, gated by the flags above */
962 ? (nsegments[i].dev == dev && nsegments[i].ino == ino)
965 ? nsegments[i].start-nsegments[i].offset == addr-offset
969 Addr end = start + len - 1;
971 show_len_concisely(len_buf, start, end);
/* mismatch: record failure and print both sides for diagnosis */
973 sync_check_ok = False;
977 "segment mismatch: V's seg 1st, kernel's 2nd:\n");
978 show_nsegment_full( 0, i, &nsegments[i] );
979 VG_(debugLog)(0,"aspacem",
980 "...: .... %010llx-%010llx %s %c%c%c.. ....... "
981 "d=0x%03llx i=%-7lld o=%-7lld (.) m=. %s\n",
982 (ULong)start, (ULong)end, len_buf,
983 prot & VKI_PROT_READ ? 'r' : '-',
984 prot & VKI_PROT_WRITE ? 'w' : '-',
985 prot & VKI_PROT_EXEC ? 'x' : '-',
986 dev, ino, offset, filename ? (HChar*)filename : "(none)" );
992 /* Looks harmless. Keep going. */
/* parse_procselfmaps callback for one *unmapped* kernel interval
   (gap).  Mirror image of sync_check_mapping_callback: every aspacem
   segment overlapping [addr, addr+len-1] must be SkFree or SkResvn;
   otherwise clear sync_check_ok and print both views.  (Excerpt:
   some original lines are elided.) */
996 static void sync_check_gap_callback ( Addr addr, SizeT len )
1000 /* If a problem has already been detected, don't continue comparing
1001 segments, so as to avoid flooding the output with error
1003 #if !defined(VGO_darwin)
1011 /* The kernel should not give us wraparounds. */
1012 aspacem_assert(addr <= addr + len - 1);
1014 iLo = find_nsegment_idx( addr );
1015 iHi = find_nsegment_idx( addr + len - 1 );
1017 /* These 5 should be guaranteed by find_nsegment_idx. */
1018 aspacem_assert(0 <= iLo && iLo < nsegments_used);
1019 aspacem_assert(0 <= iHi && iHi < nsegments_used);
1020 aspacem_assert(iLo <= iHi);
1021 aspacem_assert(nsegments[iLo].start <= addr );
1022 aspacem_assert(nsegments[iHi].end >= addr + len - 1 );
1024 /* NSegments iLo .. iHi inclusive should agree with the presented
1026 for (i = iLo; i <= iHi; i++) {
1030 /* compare the kernel's offering against ours. */
/* kernel says unmapped; reservations also look unmapped to it */
1031 same = nsegments[i].kind == SkFree
1032 || nsegments[i].kind == SkResvn;
1036 Addr end = start + len - 1;
1038 show_len_concisely(len_buf, start, end);
1040 sync_check_ok = False;
1044 "segment mismatch: V's gap 1st, kernel's 2nd:\n");
1045 show_nsegment_full( 0, i, &nsegments[i] );
1046 VG_(debugLog)(0,"aspacem",
1047 " : .... %010llx-%010llx %s",
1048 (ULong)start, (ULong)end, len_buf);
1053 /* Looks harmless. Keep going. */
1058 /* Sanity check: check that Valgrind and the kernel agree on the
1059 address space layout. Prints offending segments and call point if
1060 a discrepancy is detected, but does not abort the system. Returned
1061 Bool is False if a discrepancy was found. */
/* Drive the kernel-vs-aspacem consistency check: reset the verdict,
   re-parse /proc/self/maps feeding both callbacks above, and on
   failure dump diagnostics (segment array, plus -- per the sprintf
   below -- the raw kernel maps via /bin/cat).  fn/file/line identify
   the call site for the report.  Returns False on any discrepancy. */
1063 Bool VG_(am_do_sync_check) ( const HChar* fn,
1064 const HChar* file, Int line )
1066 sync_check_ok = True;
1068 VG_(debugLog)(0,"aspacem", "do_sync_check %s:%d\n", file,line);
1069 parse_procselfmaps( sync_check_mapping_callback,
1070 sync_check_gap_callback );
1071 if (!sync_check_ok) {
1072 VG_(debugLog)(0,"aspacem",
1073 "sync check at %s:%d (%s): FAILED\n",
1075 VG_(debugLog)(0,"aspacem", "\n");
1080 VG_(am_show_nsegments)(0,"post syncheck failure");
1081 VG_(sprintf)(buf, "/bin/cat /proc/%d/maps", VG_(getpid)());
1087 return sync_check_ok;
1090 /* Hook to allow sanity checks to be done from aspacemgr-common.c. */
/* Body elided in this excerpt; presumably invokes AM_SANITY_CHECK --
   confirm against the full source. */
1091 void ML_(am_do_sanity_check)( void )
1097 /*-----------------------------------------------------------------*/
1099 /*--- Low level access / modification of the segment array. ---*/
1101 /*-----------------------------------------------------------------*/
1103 /* Binary search the interval array for a given address. Since the
1104 array covers the entire address space the search cannot fail. The
1105 _WRK function does the real work. Its caller (just below) caches
1106 the results thereof, to save time. With N_CACHE of 63 we get a hit
1107 rate exceeding 90% when running OpenOffice.
1109 Re ">> 12", it doesn't matter that the page size of some targets
1110 might be different from 12. Really "(a >> 12) % N_CACHE" is merely
1111 a hash function, and the actual cache entry is always validated
1112 correctly against the selected cache entry before use.
1114 /* Don't call find_nsegment_idx_WRK; use find_nsegment_idx instead. */
/* Uncached binary search over nsegments[] for the segment containing
   'a'.  Cannot fail, since the array covers the whole address space;
   barfs if the search space empties (which would indicate corrupt
   state).  Callers must use find_nsegment_idx, which caches results. */
1115 __attribute__((noinline))
1116 static Int find_nsegment_idx_WRK ( Addr a )
1118 Addr a_mid_lo, a_mid_hi;
1121 hi = nsegments_used-1;
1123 /* current unsearched space is from lo to hi, inclusive. */
1125 /* Not found. This can't happen. */
1126 ML_(am_barf)("find_nsegment_idx: not found");
1128 mid = (lo + hi) / 2;
1129 a_mid_lo = nsegments[mid].start;
1130 a_mid_hi = nsegments[mid].end;
1132 if (a < a_mid_lo) { hi = mid-1; continue; }
1133 if (a > a_mid_hi) { lo = mid+1; continue; }
/* a lies within nsegments[mid]; return it (return elided here) */
1134 aspacem_assert(a >= a_mid_lo && a <= a_mid_hi);
1135 aspacem_assert(0 <= mid && mid < nsegments_used);
/* Cached front end for find_nsegment_idx_WRK.  Hashes the page number
   of 'a' into a small direct-mapped cache of (pageno, segidx) pairs.
   Entries are validated against nsegments[] before use, so a stale
   entry can never produce a wrong answer (see the ">> 12" comment
   above). */
1140 inline static Int find_nsegment_idx ( Addr a )
1143 static Addr cache_pageno[N_CACHE];
1144 static Int cache_segidx[N_CACHE];
1145 static Bool cache_inited = False;
/* Query/miss counters; only read by the disabled stats printout
   below. */
1147 static UWord n_q = 0;
1148 static UWord n_m = 0;
/* One-time initialisation: mark every cache entry invalid (-1). */
1152 if (LIKELY(cache_inited)) {
1155 for (ix = 0; ix < N_CACHE; ix++) {
1156 cache_pageno[ix] = 0;
1157 cache_segidx[ix] = -1;
1159 cache_inited = True;
/* Direct-mapped lookup: page number modulo the cache size. */
1162 ix = (a >> 12) % N_CACHE;
/* Disabled ("0 &&") periodic hit-rate printout. */
1165 if (0 && 0 == (n_q & 0xFFFF))
1166 VG_(debugLog)(0,"xxx","find_nsegment_idx: %lu %lu\n", n_q, n_m);
/* Hit only if the stored index is in range and the stored segment
   really contains 'a'. */
1168 if ((a >> 12) == cache_pageno[ix]
1169 && cache_segidx[ix] >= 0
1170 && cache_segidx[ix] < nsegments_used
1171 && nsegments[cache_segidx[ix]].start <= a
1172 && a <= nsegments[cache_segidx[ix]].end) {
1174 /* aspacem_assert( cache_segidx[ix] == find_nsegment_idx_WRK(a) ); */
1175 return cache_segidx[ix];
/* Miss: do the real binary search and refill this cache slot. */
1179 cache_segidx[ix] = find_nsegment_idx_WRK(a);
1180 cache_pageno[ix] = a >> 12;
1181 return cache_segidx[ix];
1187 /* Finds the segment containing 'a'. Only returns file/anon/resvn
1188 segments. This returns a 'NSegment const *' - a pointer to
1190 NSegment const * VG_(am_find_nsegment) ( Addr a )
1192 Int i = find_nsegment_idx(a);
/* The located segment must genuinely contain 'a'. */
1193 aspacem_assert(i >= 0 && i < nsegments_used);
1194 aspacem_assert(nsegments[i].start <= a);
1195 aspacem_assert(a <= nsegments[i].end);
/* Free areas are not real segments; presumably NULL is returned for
   them (the branch body is elided in this excerpt) -- TODO confirm. */
1196 if (nsegments[i].kind == SkFree)
1199 return &nsegments[i];
1203 /* Given a pointer to a seg, tries to figure out which one it is in
1204 nsegments[..]. Very paranoid. */
/* Returns the index of 'seg' within nsegments[], or (presumably) a
   negative value when 'seg' does not point into the array -- the
   failure returns are elided in this excerpt. */
1205 static Int segAddr_to_index ( NSegment* seg )
/* Reject pointers outside the live part of the array. */
1208 if (seg < &nsegments[0] || seg >= &nsegments[nsegments_used])
/* Recover the index from the byte offset into the array. */
1210 i = ((UChar*)seg - (UChar*)(&nsegments[0])) / sizeof(NSegment);
1211 if (i < 0 || i >= nsegments_used)
/* Final paranoia: the computed index must map back to 'seg' exactly,
   i.e. 'seg' was element-aligned. */
1213 if (seg == &nsegments[i])
1219 /* Find the next segment along from 'here', if it is a file/anon/resvn
/* segment.  'fwds' presumably selects search direction (forwards vs
   backwards) -- the index-stepping lines are elided here; confirm
   against the full source. */
1221 NSegment const * VG_(am_next_nsegment) ( NSegment* here, Bool fwds )
1223 Int i = segAddr_to_index(here);
/* Bail out if 'here' doesn't point into nsegments[]. */
1224 if (i < 0 || i >= nsegments_used)
1228 if (i >= nsegments_used)
/* Only "real" mapping kinds are returned; anything else (e.g. SkFree)
   falls through. */
1235 switch (nsegments[i].kind) {
1236 case SkFileC: case SkFileV: case SkShmC:
1237 case SkAnonC: case SkAnonV: case SkResvn:
1238 return &nsegments[i];
1246 /* Trivial fn: return the total amount of space in anonymous mappings,
1247 both for V and the client. Is used for printing stats in
1248 out-of-memory messages. */
1249 ULong VG_(am_get_anonsize_total)( void )
/* Sum the sizes (end - start + 1) of all SkAnonC/SkAnonV segments. */
1253 for (i = 0; i < nsegments_used; i++) {
1254 if (nsegments[i].kind == SkAnonC || nsegments[i].kind == SkAnonV) {
/* Widen to ULong before the arithmetic so the size sum cannot
   overflow an Addr. */
1255 total += (ULong)nsegments[i].end
1256 - (ULong)nsegments[i].start + 1ULL;
1263 /* Test if a piece of memory is addressable by the client with at
1264 least the "prot" protection permissions by examining the underlying
1265 segments. If freeOk is True then SkFree areas are also allowed.
1268 Bool is_valid_for_client( Addr start, SizeT len, UInt prot, Bool freeOk )
1271 Bool needR, needW, needX;
/* NOTE(review): guard condition elided here -- presumably the len==0
   case. */
1274 return True; /* somewhat dubious case */
1275 if (start + len < start)
1276 return False; /* reject wraparounds */
/* Decompose the requested protection bits. */
1278 needR = toBool(prot & VKI_PROT_READ);
1279 needW = toBool(prot & VKI_PROT_WRITE);
1280 needX = toBool(prot & VKI_PROT_EXEC);
1282 iLo = find_nsegment_idx(start);
1283 aspacem_assert(start >= nsegments[iLo].start);
1285 if (start+len-1 <= nsegments[iLo].end) {
1286 /* This is a speedup hack which avoids calling find_nsegment_idx
1287 a second time when possible. It is always correct to just
1288 use the "else" clause below, but is_valid_for_client is
1289 called a lot by the leak checker, so avoiding pointless calls
1290 to find_nsegment_idx, which can be expensive, is helpful. */
1293 iHi = find_nsegment_idx(start + len - 1);
/* Every segment overlapping [start, start+len-1] must be a
   client-visible kind (or free/reserved when freeOk) and carry each
   permission that was requested. */
1296 for (i = iLo; i <= iHi; i++) {
1297 if ( (nsegments[i].kind == SkFileC
1298 || nsegments[i].kind == SkAnonC
1299 || nsegments[i].kind == SkShmC
1300 || (nsegments[i].kind == SkFree && freeOk)
1301 || (nsegments[i].kind == SkResvn && freeOk))
1302 && (needR ? nsegments[i].hasR : True)
1303 && (needW ? nsegments[i].hasW : True)
1304 && (needX ? nsegments[i].hasX : True) ) {
1313 /* Test if a piece of memory is addressable by the client with at
1314 least the "prot" protection permissions by examining the underlying
/* segments.  Thin wrapper over is_valid_for_client: free/reserved
   areas are NOT accepted. */
1316 Bool VG_(am_is_valid_for_client)( Addr start, SizeT len,
1319 return is_valid_for_client( start, len, prot, False/*free not OK*/ );
1322 /* Variant of VG_(am_is_valid_for_client) which allows free areas to
1323 be considered part of the client's addressable space. It also
1324 considers reservations to be allowable, since from the client's
1325 point of view they don't exist. */
1326 Bool VG_(am_is_valid_for_client_or_free_or_resvn)
1327 ( Addr start, SizeT len, UInt prot )
1329 return is_valid_for_client( start, len, prot, True/*free is OK*/ );
1333 /* Test if a piece of memory is addressable by valgrind with at least
1334 PROT_NONE protection permissions by examining the underlying
/* segments.  Only V-owned mappings (SkFileV/SkAnonV) qualify. */
1336 static Bool is_valid_for_valgrind( Addr start, SizeT len )
/* NOTE(review): guard condition elided here -- presumably len==0. */
1341 return True; /* somewhat dubious case */
1342 if (start + len < start)
1343 return False; /* reject wraparounds */
/* Check every segment overlapping [start, start+len-1]. */
1345 iLo = find_nsegment_idx(start);
1346 iHi = find_nsegment_idx(start + len - 1);
1347 for (i = iLo; i <= iHi; i++) {
1348 if (nsegments[i].kind == SkFileV || nsegments[i].kind == SkAnonV) {
1358 /* Returns True if any part of the address range is marked as having
1359 translations made from it. This is used to determine when to
1360 discard code, so if in doubt return True. */
1362 static Bool any_Ts_in_range ( Addr start, SizeT len )
/* Preconditions: non-empty range, no address-space wraparound. */
1365 aspacem_assert(len > 0);
1366 aspacem_assert(start + len > start);
1367 iLo = find_nsegment_idx(start);
1368 iHi = find_nsegment_idx(start + len - 1);
/* Any overlapping segment with its hasT ("has translations") bit set
   suffices. */
1369 for (i = iLo; i <= iHi; i++) {
1370 if (nsegments[i].hasT)
1377 /*-----------------------------------------------------------------*/
1379 /*--- Modifying the segment array, and constructing segments. ---*/
1381 /*-----------------------------------------------------------------*/
1383 /* Split the segment containing 'a' into two, so that 'a' is
1384 guaranteed to be the start of a new segment. If 'a' is already the
1385 start of a segment, do nothing. */
1387 static void split_nsegment_at ( Addr a )
1391 aspacem_assert(a > 0);
1392 aspacem_assert(VG_IS_PAGE_ALIGNED(a));
1394 i = find_nsegment_idx(a);
1395 aspacem_assert(i >= 0 && i < nsegments_used);
1397 if (nsegments[i].start == a)
1398 /* 'a' is already the start point of a segment, so nothing to be
1402 /* else we have to slide the segments upwards to make a hole */
1403 if (nsegments_used >= VG_N_SEGMENTS)
1404 ML_(am_barf_toolow)("VG_N_SEGMENTS");
1405 for (j = nsegments_used-1; j > i; j--)
1406 nsegments[j+1] = nsegments[j];
/* Duplicate segment i into the hole at i+1, then cut at 'a':
   [start..a-1] stays in slot i, [a..end] becomes slot i+1. */
1409 nsegments[i+1] = nsegments[i];
1410 nsegments[i+1].start = a;
1411 nsegments[i].end = a-1;
/* For file mappings, advance the upper half's file offset so it
   matches its new start address. */
1413 if (nsegments[i].kind == SkFileV || nsegments[i].kind == SkFileC)
1414 nsegments[i+1].offset
1415 += ((ULong)nsegments[i+1].start) - ((ULong)nsegments[i].start);
1417 aspacem_assert(sane_NSegment(&nsegments[i]));
1418 aspacem_assert(sane_NSegment(&nsegments[i+1]));
1422 /* Do the minimum amount of segment splitting necessary to ensure that
1423 sLo is the first address denoted by some segment and sHi is the
1424 highest address denoted by some other segment. Returns the indices
1425 of the lowest and highest segments in the range. */
1428 void split_nsegments_lo_and_hi ( Addr sLo, Addr sHi,
/* The range must be non-empty and cover whole pages: both sLo and
   sHi+1 are page-aligned. */
1432 aspacem_assert(sLo < sHi);
1433 aspacem_assert(VG_IS_PAGE_ALIGNED(sLo));
1434 aspacem_assert(VG_IS_PAGE_ALIGNED(sHi+1));
/* Split so that sLo starts a segment and sHi ends one. */
1437 split_nsegment_at(sLo);
1439 split_nsegment_at(sHi+1);
1441 *iLo = find_nsegment_idx(sLo);
1442 *iHi = find_nsegment_idx(sHi);
/* Postconditions: the returned indices exactly bracket [sLo,sHi]. */
1443 aspacem_assert(0 <= *iLo && *iLo < nsegments_used);
1444 aspacem_assert(0 <= *iHi && *iHi < nsegments_used);
1445 aspacem_assert(*iLo <= *iHi);
1446 aspacem_assert(nsegments[*iLo].start == sLo);
1447 aspacem_assert(nsegments[*iHi].end == sHi);
1448 /* Not that I'm overly paranoid or anything, definitely not :-) */
1452 /* Add SEG to the collection, deleting/truncating any it overlaps.
1453 This deals with all the tricky cases of splitting up segments as
1456 static void add_segment ( NSegment* seg )
1458 Int i, iLo, iHi, delta;
1459 Bool segment_is_sane;
1461 Addr sStart = seg->start;
1462 Addr sEnd = seg->end;
/* The incoming segment must be non-empty and cover whole pages. */
1464 aspacem_assert(sStart <= sEnd);
1465 aspacem_assert(VG_IS_PAGE_ALIGNED(sStart));
1466 aspacem_assert(VG_IS_PAGE_ALIGNED(sEnd+1));
/* Show the offending segment before asserting, to aid debugging. */
1468 segment_is_sane = sane_NSegment(seg);
1469 if (!segment_is_sane) show_nsegment_full(0,-1,seg);
1470 aspacem_assert(segment_is_sane);
/* Make seg's boundaries coincide with segment boundaries. */
1472 split_nsegments_lo_and_hi( sStart, sEnd, &iLo, &iHi );
1474 /* Now iLo .. iHi inclusive is the range of segment indices which
1475 seg will replace. If we're replacing more than one segment,
1476 slide those above the range down to fill the hole. */
1478 aspacem_assert(delta >= 0);
1480 for (i = iLo; i < nsegments_used-delta; i++)
1481 nsegments[i] = nsegments[i+delta];
1482 nsegments_used -= delta;
/* Install the new segment, then re-merge any now-mergeable
   neighbours. */
1485 nsegments[iLo] = *seg;
1487 (void)preen_nsegments();
1488 if (0) VG_(am_show_nsegments)(0,"AFTER preen (add_segment)");
1492 /* Clear out an NSegment record. */
/* Resets it to a known baseline: fixed placement, no R/W/X permission,
   no translations, not a client-heap block.  NOTE(review): resets of
   the remaining fields are elided in this excerpt. */
1494 static void init_nsegment ( /*OUT*/NSegment* seg )
1499 seg->smode = SmFixed;
1505 seg->hasR = seg->hasW = seg->hasX = seg->hasT = seg->isCH = False;
1509 /* Make an NSegment which holds a reservation. */
1511 static void init_resvn ( /*OUT*/NSegment* seg, Addr start, Addr end )
/* The reservation must be non-empty and cover whole pages. */
1513 aspacem_assert(start < end);
1514 aspacem_assert(VG_IS_PAGE_ALIGNED(start));
1515 aspacem_assert(VG_IS_PAGE_ALIGNED(end+1));
1517 seg->kind = SkResvn;
1523 /*-----------------------------------------------------------------*/
1525 /*--- Startup, including reading /proc/self/maps. ---*/
1527 /*-----------------------------------------------------------------*/
/* Callback invoked once per mapping found while parsing
   /proc/self/maps at startup; converts the record into an NSegment and
   installs it in the segment array. */
1529 static void read_maps_callback ( Addr addr, SizeT len, UInt prot,
1530 ULong dev, ULong ino, Off64T offset,
1531 const UChar* filename )
1534 init_nsegment( &seg );
1536 seg.end = addr+len-1;
1539 seg.offset = offset;
1540 seg.hasR = toBool(prot & VKI_PROT_READ);
1541 seg.hasW = toBool(prot & VKI_PROT_WRITE);
1542 seg.hasX = toBool(prot & VKI_PROT_EXEC);
1545 /* Don't use the presence of a filename to decide if a segment in
1546 the initial /proc/self/maps is an AnonV
1547 or FileV segment as some systems don't report the filename. Use
1548 the device and inode numbers instead. Fixes bug #124528. */
1550 if (dev != 0 && ino != 0)
1553 # if defined(VGO_darwin)
1554 // GrP fixme no dev/ino on darwin
1557 # endif // defined(VGO_darwin)
1559 # if defined(VGP_arm_linux)
1560 /* The standard handling of entries read from /proc/self/maps will
1561 cause the faked up commpage segment to have type SkAnonV, which
1562 is a problem because it contains code we want the client to
1563 execute, and so later m_translate will segfault the client when
1564 it tries to go in there. Hence change the ownership of it here
1565 to the client (SkAnonC). The least-worst kludge I could think
1567 if (addr == ARM_LINUX_FAKE_COMMPAGE_START
1568 && addr + len == ARM_LINUX_FAKE_COMMPAGE_END1
1569 && seg.kind == SkAnonV)
1571 # endif // defined(VGP_arm_linux)
/* Record the filename (if any) in the segment-name table. */
1574 seg.fnIdx = allocate_segname( filename );
1576 if (0) show_nsegment( 2,0, &seg );
1577 add_segment( &seg );
1580 /* Initialise the address space manager, setting up the initial
1581 segment list, and reading /proc/self/maps into it. This must
1582 be called before any other function.
1584 Takes a pointer to the SP at the time V gained control. This is
1585 taken to be the highest usable address (more or less). Based on
1586 that (and general consultation of tea leaves, etc) return a
1587 suggested end address for the client's stack. */
1589 Addr VG_(am_startup) ( Addr sp_at_startup )
1592 Addr suggested_clstack_top;
/* aspacem assumes a flat address space in which word, address and
   size types are all pointer-sized. */
1594 aspacem_assert(sizeof(Word) == sizeof(void*));
1595 aspacem_assert(sizeof(Addr) == sizeof(void*));
1596 aspacem_assert(sizeof(SizeT) == sizeof(void*));
1597 aspacem_assert(sizeof(SSizeT) == sizeof(void*));
1599 /* Check that we can store the largest imaginable dev, ino and
1600 offset numbers in an NSegment. */
1601 aspacem_assert(sizeof(seg.dev) == 8);
1602 aspacem_assert(sizeof(seg.ino) == 8);
1603 aspacem_assert(sizeof(seg.offset) == 8);
1604 aspacem_assert(sizeof(seg.mode) == 4);
1606 /* Add a single interval covering the entire address space. */
1607 init_nsegment(&seg);
1609 seg.start = Addr_MIN;
/* Platform-specific layout: pick the usable [minAddr, maxAddr] range
   and the notional client (cStart) / valgrind (vStart) start points. */
1614 #if defined(VGO_darwin)
1616 # if VG_WORDSIZE == 4
1617 aspacem_minAddr = (Addr) 0x00001000;
1618 aspacem_maxAddr = (Addr) 0xffffffff;
1620 aspacem_cStart = aspacem_minAddr;
1621 aspacem_vStart = 0xf0000000; // 0xc0000000..0xf0000000 available
1623 aspacem_minAddr = (Addr) 0x100000000; // 4GB page zero
1624 aspacem_maxAddr = (Addr) 0x7fffffffffff;
1626 aspacem_cStart = aspacem_minAddr;
1627 aspacem_vStart = 0x700000000000; // 0x7000:00000000..0x7fff:5c000000 avail
1628 // 0x7fff:5c000000..0x7fff:ffe00000? is stack, dyld, shared cache
1631 suggested_clstack_top = -1; // ignored; Mach-O specifies its stack
1635 /* Establish address limits and block out unusable parts
1638 VG_(debugLog)(2, "aspacem",
1639 " sp_at_startup = 0x%010llx (supplied)\n",
1640 (ULong)sp_at_startup );
1642 aspacem_minAddr = (Addr) 0x04000000; // 64M
1644 # if VG_WORDSIZE == 8
1645 aspacem_maxAddr = (Addr)0x800000000 - 1; // 32G
1646 # ifdef ENABLE_INNER
/* Inner Valgrind (running under an outer Valgrind): don't reach above
   the stack pointer we were handed. */
1647 { Addr cse = VG_PGROUNDDN( sp_at_startup ) - 1;
1648 if (aspacem_maxAddr > cse)
1649 aspacem_maxAddr = cse;
1653 aspacem_maxAddr = VG_PGROUNDDN( sp_at_startup ) - 1;
/* Put the client/valgrind boundary halfway through the usable range
   (pulled down a bit in the inner-Valgrind configuration). */
1656 aspacem_cStart = aspacem_minAddr; // 64M
1657 aspacem_vStart = VG_PGROUNDUP((aspacem_minAddr + aspacem_maxAddr + 1) / 2);
1658 # ifdef ENABLE_INNER
1659 aspacem_vStart -= 0x10000000; // 256M
/* Leave at least 16M of headroom below maxAddr for the client stack. */
1662 suggested_clstack_top = aspacem_maxAddr - 16*1024*1024ULL
1667 aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_minAddr));
1668 aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_maxAddr + 1));
1669 aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_cStart));
1670 aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_vStart));
1671 aspacem_assert(VG_IS_PAGE_ALIGNED(suggested_clstack_top + 1));
1673 VG_(debugLog)(2, "aspacem",
1674 " minAddr = 0x%010llx (computed)\n",
1675 (ULong)aspacem_minAddr);
1676 VG_(debugLog)(2, "aspacem",
1677 " maxAddr = 0x%010llx (computed)\n",
1678 (ULong)aspacem_maxAddr);
1679 VG_(debugLog)(2, "aspacem",
1680 " cStart = 0x%010llx (computed)\n",
1681 (ULong)aspacem_cStart);
1682 VG_(debugLog)(2, "aspacem",
1683 " vStart = 0x%010llx (computed)\n",
1684 (ULong)aspacem_vStart);
1685 VG_(debugLog)(2, "aspacem",
1686 "suggested_clstack_top = 0x%010llx (computed)\n",
1687 (ULong)suggested_clstack_top);
/* Reserve the regions below cStart and above maxAddr so the advisor
   never hands out addresses there. */
1689 if (aspacem_cStart > Addr_MIN) {
1690 init_resvn(&seg, Addr_MIN, aspacem_cStart-1);
1693 if (aspacem_maxAddr < Addr_MAX) {
1694 init_resvn(&seg, aspacem_maxAddr+1, Addr_MAX);
1698 /* Create a 1-page reservation at the notional initial
1699 client/valgrind boundary. This isn't strictly necessary, but
1700 because the advisor does first-fit and starts searches for
1701 valgrind allocations at the boundary, this is kind of necessary
1702 in order to get it to start allocating in the right place. */
1703 init_resvn(&seg, aspacem_vStart, aspacem_vStart + VKI_PAGE_SIZE - 1);
1706 VG_(am_show_nsegments)(2, "Initial layout");
1708 VG_(debugLog)(2, "aspacem", "Reading /proc/self/maps\n");
1709 parse_procselfmaps( read_maps_callback, NULL );
1710 /* NB: on arm-linux, parse_procselfmaps automagically kludges up
1711 (iow, hands to its callbacks) a description of the ARM Commpage,
1712 since that's not listed in /proc/self/maps (kernel bug IMO). We
1713 have to fake up its existence in parse_procselfmaps and not
1714 merely add it here as an extra segment, because doing the latter
1715 causes sync checking to fail: we see we have an extra segment in
1716 the segments array, which isn't listed in /proc/self/maps.
1717 Hence we must make it appear that /proc/self/maps contained this
1718 segment all along. Sigh. */
1720 VG_(am_show_nsegments)(2, "With contents of /proc/self/maps");
1723 return suggested_clstack_top;
1727 /*-----------------------------------------------------------------*/
1729 /*--- The core query-notify mechanism. ---*/
1731 /*-----------------------------------------------------------------*/
1733 /* Query aspacem to ask where a mapping should go. */
1735 Addr VG_(am_get_advisory) ( MapRequest* req,
1739 /* This function implements allocation policy.
1741 The nature of the allocation request is determined by req, which
1742 specifies the start and length of the request and indicates
1743 whether the start address is mandatory, a hint, or irrelevant,
1744 and by forClient, which says whether this is for the client or
1747 Return values: the request can be vetoed (*ok is set to False),
1748 in which case the caller should not attempt to proceed with
1749 making the mapping. Otherwise, *ok is set to True, the caller
1750 may proceed, and the preferred address at which the mapping
1751 should happen is returned.
1753 Note that this is an advisory system only: the kernel can in
1754 fact do whatever it likes as far as placement goes, and we have
1755 no absolute control over it.
1757 Allocations will never be granted in a reserved area.
1759 The Default Policy is:
1761 Search the address space for two free intervals: one of them
1762 big enough to contain the request without regard to the
1763 specified address (viz, as if it was a floating request) and
1764 the other being able to contain the request at the specified
1765 address (viz, as if were a fixed request). Then, depending on
1766 the outcome of the search and the kind of request made, decide
1767 whether the request is allowable and what address to advise.
1769 The Default Policy is overridden by Policy Exception #1:
1771 If the request is for a fixed client map, we are prepared to
1772 grant it providing all areas inside the request are either
1773 free, reservations, or mappings belonging to the client. In
1774 other words we are prepared to let the client trash its own
1775 mappings if it wants to.
1777 The Default Policy is overridden by Policy Exception #2:
1779 If the request is for a hinted client map, we are prepared to
1780 grant it providing all areas inside the request are either
1781 free or reservations. In other words we are prepared to let
1782 the client have a hinted mapping anywhere it likes provided
1783 it does not trash either any of its own mappings or any of
1784 valgrind's mappings.
1787 Addr holeStart, holeEnd, holeLen;
1788 Bool fixed_not_required;
/* Client allocations search from cStart, valgrind's from vStart. */
1790 Addr startPoint = forClient ? aspacem_cStart : aspacem_vStart;
1792 Addr reqStart = req->rkind==MAny ? 0 : req->start;
1793 Addr reqEnd = reqStart + req->len - 1;
1794 Addr reqLen = req->len;
1796 /* These hold indices for segments found during search, or -1 if not
1801 aspacem_assert(nsegments_used > 0);
1804 VG_(am_show_nsegments)(0,"getAdvisory");
1805 VG_(debugLog)(0,"aspacem", "getAdvisory 0x%llx %lld\n",
1806 (ULong)req->start, (ULong)req->len);
1809 /* Reject zero-length requests */
1810 if (req->len == 0) {
1815 /* Reject wraparounds */
1816 if ((req->rkind==MFixed || req->rkind==MHint)
1817 && req->start + req->len < req->start) {
1822 /* ------ Implement Policy Exception #1 ------ */
1824 if (forClient && req->rkind == MFixed) {
1825 Int iLo = find_nsegment_idx(reqStart);
1826 Int iHi = find_nsegment_idx(reqEnd);
/* Every segment in the requested range must be free, reserved, or
   already client-owned. */
1828 for (i = iLo; i <= iHi; i++) {
1829 if (nsegments[i].kind == SkFree
1830 || nsegments[i].kind == SkFileC
1831 || nsegments[i].kind == SkAnonC
1832 || nsegments[i].kind == SkShmC
1833 || nsegments[i].kind == SkResvn) {
1841 /* Acceptable. Granted. */
1845 /* Not acceptable. Fail. */
1850 /* ------ Implement Policy Exception #2 ------ */
1852 if (forClient && req->rkind == MHint) {
1853 Int iLo = find_nsegment_idx(reqStart);
1854 Int iHi = find_nsegment_idx(reqEnd);
/* For a hint, only free or reserved areas may be overwritten. */
1856 for (i = iLo; i <= iHi; i++) {
1857 if (nsegments[i].kind == SkFree
1858 || nsegments[i].kind == SkResvn) {
1866 /* Acceptable. Granted. */
1870 /* Not acceptable. Fall through to the default policy. */
1873 /* ------ Implement the Default Policy ------ */
1875 /* Don't waste time looking for a fixed match if not requested to. */
1876 fixed_not_required = req->rkind == MAny;
1878 i = find_nsegment_idx(startPoint);
1880 /* Examine holes from index i back round to i-1. Record the
1881 index of the first fixed hole and the first floating hole which would
1882 satisfy the request. */
1883 for (j = 0; j < nsegments_used; j++) {
1885 if (nsegments[i].kind != SkFree) {
1887 if (i >= nsegments_used) i = 0;
1891 holeStart = nsegments[i].start;
1892 holeEnd = nsegments[i].end;
/* Holes must lie entirely within the usable address range. */
1895 aspacem_assert(holeStart <= holeEnd);
1896 aspacem_assert(aspacem_minAddr <= holeStart);
1897 aspacem_assert(holeEnd <= aspacem_maxAddr);
1899 /* See if it's any use to us. */
1900 holeLen = holeEnd - holeStart + 1;
1902 if (fixedIdx == -1 && holeStart <= reqStart && reqEnd <= holeEnd)
1905 if (floatIdx == -1 && holeLen >= reqLen)
1908 /* Don't waste time searching once we've found what we wanted. */
1909 if ((fixed_not_required || fixedIdx >= 0) && floatIdx >= 0)
1913 if (i >= nsegments_used) i = 0;
1916 aspacem_assert(fixedIdx >= -1 && fixedIdx < nsegments_used);
1918 aspacem_assert(nsegments[fixedIdx].kind == SkFree);
1920 aspacem_assert(floatIdx >= -1 && floatIdx < nsegments_used);
1922 aspacem_assert(nsegments[floatIdx].kind == SkFree);
1926 /* Now see if we found anything which can satisfy the request. */
1927 switch (req->rkind) {
1929 if (fixedIdx >= 0) {
1938 if (fixedIdx >= 0) {
1942 if (floatIdx >= 0) {
1944 return nsegments[floatIdx].start;
1949 if (floatIdx >= 0) {
1951 return nsegments[floatIdx].start;
/* Unknown request kind: internal error. */
1960 ML_(am_barf)("getAdvisory: unknown request kind");
1965 /* Convenience wrapper for VG_(am_get_advisory) for client floating or
1966 fixed requests. If start is zero, a floating request is issued; if
1967 nonzero, a fixed request at that address is issued. Same comments
1968 about return values apply. */
1970 Addr VG_(am_get_advisory_client_simple) ( Addr start, SizeT len,
/* Build a MapRequest and delegate; start==0 means "anywhere". */
1974 mreq.rkind = start==0 ? MAny : MFixed;
1977 return VG_(am_get_advisory)( &mreq, True/*client*/, ok );
1981 /* Notifies aspacem that the client completed an mmap successfully.
1982 The segment array is updated accordingly. If the returned Bool is
1983 True, the caller should immediately discard translations from the
1984 specified address range. */
1987 VG_(am_notify_client_mmap)( Addr a, SizeT len, UInt prot, UInt flags,
1988 Int fd, Off64T offset )
1990 HChar buf[VKI_PATH_MAX];
/* The kernel has already accepted this mapping, so these must hold. */
1996 aspacem_assert(len > 0);
1997 aspacem_assert(VG_IS_PAGE_ALIGNED(a));
1998 aspacem_assert(VG_IS_PAGE_ALIGNED(len));
1999 aspacem_assert(VG_IS_PAGE_ALIGNED(offset));
2001 /* Discard is needed if any of the just-trashed range had T. */
2002 needDiscard = any_Ts_in_range( a, len );
/* Build the client segment: anonymous vs file-backed per mmap flags. */
2004 init_nsegment( &seg );
2005 seg.kind = (flags & VKI_MAP_ANONYMOUS) ? SkAnonC : SkFileC;
2007 seg.end = a + len - 1;
2008 seg.hasR = toBool(prot & VKI_PROT_READ);
2009 seg.hasW = toBool(prot & VKI_PROT_WRITE);
2010 seg.hasX = toBool(prot & VKI_PROT_EXEC);
2011 if (!(flags & VKI_MAP_ANONYMOUS)) {
2012 // Nb: We ignore offset requests in anonymous mmaps (see bug #126722)
2013 seg.offset = offset;
/* For file mappings, record dev/ino/mode and, when resolvable, the
   filename of fd. */
2014 if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
2019 if (ML_(am_resolve_filename)(fd, buf, VKI_PATH_MAX)) {
2020 seg.fnIdx = allocate_segname( buf );
2023 add_segment( &seg );
2028 /* Notifies aspacem that the client completed a shmat successfully.
2029 The segment array is updated accordingly. If the returned Bool is
2030 True, the caller should immediately discard translations from the
2031 specified address range. */
2034 VG_(am_notify_client_shmat)( Addr a, SizeT len, UInt prot )
/* The kernel has already accepted this attach, so these must hold. */
2039 aspacem_assert(len > 0);
2040 aspacem_assert(VG_IS_PAGE_ALIGNED(a));
2041 aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2043 /* Discard is needed if any of the just-trashed range had T. */
2044 needDiscard = any_Ts_in_range( a, len );
/* Record the attached region with the requested permissions. */
2046 init_nsegment( &seg );
2049 seg.end = a + len - 1;
2051 seg.hasR = toBool(prot & VKI_PROT_READ);
2052 seg.hasW = toBool(prot & VKI_PROT_WRITE);
2053 seg.hasX = toBool(prot & VKI_PROT_EXEC);
2054 add_segment( &seg );
2059 /* Notifies aspacem that an mprotect was completed successfully. The
2060 segment array is updated accordingly. Note, as with
2061 VG_(am_notify_munmap), it is not the job of this function to reject
2062 stupid mprotects, for example the client doing mprotect of
2063 non-client areas. Such requests should be intercepted earlier, by
2064 the syscall wrapper for mprotect. This function merely records
2065 whatever it is told. If the returned Bool is True, the caller
2066 should immediately discard translations from the specified address
2069 Bool VG_(am_notify_mprotect)( Addr start, SizeT len, UInt prot )
2072 Bool newR, newW, newX, needDiscard;
2074 aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2075 aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2080 newR = toBool(prot & VKI_PROT_READ);
2081 newW = toBool(prot & VKI_PROT_WRITE);
2082 newX = toBool(prot & VKI_PROT_EXEC);
2084 /* Discard is needed if we're dumping X permission */
2085 needDiscard = any_Ts_in_range( start, len ) && !newX;
/* Ensure the affected range is covered by whole segments, so that
   permission bits can be updated in place. */
2087 split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );
2089 iLo = find_nsegment_idx(start);
2090 iHi = find_nsegment_idx(start + len - 1);
2092 for (i = iLo; i <= iHi; i++) {
2093 /* Apply the permissions to all relevant segments. */
2094 switch (nsegments[i].kind) {
2095 case SkAnonC: case SkAnonV: case SkFileC: case SkFileV: case SkShmC:
2096 nsegments[i].hasR = newR;
2097 nsegments[i].hasW = newW;
2098 nsegments[i].hasX = newX;
2099 aspacem_assert(sane_NSegment(&nsegments[i]));
2106 /* Changing permissions could have made previously un-mergable
2107 segments mergeable. Therefore have to re-preen them. */
2108 (void)preen_nsegments();
2114 /* Notifies aspacem that an munmap completed successfully. The
2115 segment array is updated accordingly. As with
2116 VG_(am_notify_mprotect), we merely record the given info, and don't
2117 check it for sensibleness. If the returned Bool is True, the
2118 caller should immediately discard translations from the specified
2121 Bool VG_(am_notify_munmap)( Addr start, SizeT len )
2125 aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2126 aspacem_assert(VG_IS_PAGE_ALIGNED(len));
/* Discard is needed if any of the just-unmapped range had
   translations. */
2131 needDiscard = any_Ts_in_range( start, len );
/* Replace the range with a free (or reserved, see below) segment. */
2133 init_nsegment( &seg );
2135 seg.end = start + len - 1;
2137 /* The segment becomes unused (free). Segments from above
2138 aspacem_maxAddr were originally SkResvn and so we make them so
2139 again. Note, this isn't really right when the segment straddles
2140 the aspacem_maxAddr boundary - then really it should be split in
2141 two, the lower part marked as SkFree and the upper part as
2142 SkResvn. Ah well. */
2143 if (start > aspacem_maxAddr
2144 && /* check previous comparison is meaningful */
2145 aspacem_maxAddr < Addr_MAX)
2148 /* Ditto for segments from below aspacem_minAddr. */
2149 if (seg.end < aspacem_minAddr && aspacem_minAddr > 0)
2154 add_segment( &seg );
2156 /* Unmapping could create two adjacent free segments, so a preen is
2157 needed. add_segment() will do that, so no need to here. */
2163 /*-----------------------------------------------------------------*/
2165 /*--- Handling mappings which do not arise directly from the ---*/
2166 /*--- simulation of the client. ---*/
2168 /*-----------------------------------------------------------------*/
2170 /* --- --- --- map, unmap, protect --- --- --- */
2172 /* Map a file at a fixed address for the client, and update the
2173 segment array accordingly. */
/* Thin wrapper: delegates to the named variant with a NULL name. */
2175 SysRes VG_(am_mmap_file_fixed_client)
2176 ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset )
2178 return VG_(am_mmap_named_file_fixed_client)(start, length, prot, fd, offset, NULL);
/* Map a file at a fixed client address, optionally recording 'name'
   (instead of the fd's resolved filename) as the segment name, and
   update the segment array.  Fails with EINVAL if the placement is
   vetoed by the advisor or rejected by the kernel. */
2181 SysRes VG_(am_mmap_named_file_fixed_client)
2182 ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset, const HChar *name )
2191 HChar buf[VKI_PATH_MAX];
2193 /* Not allowable. */
2195 || !VG_IS_PAGE_ALIGNED(start)
2196 || !VG_IS_PAGE_ALIGNED(offset))
2197 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2199 /* Ask for an advisory. If it's negative, fail immediately. */
/* For a fixed request the advisory must be exactly 'start'. */
2203 advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
2204 if (!ok || advised != start)
2205 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2207 /* We have been advised that the mapping is allowable at the
2208 specified address. So hand it off to the kernel, and propagate
2209 any resulting failure immediately. */
2210 // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2211 sres = VG_(am_do_mmap_NO_NOTIFY)(
2212 start, length, prot,
2213 VKI_MAP_FIXED|VKI_MAP_PRIVATE,
2216 if (sr_isError(sres))
2219 if (sr_Res(sres) != start) {
2220 /* I don't think this can happen. It means the kernel made a
2221 fixed map succeed but not at the requested location. Try to
2222 repair the damage, then return saying the mapping failed. */
2223 (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2224 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2227 /* Ok, the mapping succeeded. Now notify the interval map. */
2228 init_nsegment( &seg );
2231 seg.end = seg.start + VG_PGROUNDUP(length) - 1;
2232 seg.offset = offset;
2233 seg.hasR = toBool(prot & VKI_PROT_READ);
2234 seg.hasW = toBool(prot & VKI_PROT_WRITE);
2235 seg.hasX = toBool(prot & VKI_PROT_EXEC);
/* Prefer the caller-supplied name; otherwise resolve fd's filename. */
2236 if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
2242 seg.fnIdx = allocate_segname( name );
2243 } else if (ML_(am_resolve_filename)(fd, buf, VKI_PATH_MAX)) {
2244 seg.fnIdx = allocate_segname( buf );
2246 add_segment( &seg );
2253 /* Map anonymously at a fixed address for the client, and update
2254 the segment array accordingly. */
/* Fails with EINVAL if the placement is vetoed by the advisor or
   rejected by the kernel. */
2256 SysRes VG_(am_mmap_anon_fixed_client) ( Addr start, SizeT length, UInt prot )
2264 /* Not allowable. */
2265 if (length == 0 || !VG_IS_PAGE_ALIGNED(start))
2266 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2268 /* Ask for an advisory. If it's negative, fail immediately. */
/* For a fixed request the advisory must be exactly 'start'. */
2272 advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
2273 if (!ok || advised != start)
2274 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2276 /* We have been advised that the mapping is allowable at the
2277 specified address. So hand it off to the kernel, and propagate
2278 any resulting failure immediately. */
2279 // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2280 sres = VG_(am_do_mmap_NO_NOTIFY)(
2281 start, length, prot,
2282 VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2285 if (sr_isError(sres))
2288 if (sr_Res(sres) != start) {
2289 /* I don't think this can happen. It means the kernel made a
2290 fixed map succeed but not at the requested location. Try to
2291 repair the damage, then return saying the mapping failed. */
2292 (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2293 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2296 /* Ok, the mapping succeeded. Now notify the interval map. */
2297 init_nsegment( &seg );
2300 seg.end = seg.start + VG_PGROUNDUP(length) - 1;
2301 seg.hasR = toBool(prot & VKI_PROT_READ);
2302 seg.hasW = toBool(prot & VKI_PROT_WRITE);
2303 seg.hasX = toBool(prot & VKI_PROT_EXEC);
2304 add_segment( &seg );
2311 /* Map anonymously at an unconstrained address for the client, and
2312 update the segment array accordingly. */
2314 SysRes VG_(am_mmap_anon_float_client) ( SizeT length, Int prot )
2322 /* Not allowable. */
2324 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2326 /* Ask for an advisory. If it's negative, fail immediately. */
2330 advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
2332 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2334 /* We have been advised that the mapping is allowable at the
2335 advised address. So hand it off to the kernel, and propagate
2336 any resulting failure immediately. */
2337 // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
/* The advisor chose 'advised'; MAP_FIXED pins the kernel to it. */
2338 sres = VG_(am_do_mmap_NO_NOTIFY)(
2339 advised, length, prot,
2340 VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2343 if (sr_isError(sres))
2346 if (sr_Res(sres) != advised) {
2347 /* I don't think this can happen. It means the kernel made a
2348 fixed map succeed but not at the requested location. Try to
2349 repair the damage, then return saying the mapping failed. */
2350 (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2351 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2354 /* Ok, the mapping succeeded. Now notify the interval map. */
2355 init_nsegment( &seg );
2357 seg.start = advised;
2358 seg.end = seg.start + VG_PGROUNDUP(length) - 1;
2359 seg.hasR = toBool(prot & VKI_PROT_READ);
2360 seg.hasW = toBool(prot & VKI_PROT_WRITE);
2361 seg.hasX = toBool(prot & VKI_PROT_EXEC);
2362 add_segment( &seg );
2369 /* Similarly, acquire new address space for the client but with
2370 considerable restrictions on what can be done with it: (1) the
2371 actual protections may exceed those stated in 'prot', (2) the
2372 area's protections cannot be later changed using any form of
2373 mprotect, and (3) the area cannot be freed using any form of
2374 munmap. On Linux this behaves the same as
2375 VG_(am_mmap_anon_float_client). On AIX5 this *may* allocate memory
2376 by using sbrk, so as to make use of large pages on AIX. */
2378 SysRes VG_(am_sbrk_anon_float_client) ( SizeT length, Int prot )
2380 return VG_(am_mmap_anon_float_client) ( length, prot );
2384 /* Map anonymously at an unconstrained address for V, and update the
2385 segment array accordingly. This is fundamentally how V allocates
2386 itself more address space when needed. */
/* Map 'length' bytes anonymously, RWX, for Valgrind itself at an
   advisory-chosen address, and record the mapping in the segment
   array.  On Darwin a second, non-fixed mmap attempt is made if the
   fixed one fails; on Linux the result address is verified against
   the advisory.  (Excerpt: local declarations, the MapRequest setup,
   some mmap arguments and braces are elided from this view.) */
2388 SysRes VG_(am_mmap_anon_float_valgrind)( SizeT length )
2396 /* Not allowable. */
2398 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2400 /* Ask for an advisory. If it's negative, fail immediately. */
2404 advised = VG_(am_get_advisory)( &req, False/*valgrind*/, &ok );
2406 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2408 // On Darwin, for anonymous maps you can pass in a tag which is used by
2409 // programs like vmmap for statistical purposes.
2410 #ifndef VM_TAG_VALGRIND
2411 # define VM_TAG_VALGRIND 0
2414 /* We have been advised that the mapping is allowable at the
2415 specified address. So hand it off to the kernel, and propagate
2416 any resulting failure immediately. */
2417 /* GrP fixme darwin: use advisory as a hint only, otherwise syscall in
2418 another thread can pre-empt our spot. [At one point on the DARWIN
2419 branch the VKI_MAP_FIXED was commented out; unclear if this is
2420 necessary or not given the second Darwin-only call that immediately
2421 follows if this one fails. --njn] */
2422 sres = VG_(am_do_mmap_NO_NOTIFY)(
2424 VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
2425 VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2428 #if defined(VGO_darwin)
2429 if (sr_isError(sres)) {
2430 /* try again, ignoring the advisory */
2431 sres = VG_(am_do_mmap_NO_NOTIFY)(
2433 VKI_PROT_READ|VKI_PROT_WRITE|VKI_PROT_EXEC,
/* Note: MAP_FIXED deliberately omitted on this retry. */
2434 /*VKI_MAP_FIXED|*/VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2439 if (sr_isError(sres))
2442 #if defined(VGO_linux)
2443 if (sr_Res(sres) != advised) {
2444 /* I don't think this can happen. It means the kernel made a
2445 fixed map succeed but not at the requested location. Try to
2446 repair the damage, then return saying the mapping failed. */
2447 (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2448 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2452 /* Ok, the mapping succeeded. Now notify the interval map. */
2453 init_nsegment( &seg );
2455 seg.start = sr_Res(sres);
2456 seg.end = seg.start + VG_PGROUNDUP(length) - 1;
2460 add_segment( &seg );
2466 /* Really just a wrapper around VG_(am_mmap_anon_float_valgrind). */
2468 void* VG_(am_shadow_alloc)(SizeT size)
2470 SysRes sres = VG_(am_mmap_anon_float_valgrind)( size );
2471 return sr_isError(sres) ? NULL : (void*)sr_Res(sres);
2474 /* Same comments apply as per VG_(am_sbrk_anon_float_client). On
2475 Linux this behaves the same as VG_(am_mmap_anon_float_valgrind). */
2477 SysRes VG_(am_sbrk_anon_float_valgrind)( SizeT cszB )
2479 return VG_(am_mmap_anon_float_valgrind)( cszB );
2483 /* Map a file at an unconstrained address for V, and update the
2484 segment array accordingly. This is used by V for transiently
2485 mapping in object files to read their debug info. */
/* Map 'length' bytes of file 'fd' at offset 'offset' at an advisory-
   chosen address, for V's own transient use (e.g. reading debug
   info).  Records the mapping, the file's device/inode, and its
   resolved filename in the segment array.
   NOTE(review): the advisory is requested with True/*client*/ even
   though this maps space for V -- confirm that is intentional.
   (Excerpt: local declarations, request setup, mmap arguments, seg
   field assignments and braces are elided from this view.) */
2487 SysRes VG_(am_mmap_file_float_valgrind) ( SizeT length, UInt prot,
2488 Int fd, Off64T offset )
2497 HChar buf[VKI_PATH_MAX];
2499 /* Not allowable. */
2500 if (length == 0 || !VG_IS_PAGE_ALIGNED(offset))
2501 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2503 /* Ask for an advisory. If it's negative, fail immediately. */
2507 advised = VG_(am_get_advisory)( &req, True/*client*/, &ok );
2509 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2511 /* We have been advised that the mapping is allowable at the
2512 specified address. So hand it off to the kernel, and propagate
2513 any resulting failure immediately. */
2514 sres = VG_(am_do_mmap_NO_NOTIFY)(
2515 advised, length, prot,
2516 VKI_MAP_FIXED|VKI_MAP_PRIVATE,
2519 if (sr_isError(sres))
2522 if (sr_Res(sres) != advised) {
2523 /* I don't think this can happen. It means the kernel made a
2524 fixed map succeed but not at the requested location. Try to
2525 repair the damage, then return saying the mapping failed. */
2526 (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), length );
2527 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2530 /* Ok, the mapping succeeded. Now notify the interval map. */
2531 init_nsegment( &seg );
2533 seg.start = sr_Res(sres);
2534 seg.end = seg.start + VG_PGROUNDUP(length) - 1;
2535 seg.offset = offset;
2536 seg.hasR = toBool(prot & VKI_PROT_READ);
2537 seg.hasW = toBool(prot & VKI_PROT_WRITE);
2538 seg.hasX = toBool(prot & VKI_PROT_EXEC);
/* Best-effort: attach device/inode and filename if resolvable. */
2539 if (ML_(am_get_fd_d_i_m)(fd, &dev, &ino, &mode)) {
2544 if (ML_(am_resolve_filename)(fd, buf, VKI_PATH_MAX)) {
2545 seg.fnIdx = allocate_segname( buf );
2547 add_segment( &seg );
2554 /* --- --- munmap helper --- --- */
/* Shared worker for both client and V unmaps.  Validates alignment
   and wraparound, checks the range is legitimate for whichever side
   ('forClient') is unmapping, performs the munmap, and updates the
   segment array via VG_(am_notify_munmap).  *need_discard is set if
   the range contained cached translations (hasT).  (Excerpt: local
   declarations, some early-exit branches and braces are elided.) */
2557 SysRes am_munmap_both_wrk ( /*OUT*/Bool* need_discard,
2558 Addr start, SizeT len, Bool forClient )
2563 if (!VG_IS_PAGE_ALIGNED(start))
/* Zero-length unmap is a successful no-op. */
2567 *need_discard = False;
2568 return VG_(mk_SysRes_Success)( 0 );
/* Reject address wraparound. */
2571 if (start + len < len)
2574 len = VG_PGROUNDUP(len);
2575 aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2576 aspacem_assert(VG_IS_PAGE_ALIGNED(len));
2579 if (!VG_(am_is_valid_for_client_or_free_or_resvn)
2580 ( start, len, VKI_PROT_NONE ))
2583 if (!is_valid_for_valgrind( start, len ))
2587 d = any_Ts_in_range( start, len );
2589 sres = ML_(am_do_munmap_NO_NOTIFY)( start, len );
2590 if (sr_isError(sres))
2593 VG_(am_notify_munmap)( start, len );
2599 return VG_(mk_SysRes_Error)( VKI_EINVAL );
2602 /* Unmap the given address range and update the segment array
2603 accordingly. This fails if the range isn't valid for the client.
2604 If *need_discard is True after a successful return, the caller
2605 should immediately discard translations from the specified address
2608 SysRes VG_(am_munmap_client)( /*OUT*/Bool* need_discard,
2609 Addr start, SizeT len )
2611 return am_munmap_both_wrk( need_discard, start, len, True/*client*/ );
2614 /* Unmap the given address range and update the segment array
2615 accordingly. This fails if the range isn't valid for valgrind. */
2617 SysRes VG_(am_munmap_valgrind)( Addr start, SizeT len )
2620 SysRes r = am_munmap_both_wrk( &need_discard,
2621 start, len, False/*valgrind*/ );
2622 /* If this assertion fails, it means we allowed translations to be
2623 made from a V-owned section. Which shouldn't happen. */
2625 aspacem_assert(!need_discard);
2629 /* Let (start,len) denote an area within a single Valgrind-owned
2630 segment (anon or file). Change the ownership of [start, start+len)
2631 to the client instead. Fails if (start,len) does not denote a
2632 suitable segment. */
/* Transfer ownership of [start, start+len) from a single V-owned
   segment (SkFileV/SkAnonV) to the client (SkFileC/SkAnonC).  The
   range is split out into its own segment (mprotect-style) and its
   kind flipped.  Returns False if the range is misaligned, wraps, or
   does not lie within one V-owned segment.  (Excerpt: local
   declarations, early returns and closing braces are elided.) */
2634 Bool VG_(am_change_ownership_v_to_c)( Addr start, SizeT len )
2640 if (start + len < start)
2642 if (!VG_IS_PAGE_ALIGNED(start) || !VG_IS_PAGE_ALIGNED(len))
2645 i = find_nsegment_idx(start);
2646 if (nsegments[i].kind != SkFileV && nsegments[i].kind != SkAnonV)
2648 if (start+len-1 > nsegments[i].end)
2651 aspacem_assert(start >= nsegments[i].start);
2652 aspacem_assert(start+len-1 <= nsegments[i].end);
2654 /* This scheme is like how mprotect works: split the to-be-changed
2655 range into its own segment(s), then mess with them (it). There
2656 should be only one. */
2657 split_nsegments_lo_and_hi( start, start+len-1, &iLo, &iHi );
2658 aspacem_assert(iLo == iHi);
2659 switch (nsegments[iLo].kind) {
2660 case SkFileV: nsegments[iLo].kind = SkFileC; break;
2661 case SkAnonV: nsegments[iLo].kind = SkAnonC; break;
2662 default: aspacem_assert(0); /* can't happen - guarded above */
2669 /* 'seg' must be NULL or have been obtained from
2670 VG_(am_find_nsegment), and still valid. If non-NULL, and if it
2671 denotes a SkAnonC (anonymous client mapping) area, set the .isCH
2672 (is-client-heap) flag for that area. Otherwise do nothing.
2673 (Bizarre interface so that the same code works for both Linux and
2674 AIX and does not impose inefficiencies on the Linux version.) */
2675 void VG_(am_set_segment_isCH_if_SkAnonC)( NSegment* seg )
2677 Int i = segAddr_to_index( seg );
2678 aspacem_assert(i >= 0 && i < nsegments_used);
2679 if (nsegments[i].kind == SkAnonC) {
2680 nsegments[i].isCH = True;
2682 aspacem_assert(nsegments[i].isCH == False);
2686 /* Same idea as VG_(am_set_segment_isCH_if_SkAnonC), except set the
2687 segment's hasT bit (has-cached-code) if this is SkFileC or SkAnonC
2689 void VG_(am_set_segment_hasT_if_SkFileC_or_SkAnonC)( NSegment* seg )
2691 Int i = segAddr_to_index( seg );
2692 aspacem_assert(i >= 0 && i < nsegments_used);
2693 if (nsegments[i].kind == SkAnonC || nsegments[i].kind == SkFileC) {
2694 nsegments[i].hasT = True;
2699 /* --- --- --- reservations --- --- --- */
2701 /* Create a reservation from START .. START+LENGTH-1, with the given
2702 ShrinkMode. When checking whether the reservation can be created,
2703 also ensure that at least abs(EXTRA) extra free bytes will remain
2704 above (> 0) or below (< 0) the reservation.
2706 The reservation will only be created if it, plus the extra-zone,
2707 falls entirely within a single free segment. The returned Bool
2708 indicates whether the creation succeeded. */
/* Create an SkResvn segment covering [start, start+length), with
   shrink-mode 'smode', provided the range plus abs(extra) guard bytes
   (below if extra<0, above if extra>0) lies entirely inside one
   SkFree segment.  Returns whether the reservation was made.
   (Excerpt: local declarations, end2 initialisation, failure returns
   and remaining seg field assignments are elided from this view.) */
2710 Bool VG_(am_create_reservation) ( Addr start, SizeT length,
2711 ShrinkMode smode, SSizeT extra )
2716 /* start and end, not taking into account the extra space. */
2717 Addr start1 = start;
2718 Addr end1 = start + length - 1;
2720 /* start and end, taking into account the extra space. */
2721 Addr start2 = start1;
2724 if (extra < 0) start2 += extra; // this moves it down :-)
2725 if (extra > 0) end2 += extra;
2727 aspacem_assert(VG_IS_PAGE_ALIGNED(start));
2728 aspacem_assert(VG_IS_PAGE_ALIGNED(start+length));
2729 aspacem_assert(VG_IS_PAGE_ALIGNED(start2));
2730 aspacem_assert(VG_IS_PAGE_ALIGNED(end2+1));
2732 startI = find_nsegment_idx( start2 );
2733 endI = find_nsegment_idx( end2 );
2735 /* If the start and end points don't fall within the same (free)
2736 segment, we're hosed. This does rely on the assumption that all
2737 mergeable adjacent segments can be merged, but add_segment()
2738 should ensure that. */
2742 if (nsegments[startI].kind != SkFree)
2745 /* Looks good - make the reservation. */
2746 aspacem_assert(nsegments[startI].start <= start2);
2747 aspacem_assert(end2 <= nsegments[startI].end);
2749 init_nsegment( &seg );
2751 seg.start = start1; /* NB: extra space is not included in the
2755 add_segment( &seg );
2762 /* Let SEG be an anonymous client mapping. This fn extends the
2763 mapping by DELTA bytes, taking the space from a reservation section
2764 which must be adjacent. If DELTA is positive, the segment is
2765 extended forwards in the address space, and the reservation must be
2766 the next one along. If DELTA is negative, the segment is extended
2767 backwards in the address space and the reservation must be the
2768 previous one. DELTA must be page aligned. abs(DELTA) must not
2769 exceed the size of the reservation segment minus one page, that is,
2770 the reservation segment after the operation must be at least one
/* Grow an SkAnonC segment by abs(delta) bytes into the neighbouring
   SkResvn segment: forwards (delta>0, reservation above, SmLower) or
   backwards (delta<0 as entered, reservation below, SmUpper).  The
   reservation must retain at least one page afterwards.  The new
   space is mmap'd with the segment's existing protections, then both
   segments' bounds are adjusted.  Returns False on any precondition
   or kernel failure.  (Excerpt: local declarations, the delta<0
   branch header, segR computation, mmap prot/offset arguments and
   closing braces are elided from this view.) */
2773 Bool VG_(am_extend_into_adjacent_reservation_client) ( NSegment* seg,
2780 /* Find the segment array index for SEG. If the assertion fails it
2781 probably means you passed in a bogus SEG. */
2782 segA = segAddr_to_index( seg );
2783 aspacem_assert(segA >= 0 && segA < nsegments_used);
2785 if (nsegments[segA].kind != SkAnonC)
/* Reproduce the existing segment's protections for the new space. */
2791 prot = (nsegments[segA].hasR ? VKI_PROT_READ : 0)
2792 | (nsegments[segA].hasW ? VKI_PROT_WRITE : 0)
2793 | (nsegments[segA].hasX ? VKI_PROT_EXEC : 0);
2795 aspacem_assert(VG_IS_PAGE_ALIGNED(delta<0 ? -delta : delta));
2799 /* Extending the segment forwards. */
2801 if (segR >= nsegments_used
2802 || nsegments[segR].kind != SkResvn
2803 || nsegments[segR].smode != SmLower
2804 || nsegments[segR].start != nsegments[segA].end + 1
2805 || delta + VKI_PAGE_SIZE
2806 > (nsegments[segR].end - nsegments[segR].start + 1))
2809 /* Extend the kernel's mapping. */
2810 // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2811 sres = VG_(am_do_mmap_NO_NOTIFY)(
2812 nsegments[segR].start, delta,
2814 VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2817 if (sr_isError(sres))
2818 return False; /* kernel bug if this happens? */
2819 if (sr_Res(sres) != nsegments[segR].start) {
2820 /* kernel bug if this happens? */
2821 (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), delta );
2825 /* Ok, success with the kernel. Update our structures. */
2826 nsegments[segR].start += delta;
2827 nsegments[segA].end += delta;
2828 aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
2832 /* Extending the segment backwards. */
2834 aspacem_assert(delta > 0);
2838 || nsegments[segR].kind != SkResvn
2839 || nsegments[segR].smode != SmUpper
2840 || nsegments[segR].end + 1 != nsegments[segA].start
2841 || delta + VKI_PAGE_SIZE
2842 > (nsegments[segR].end - nsegments[segR].start + 1))
2845 /* Extend the kernel's mapping. */
2846 // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
2847 sres = VG_(am_do_mmap_NO_NOTIFY)(
2848 nsegments[segA].start-delta, delta,
2850 VKI_MAP_FIXED|VKI_MAP_PRIVATE|VKI_MAP_ANONYMOUS,
2853 if (sr_isError(sres))
2854 return False; /* kernel bug if this happens? */
2855 if (sr_Res(sres) != nsegments[segA].start-delta) {
2856 /* kernel bug if this happens? */
2857 (void)ML_(am_do_munmap_NO_NOTIFY)( sr_Res(sres), delta );
2861 /* Ok, success with the kernel. Update our structures. */
2862 nsegments[segR].end -= delta;
2863 nsegments[segA].start -= delta;
2864 aspacem_assert(nsegments[segR].start <= nsegments[segR].end);
2873 /* --- --- --- resizing/move a mapping --- --- --- */
2877 /* Let SEG be a client mapping (anonymous or file). This fn extends
2878 the mapping forwards only by DELTA bytes, and trashes whatever was
2879 in the new area. Fails if SEG is not a single client mapping or if
2880 the new area is not accessible to the client. Fails if DELTA is
2881 not page aligned. *seg is invalid after a successful return. If
2882 *need_discard is True after a successful return, the caller should
2883 immediately discard translations from the new area. */
/* Grow a client mapping (SkFileC/SkAnonC) forwards by 'delta' bytes
   via mremap-style extension, trashing whatever was in the new area.
   The extension area must be client-valid or free/reserved.  Sets
   *need_discard if the new area held cached translations.  *seg is
   invalid after a successful return (the array is rewritten via the
   copy).  (Excerpt: early failure returns, the mremap middle
   argument and braces are elided from this view.) */
2885 Bool VG_(am_extend_map_client)( /*OUT*/Bool* need_discard,
2886 NSegment* seg, SizeT delta )
2890 NSegment seg_copy = *seg;
2891 SizeT seg_old_len = seg->end + 1 - seg->start;
2894 VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) BEFORE");
2896 if (seg->kind != SkFileC && seg->kind != SkAnonC)
2899 if (delta == 0 || !VG_IS_PAGE_ALIGNED(delta))
/* xStart is the first byte of the would-be extension area. */
2902 xStart = seg->end+1;
2903 if (xStart + delta < delta)
2906 if (!VG_(am_is_valid_for_client_or_free_or_resvn)( xStart, delta,
2911 sres = ML_(am_do_extend_mapping_NO_NOTIFY)( seg->start,
2913 seg_old_len + delta );
2914 if (sr_isError(sres)) {
2918 /* the area must not have moved */
2919 aspacem_assert(sr_Res(sres) == seg->start);
2922 *need_discard = any_Ts_in_range( seg_copy.end+1, delta );
2924 seg_copy.end += delta;
2925 add_segment( &seg_copy );
2928 VG_(am_show_nsegments)(0, "VG_(am_extend_map_client) AFTER");
2935 /* Remap the old address range to the new address range. Fails if any
2936 parameter is not page aligned, if the either size is zero, if any
2937 wraparound is implied, if the old address range does not fall
2938 entirely within a single segment, if the new address range overlaps
2939 with the old one, or if the old address range is not a valid client
2940 mapping. If *need_discard is True after a successful return, the
2941 caller should immediately discard translations from both specified
/* mremap-to-new-address for a client mapping: move [old_addr,
   old_addr+old_len) to [new_addr, new_addr+new_len), which must not
   overlap.  Validates alignment/wraparound, performs the kernel
   relocate, registers the new segment (adjusting the file offset for
   SkFileC), and replaces the old range with a free hole (or SkResvn
   above aspacem_maxAddr).  Sets *need_discard if either range held
   cached translations.  (Excerpt: failure returns, the overlap-
   rejection branch and braces are elided from this view.) */
2944 Bool VG_(am_relocate_nooverlap_client)( /*OUT*/Bool* need_discard,
2945 Addr old_addr, SizeT old_len,
2946 Addr new_addr, SizeT new_len )
2952 if (old_len == 0 || new_len == 0)
2955 if (!VG_IS_PAGE_ALIGNED(old_addr) || !VG_IS_PAGE_ALIGNED(old_len)
2956 || !VG_IS_PAGE_ALIGNED(new_addr) || !VG_IS_PAGE_ALIGNED(new_len))
2959 if (old_addr + old_len < old_addr
2960 || new_addr + new_len < new_addr)
2963 if (old_addr + old_len - 1 < new_addr
2964 || new_addr + new_len - 1 < old_addr) {
2969 iLo = find_nsegment_idx( old_addr );
2970 iHi = find_nsegment_idx( old_addr + old_len - 1 );
2974 if (nsegments[iLo].kind != SkFileC && nsegments[iLo].kind != SkAnonC)
2977 sres = ML_(am_do_relocate_nooverlap_mapping_NO_NOTIFY)
2978 ( old_addr, old_len, new_addr, new_len );
2979 if (sr_isError(sres)) {
2983 aspacem_assert(sr_Res(sres) == new_addr);
2986 *need_discard = any_Ts_in_range( old_addr, old_len )
2987 || any_Ts_in_range( new_addr, new_len );
2989 seg = nsegments[iLo];
2991 /* Mark the new area based on the old seg. */
2992 if (seg.kind == SkFileC) {
2993 seg.offset += ((ULong)old_addr) - ((ULong)seg.start);
2995 aspacem_assert(seg.kind == SkAnonC);
2996 aspacem_assert(seg.offset == 0);
2998 seg.start = new_addr;
2999 seg.end = new_addr + new_len - 1;
3000 add_segment( &seg );
3002 /* Create a free hole in the old location. */
3003 init_nsegment( &seg );
3004 seg.start = old_addr;
3005 seg.end = old_addr + old_len - 1;
3006 /* See comments in VG_(am_notify_munmap) about this SkResvn vs
3008 if (old_addr > aspacem_maxAddr
3009 && /* check previous comparison is meaningful */
3010 aspacem_maxAddr < Addr_MAX)
3015 add_segment( &seg );
3021 #endif // HAVE_MREMAP
3024 #if defined(VGO_linux)
3026 /*-----------------------------------------------------------------*/
3028 /*--- A simple parser for /proc/self/maps on Linux 2.4.X/2.6.X. ---*/
3029 /*--- Almost completely independent of the stuff above. The ---*/
3030 /*--- only function it 'exports' to the code above this comment ---*/
3031 /*--- is parse_procselfmaps. ---*/
3033 /*-----------------------------------------------------------------*/
3035 /*------BEGIN-procmaps-parser-for-Linux--------------------------*/
3037 /* Size of a smallish table used to read /proc/self/map entries. */
3038 #define M_PROCMAP_BUF 100000
3040 /* static ... to keep it out of the stack frame. */
3041 static Char procmap_buf[M_PROCMAP_BUF];
3043 /* Records length of /proc/self/maps read into procmap_buf. */
3044 static Int buf_n_tot;
3048 static Int hexdigit ( Char c )
3050 if (c >= '0' && c <= '9') return (Int)(c - '0');
3051 if (c >= 'a' && c <= 'f') return 10 + (Int)(c - 'a');
3052 if (c >= 'A' && c <= 'F') return 10 + (Int)(c - 'A');
3056 static Int decdigit ( Char c )
3058 if (c >= '0' && c <= '9') return (Int)(c - '0');
3062 static Int readchar ( const Char* buf, Char* ch )
3064 if (*buf == 0) return 0;
3069 static Int readhex ( const Char* buf, UWord* val )
3071 /* Read a word-sized hex number. */
3074 while (hexdigit(*buf) >= 0) {
3075 *val = (*val << 4) + hexdigit(*buf);
3081 static Int readhex64 ( const Char* buf, ULong* val )
3083 /* Read a potentially 64-bit hex number. */
3086 while (hexdigit(*buf) >= 0) {
3087 *val = (*val << 4) + hexdigit(*buf);
3093 static Int readdec64 ( const Char* buf, ULong* val )
3097 while (hexdigit(*buf) >= 0) {
3098 *val = (*val * 10) + decdigit(*buf);
3105 /* Get the contents of /proc/self/maps into a static buffer. If
3106 there's a syntax error, it won't fit, or other failure, just
/* Slurp /proc/self/maps into the static procmap_buf, NUL-terminate
   it, and set buf_n_tot.  Aborts (am_barf) if the file can't be
   opened, the buffer is nearly full, or a read error occurs.
   (Excerpt: local declarations, the do-loop header and error-branch
   structure are elided from this view.) */
3109 static void read_procselfmaps_into_buf ( void )
3114 /* Read the initial memory mapping from the /proc filesystem. */
3115 fd = ML_(am_open)( "/proc/self/maps", VKI_O_RDONLY, 0 );
3117 ML_(am_barf)("can't open /proc/self/maps");
3121 n_chunk = ML_(am_read)( sr_Res(fd), &procmap_buf[buf_n_tot],
3122 M_PROCMAP_BUF - buf_n_tot );
3124 buf_n_tot += n_chunk;
3125 } while ( n_chunk > 0 && buf_n_tot < M_PROCMAP_BUF );
3127 ML_(am_close)(sr_Res(fd));
/* -5 slack guards against a maps file that only just fits. */
3129 if (buf_n_tot >= M_PROCMAP_BUF-5)
3130 ML_(am_barf_toolow)("M_PROCMAP_BUF");
3132 ML_(am_barf)("I/O error on /proc/self/maps");
3134 procmap_buf[buf_n_tot] = 0;
3137 /* Parse /proc/self/maps. For each map entry, call
3138 record_mapping, passing it, in this order:
3140 start address in memory
3142 page protections (using the VKI_PROT_* flags)
3143 mapped file device and inode
3144 offset in file, or zero if no file
3145 filename, zero terminated, or NULL if no file
3147 So the sig of the called fn might be
3149 void (*record_mapping)( Addr start, SizeT size, UInt prot,
3150 UInt dev, UInt info,
3151 ULong foffset, UChar* filename )
3153 Note that the supplied filename is transiently stored; record_mapping
3154 should make a copy if it wants to keep it.
3156 Nb: it is important that this function does not alter the contents of
/* Parse the buffered /proc/self/maps text.  For each map line, parse
   "start-end perms offset maj:min inode [filename]" with the local
   readhex/readchar/readdec64 helpers, then invoke record_mapping for
   the mapped range and record_gap for the unmapped gap preceding it;
   a final record_gap covers the tail of the address space.  On ARM a
   fake commpage entry is synthesised since the kernel omits it from
   the maps file.  Either callback may be NULL.  (Excerpt: local
   declarations, the outer loop header, the syntaxerror label and
   several braces are elided from this view.) */
3159 static void parse_procselfmaps (
3160 void (*record_mapping)( Addr addr, SizeT len, UInt prot,
3161 ULong dev, ULong ino, Off64T offset,
3162 const UChar* filename ),
3163 void (*record_gap)( Addr addr, SizeT len )
3167 Addr start, endPlusOne, gapStart;
3169 UChar rr, ww, xx, pp, ch, tmp;
3172 ULong foffset, dev, ino;
3174 foffset = ino = 0; /* keep gcc-4.1.0 happy */
3176 read_procselfmaps_into_buf();
3178 aspacem_assert('\0' != procmap_buf[0] && 0 != buf_n_tot);
3181 VG_(debugLog)(0, "procselfmaps", "raw:\n%s\n", procmap_buf);
3183 /* Ok, it's safely aboard. Parse the entries. */
3185 gapStart = Addr_MIN;
3187 if (i >= buf_n_tot) break;
3189 /* Read (without fscanf :) the pattern %16x-%16x %c%c%c%c %16x %2x:%2x %d */
3190 j = readhex(&procmap_buf[i], &start);
3191 if (j > 0) i += j; else goto syntaxerror;
3192 j = readchar(&procmap_buf[i], &ch);
3193 if (j == 1 && ch == '-') i += j; else goto syntaxerror;
3194 j = readhex(&procmap_buf[i], &endPlusOne);
3195 if (j > 0) i += j; else goto syntaxerror;
3197 j = readchar(&procmap_buf[i], &ch);
3198 if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
3200 j = readchar(&procmap_buf[i], &rr);
3201 if (j == 1 && (rr == 'r' || rr == '-')) i += j; else goto syntaxerror;
3202 j = readchar(&procmap_buf[i], &ww);
3203 if (j == 1 && (ww == 'w' || ww == '-')) i += j; else goto syntaxerror;
3204 j = readchar(&procmap_buf[i], &xx);
3205 if (j == 1 && (xx == 'x' || xx == '-')) i += j; else goto syntaxerror;
3206 /* This field is the shared/private flag */
3207 j = readchar(&procmap_buf[i], &pp);
3208 if (j == 1 && (pp == 'p' || pp == '-' || pp == 's'))
3209 i += j; else goto syntaxerror;
3211 j = readchar(&procmap_buf[i], &ch);
3212 if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
3214 j = readhex64(&procmap_buf[i], &foffset);
3215 if (j > 0) i += j; else goto syntaxerror;
3217 j = readchar(&procmap_buf[i], &ch);
3218 if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
3220 j = readhex(&procmap_buf[i], &maj);
3221 if (j > 0) i += j; else goto syntaxerror;
3222 j = readchar(&procmap_buf[i], &ch);
3223 if (j == 1 && ch == ':') i += j; else goto syntaxerror;
3224 j = readhex(&procmap_buf[i], &min);
3225 if (j > 0) i += j; else goto syntaxerror;
3227 j = readchar(&procmap_buf[i], &ch);
3228 if (j == 1 && ch == ' ') i += j; else goto syntaxerror;
3230 j = readdec64(&procmap_buf[i], &ino);
3231 if (j > 0) i += j; else goto syntaxerror;
/* syntaxerror path: report and dump the trailing context. */
3236 VG_(debugLog)(0, "Valgrind:",
3237 "FATAL: syntax error reading /proc/self/maps\n");
3244 for (; k <= i; k++) {
3245 buf50[m] = procmap_buf[k];
3249 VG_(debugLog)(0, "procselfmaps", "Last 50 chars: '%s'\n", buf50);
3255 /* Try and find the name of the file mapped to this segment, if
3256 it exists. Note that files can contain spaces. */
3258 // Move i to the next non-space char, which should be either a '/' or
3260 while (procmap_buf[i] == ' ' && i < buf_n_tot-1) i++;
3262 // Move i_eol to the end of the line.
3264 while (procmap_buf[i_eol] != '\n' && i_eol < buf_n_tot-1) i_eol++;
3266 // If there's a filename...
3267 if (i < i_eol-1 && procmap_buf[i] == '/') {
3268 /* Minor hack: put a '\0' at the filename end for the call to
3269 'record_mapping', then restore the old char with 'tmp'. */
3270 filename = &procmap_buf[i];
3271 tmp = filename[i_eol - i];
3272 filename[i_eol - i] = '\0';
3280 if (rr == 'r') prot |= VKI_PROT_READ;
3281 if (ww == 'w') prot |= VKI_PROT_WRITE;
3282 if (xx == 'x') prot |= VKI_PROT_EXEC;
3284 /* Linux has two ways to encode a device number when it
3285 is exposed to user space (via fstat etc). The old way
3286 is the traditional unix scheme that produces a 16 bit
3287 device number with the top 8 being the major number and
3288 the bottom 8 the minor number.
3290 The new scheme allows for a 12 bit major number and
3291 a 20 bit minor number by using a 32 bit device number
3292 and putting the top 12 bits of the minor number into
3293 the top 12 bits of the device number thus leaving an
3294 extra 4 bits for the major number.
3296 If the minor and major number are both single byte
3297 values then both schemes give the same result so we
3298 use the new scheme here in case either number is
3299 outside the 0-255 range and then use fstat64 when
3300 available (or fstat on 64 bit systems) so that we
3301 should always have a new style device number and
3302 everything should match. */
3303 dev = (min & 0xff) | (maj << 8) | ((min & ~0xff) << 12);
3305 if (record_gap && gapStart < start)
3306 (*record_gap) ( gapStart, start-gapStart );
3308 if (record_mapping && start < endPlusOne)
3309 (*record_mapping) ( start, endPlusOne-start,
3311 foffset, filename );
/* Undo the temporary NUL-termination hack. */
3314 filename[i_eol - i] = tmp;
3318 gapStart = endPlusOne;
3321 # if defined(VGP_arm_linux)
3322 /* ARM puts code at the end of memory that contains processor
3323 specific stuff (cmpxchg, getting the thread local storage, etc.)
3324 This isn't specified in /proc/self/maps, so do it here. This
3325 kludgery causes the view of memory, as presented to
3326 record_gap/record_mapping, to actually reflect reality. IMO
3327 (JRS, 2010-Jan-03) the fact that /proc/.../maps does not list
3328 the commpage should be regarded as a bug in the kernel. */
3329 { const Addr commpage_start = ARM_LINUX_FAKE_COMMPAGE_START;
3330 const Addr commpage_end1 = ARM_LINUX_FAKE_COMMPAGE_END1;
3331 if (gapStart < commpage_start) {
3333 (*record_gap)( gapStart, commpage_start - gapStart );
3335 (*record_mapping)( commpage_start, commpage_end1 - commpage_start,
3336 VKI_PROT_READ|VKI_PROT_EXEC,
3337 0/*dev*/, 0/*ino*/, 0/*foffset*/,
3339 gapStart = commpage_end1;
3344 if (record_gap && gapStart < Addr_MAX)
3345 (*record_gap) ( gapStart, Addr_MAX - gapStart + 1 );
3348 /*------END-procmaps-parser-for-Linux----------------------------*/
3350 /*------BEGIN-procmaps-parser-for-Darwin-------------------------*/
3352 #elif defined(VGO_darwin)
3353 #include <mach/mach.h>
3354 #include <mach/mach_vm.h>
3356 static unsigned int mach2vki(unsigned int vm_prot)
3359 ((vm_prot & VM_PROT_READ) ? VKI_PROT_READ : 0) |
3360 ((vm_prot & VM_PROT_WRITE) ? VKI_PROT_WRITE : 0) |
3361 ((vm_prot & VM_PROT_EXECUTE) ? VKI_PROT_EXEC : 0) ;
3364 static UInt stats_machcalls = 0;
/* Darwin analogue of the Linux maps parser: walk the task's regions
   with mach_vm_region_recurse, calling record_mapping for each
   region (no dev/ino/filename available) and record_gap for the
   holes between them and after the last one.  Submaps are descended
   into rather than reported.  (Excerpt: the iteration loop header,
   'last'/'iter' bookkeeping and braces are elided from this view.) */
3366 static void parse_procselfmaps (
3367 void (*record_mapping)( Addr addr, SizeT len, UInt prot,
3368 ULong dev, ULong ino, Off64T offset,
3369 const UChar* filename ),
3370 void (*record_gap)( Addr addr, SizeT len )
3381 mach_vm_address_t addr = iter;
3382 mach_vm_size_t size;
3383 vm_region_submap_short_info_data_64_t info;
3387 mach_msg_type_number_t info_count
3388 = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
3390 kr = mach_vm_region_recurse(mach_task_self(), &addr, &size, &depth,
3391 (vm_region_info_t)&info, &info_count);
3394 if (info.is_submap) {
3402 if (addr > last && record_gap) {
3403 (*record_gap)(last, addr - last);
3405 if (record_mapping) {
/* No device/inode/filename information on Darwin; pass zeros/NULL. */
3406 (*record_mapping)(addr, size, mach2vki(info.protection),
3407 0, 0, info.offset, NULL);
/* Final gap from the last region to the top of the address space. */
3412 if ((Addr)-1 > last && record_gap)
3413 (*record_gap)(last, (Addr)-1 - last);
3416 Bool css_overflowed;
3417 ChangedSeg* css_local;
/* Callback for VG_(get_changed_segments): the kernel reports a
   mapping at [addr, addr+len).  For each overlapped NSegment: ignore
   V-owned areas; queue an 'added' ChangedSeg for SkFree/SkResvn
   areas (setting css_overflowed if css_local is full); and for
   client areas just cross-check that V's recorded permissions match
   the kernel's.  (Excerpt: local declarations, cs->start/prot
   assignments and several braces are elided from this view.) */
3421 static void add_mapping_callback(Addr addr, SizeT len, UInt prot,
3422 ULong dev, ULong ino, Off64T offset,
3423 const UChar *filename)
3425 // derived from sync_check_mapping_callback()
3429 if (len == 0) return;
3431 /* The kernel should not give us wraparounds. */
3432 aspacem_assert(addr <= addr + len - 1);
3434 iLo = find_nsegment_idx( addr );
3435 iHi = find_nsegment_idx( addr + len - 1 );
3438 /* NSegments iLo .. iHi inclusive should agree with the presented
3440 for (i = iLo; i <= iHi; i++) {
3444 if (nsegments[i].kind == SkAnonV || nsegments[i].kind == SkFileV) {
3445 /* Ignore V regions */
3448 else if (nsegments[i].kind == SkFree || nsegments[i].kind == SkResvn) {
3449 /* Add mapping for SkResvn regions */
3450 ChangedSeg* cs = &css_local[css_used_local];
3451 if (css_used_local < css_size_local) {
3452 cs->is_added = True;
3454 cs->end = addr + len - 1;
3456 cs->offset = offset;
3459 css_overflowed = True;
3463 } else if (nsegments[i].kind == SkAnonC ||
3464 nsegments[i].kind == SkFileC ||
3465 nsegments[i].kind == SkShmC)
3467 /* Check permissions on client regions */
3470 if (nsegments[i].hasR) seg_prot |= VKI_PROT_READ;
3471 if (nsegments[i].hasW) seg_prot |= VKI_PROT_WRITE;
3472 # if defined(VGA_x86)
3473 // GrP fixme sloppyXcheck
3474 // darwin: kernel X ignored and spuriously changes? (vm_copy)
3475 seg_prot |= (prot & VKI_PROT_EXEC);
3477 if (nsegments[i].hasX) seg_prot |= VKI_PROT_EXEC;
3479 if (seg_prot != prot) {
3480 if (VG_(clo_trace_syscalls))
3481 VG_(debugLog)(0,"aspacem","\nregion %p..%p permission "
3482 "mismatch (kernel %x, V %x)",
3483 (void*)nsegments[i].start,
3484 (void*)(nsegments[i].end+1), prot, seg_prot);
/* Callback for VG_(get_changed_segments): the kernel reports a gap
   at [addr, addr+len).  Any overlapped NSegment that V believes is
   mapped (neither SkFree nor SkResvn) gets a 'removed' ChangedSeg
   queued; css_overflowed is set if css_local runs out of space.
   (Excerpt: local declarations and several braces are elided from
   this view.) */
3493 static void remove_mapping_callback(Addr addr, SizeT len)
3495 // derived from sync_check_gap_callback()
3502 /* The kernel should not give us wraparounds. */
3503 aspacem_assert(addr <= addr + len - 1);
3505 iLo = find_nsegment_idx( addr );
3506 iHi = find_nsegment_idx( addr + len - 1 );
3508 /* NSegments iLo .. iHi inclusive should agree with the presented data. */
3509 for (i = iLo; i <= iHi; i++) {
3510 if (nsegments[i].kind != SkFree && nsegments[i].kind != SkResvn) {
3511 // V has a mapping, kernel doesn't
3512 ChangedSeg* cs = &css_local[css_used_local];
3513 if (css_used_local < css_size_local) {
3514 cs->is_added = False;
3515 cs->start = nsegments[i].start;
3516 cs->end = nsegments[i].end;
3518 css_overflowed = True;
3529 // Returns False if 'css' wasn't big enough.
/* Compare V's segment array against the kernel's view (via the
   Darwin parse_procselfmaps) and fill 'css' with the segments that
   must be added or removed to resynchronise.  *css_used receives the
   count.  Returns False if 'css' was too small to hold them all.
   Results are communicated to the callbacks through the file-scope
   css_local/css_size_local/css_used_local/css_overflowed variables.
   (Excerpt: css_local/css_used_local setup and braces are elided
   from this view.) */
3530 Bool VG_(get_changed_segments)(
3531 const HChar* when, const HChar* where, /*OUT*/ChangedSeg* css,
3532 Int css_size, /*OUT*/Int* css_used)
3534 static UInt stats_synccalls = 1;
3535 aspacem_assert(when && where);
3538 VG_(debugLog)(0,"aspacem",
3539 "[%u,%u] VG_(get_changed_segments)(%s, %s)\n",
3540 stats_synccalls++, stats_machcalls, when, where
3543 css_overflowed = False;
3545 css_size_local = css_size;
3548 // Get the list of segs that need to be added/removed.
3549 parse_procselfmaps(&add_mapping_callback, &remove_mapping_callback);
3551 *css_used = css_used_local;
3553 if (css_overflowed) {
3554 aspacem_assert(css_used_local == css_size_local);
3557 return !css_overflowed;
3560 #endif // defined(VGO_darwin)
3562 /*------END-procmaps-parser-for-Darwin---------------------------*/
3564 #endif // defined(VGO_linux) || defined(VGO_darwin)
3566 /*--------------------------------------------------------------------*/
3568 /*--------------------------------------------------------------------*/