/*--------------------------------------------------------------------*/
/*--- MemCheck: Maintain bitmaps of memory, tracking the           ---*/
/*--- accessibility (A) and validity (V) status of each byte.      ---*/
/*---                                                    mc_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of MemCheck, a heavyweight Valgrind tool for
   detecting memory errors.

   Copyright (C) 2000-2010 Julian Seward

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/
#include "pub_tool_basics.h"
#include "pub_tool_aspacemgr.h"
#include "pub_tool_hashtable.h"     // For mc_include.h
#include "pub_tool_libcbase.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"
#include "pub_tool_options.h"
#include "pub_tool_oset.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_threadstate.h"

#include "mc_include.h"
#include "memcheck.h"   /* for client requests */

HChar *VG_(toolname) = "memcheck";
/* Set to 1 to do a little more sanity checking */
#define VG_DEBUG_MEMORY 0

#define DEBUG(fmt, args...) //VG_(printf)(fmt, ## args)

static void ocache_sarp_Set_Origins ( Addr, UWord, UInt ); /* fwds */
static void ocache_sarp_Clear_Origins ( Addr, UWord ); /* fwds */


/*------------------------------------------------------------*/
/*--- Fast-case knobs                                      ---*/
/*------------------------------------------------------------*/

// Comment these out to disable the fast cases (don't just set them to zero).

#define PERF_FAST_LOADV    1
#define PERF_FAST_STOREV   1

#define PERF_FAST_SARP     1

#define PERF_FAST_STACK    1
#define PERF_FAST_STACK2   1

/* Change this to 1 to enable assertions on origin tracking cache fast
   paths. */
#define OC_ENABLE_ASSERTIONS 0


/*------------------------------------------------------------*/
/*--- Comments on the origin tracking implementation       ---*/
/*------------------------------------------------------------*/

/* See detailed comment entitled
   AN OVERVIEW OF THE ORIGIN TRACKING IMPLEMENTATION
   which is contained further on in this file. */


/*------------------------------------------------------------*/
/*--- V bits and A bits                                    ---*/
/*------------------------------------------------------------*/

/* Conceptually, every byte value has 8 V bits, which track whether Memcheck
   thinks the corresponding value bit is defined.  And every memory byte
   has an A bit, which tracks whether Memcheck thinks the program can access
   it safely (ie. it's mapped, and has at least one of the RWX permission bits
   set).  So every N-bit register is shadowed with N V bits, and every memory
   byte is shadowed with 8 V bits and one A bit.

   In the implementation, we use two forms of compression (compressed V bits
   and distinguished secondary maps) to avoid the 9-bit-per-byte overhead
   significantly.

   Memcheck also tracks extra information about each heap block that is
   allocated, for detecting memory leaks and other purposes.
*/

/*------------------------------------------------------------*/
/*--- Basic A/V bitmap representation.                     ---*/
/*------------------------------------------------------------*/

/* All reads and writes are checked against a memory map (a.k.a. shadow
   memory), which records the state of all memory in the process.

   On 32-bit machines the memory map is organised as follows.
   The top 16 bits of an address are used to index into a top-level
   map table, containing 65536 entries.  Each entry is a pointer to a
   second-level map, which records the accessibility and validity
   permissions for the 65536 bytes indexed by the lower 16 bits of the
   address.  Each byte is represented by two bits (details are below).  So
   each second-level map contains 16384 bytes.  This two-level arrangement
   conveniently divides the 4G address space into 64k lumps, each of size
   64k bytes.

   All entries in the primary (top-level) map must point to a valid
   secondary (second-level) map.  Since many of the 64kB chunks will
   have the same status for every bit -- ie. noaccess (for unused
   address space) or entirely addressable and defined (for code segments) --
   there are three distinguished secondary maps, which indicate 'noaccess',
   'undefined' and 'defined'.  For these uniform 64kB chunks, the primary
   map entry points to the relevant distinguished map.  In practice,
   typically more than half of the addressable memory is represented with
   the 'undefined' or 'defined' distinguished secondary map, so it gives a
   good saving.  It also lets us set the V+A bits of large address regions
   quickly in set_address_range_perms().
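
   To make the above concrete, here is an illustrative sketch (added
   commentary, not from the original sources) of what a 32-bit lookup
   boils down to, in terms of the primary_map and SecMap definitions
   given below:

      SecMap* sm      = primary_map[a >> 16];             // top 16 bits
      UChar   vabits8 = sm->vabits8[(a & 0xffff) >> 2];   // 2 bits/byte

   i.e. one table lookup to find the secondary map, then one byte load
   from it; the final >> 2 is because each byte of a secondary map
   describes four bytes of client memory.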

   On 64-bit machines it's more complicated.  If we followed the same basic
   scheme we'd have a four-level table which would require too many memory
   accesses.  So instead the top-level map table has 2^19 entries (indexed
   using bits 16..34 of the address); this covers the bottom 32GB.  Any
   accesses above 32GB are handled with a slow, sparse auxiliary table.
   Valgrind's address space manager tries very hard to keep things below
   this 32GB barrier so that performance doesn't suffer too much.

   Note that this file has a lot of different functions for reading and
   writing shadow memory.  Only a couple are strictly necessary (eg.
   get_vabits2 and set_vabits2), most are just specialised for specific
   common cases to improve performance.

   Aside: the V+A bits are less precise than they could be -- we have no way
   of marking memory as read-only.  It would be great if we could add an
   extra state VA_BITSn_READONLY.  But then we'd have 5 different states,
   which requires 2.3 bits to hold, and there's no way to do that elegantly
   -- we'd have to double up to 4 bits of metadata per byte, which doesn't
   seem worth it.
*/

/* --------------- Basic configuration --------------- */

/* Only change this.  N_PRIMARY_MAP *must* be a power of 2. */

#if VG_WORDSIZE == 4

/* cover the entire address space */
#  define N_PRIMARY_BITS  16

#else

/* Just handle the first 32G fast and the rest via auxiliary
   primaries.  If you change this, Memcheck will assert at startup.
   See the definition of UNALIGNED_OR_HIGH for extensive comments. */
#  define N_PRIMARY_BITS  19

#endif

/* Do not change this. */
#define N_PRIMARY_MAP  ( ((UWord)1) << N_PRIMARY_BITS)

/* Do not change this. */
#define MAX_PRIMARY_ADDRESS (Addr)((((Addr)65536) * N_PRIMARY_MAP)-1)


/* --------------- Secondary maps --------------- */

// Each byte of memory conceptually has an A bit, which indicates its
// addressability, and 8 V bits, which indicate its definedness.
//
// But because very few bytes are partially defined, we can use a nice
// compression scheme to reduce the size of shadow memory.  Each byte of
// memory has 2 bits which indicate its state (ie. V+A bits):
//
//   00:  noaccess    (unaddressable but treated as fully defined)
//   01:  undefined   (addressable and fully undefined)
//   10:  defined     (addressable and fully defined)
//   11:  partdefined (addressable and partially defined)
//
// In the "partdefined" case, we use a secondary table to store the V bits.
// Each entry in the secondary-V-bits table maps a byte address to its 8 V
// bits.
//
// We store the compressed V+A bits in 8-bit chunks, ie. the V+A bits for
// four bytes (32 bits) of memory are in each chunk.  Hence the name
// "vabits8".  This lets us get the V+A bits for four bytes at a time
// easily (without having to do any shifting and/or masking), and that is a
// very common operation.  (Note that although each vabits8 chunk
// is 8 bits in size, it represents 32 bits of memory.)
//
// The representation is "inverse" little-endian... each 4 bytes of
// memory is represented by a 1 byte value, where:
//
// - the status of byte (a+0) is held in bits [1..0]
// - the status of byte (a+1) is held in bits [3..2]
// - the status of byte (a+2) is held in bits [5..4]
// - the status of byte (a+3) is held in bits [7..6]
//
// It's "inverse" because endianness normally describes a mapping from
// value bits to memory addresses;  in this case the mapping is inverted.
// Ie. instead of particular value bits being held in certain addresses, in
// this case certain addresses are represented by particular value bits.
// See insert_vabits2_into_vabits8() for an example.
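//
// Worked example (added commentary): suppose the four bytes at a..a+3 are
// defined, undefined, defined and noaccess respectively.  Using the
// VA_BITS2_* encodings defined just below, the vabits8 chunk for them is
//
//      (VA_BITS2_NOACCESS  << 6)    // byte a+3: 00b
//    | (VA_BITS2_DEFINED   << 4)    // byte a+2: 10b
//    | (VA_BITS2_UNDEFINED << 2)    // byte a+1: 01b
//    |  VA_BITS2_DEFINED            // byte a+0: 10b
//   == 00_10_01_10b == 0x26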
//
// But note that we don't compress the V bits stored in registers;  they
// need to be explicit to make the shadow operations possible.  Therefore
// when moving values between registers and memory we need to convert
// between the expanded in-register format and the compressed in-memory
// format.  This isn't so difficult, it just requires careful attention in a
// few places.

// These represent eight bits of memory.
#define VA_BITS2_NOACCESS     0x0      // 00b
#define VA_BITS2_UNDEFINED    0x1      // 01b
#define VA_BITS2_DEFINED      0x2      // 10b
#define VA_BITS2_PARTDEFINED  0x3      // 11b

// These represent 16 bits of memory.
#define VA_BITS4_NOACCESS     0x0      // 00_00b
#define VA_BITS4_UNDEFINED    0x5      // 01_01b
#define VA_BITS4_DEFINED      0xa      // 10_10b

// These represent 32 bits of memory.
#define VA_BITS8_NOACCESS     0x00     // 00_00_00_00b
#define VA_BITS8_UNDEFINED    0x55     // 01_01_01_01b
#define VA_BITS8_DEFINED      0xaa     // 10_10_10_10b

// These represent 64 bits of memory.
#define VA_BITS16_NOACCESS    0x0000   // 00_00_00_00b x 2
#define VA_BITS16_UNDEFINED   0x5555   // 01_01_01_01b x 2
#define VA_BITS16_DEFINED     0xaaaa   // 10_10_10_10b x 2


#define SM_CHUNKS             16384
#define SM_OFF(aaa)           (((aaa) & 0xffff) >> 2)
#define SM_OFF_16(aaa)        (((aaa) & 0xffff) >> 3)

// Paranoia:  it's critical for performance that the requested inlining
// occurs.  So try extra hard.
#define INLINE    inline __attribute__((always_inline))

static INLINE Addr start_of_this_sm ( Addr a ) {
   return (a & (~SM_MASK));
}
static INLINE Bool is_start_of_sm ( Addr a ) {
   return (start_of_this_sm(a) == a);
}

typedef
   struct {
      UChar vabits8[SM_CHUNKS];
   }
   SecMap;

// 3 distinguished secondary maps, one for no-access, one for
// accessible but undefined, and one for accessible and defined.
// Distinguished secondaries may never be modified.
#define SM_DIST_NOACCESS   0
#define SM_DIST_UNDEFINED  1
#define SM_DIST_DEFINED    2

static SecMap sm_distinguished[3];

static INLINE Bool is_distinguished_sm ( SecMap* sm ) {
   return sm >= &sm_distinguished[0] && sm <= &sm_distinguished[2];
}

// Forward declaration
static void update_SM_counts(SecMap* oldSM, SecMap* newSM);

/* dist_sm points to one of our three distinguished secondaries.  Make
   a copy of it so that we can write to it.
*/
static SecMap* copy_for_writing ( SecMap* dist_sm )
{
   SecMap* new_sm;
   tl_assert(dist_sm == &sm_distinguished[0]
          || dist_sm == &sm_distinguished[1]
          || dist_sm == &sm_distinguished[2]);

   new_sm = VG_(am_shadow_alloc)(sizeof(SecMap));
   if (new_sm == NULL)
      VG_(out_of_memory_NORETURN)( "memcheck:allocate new SecMap",
                                   sizeof(SecMap) );
   VG_(memcpy)(new_sm, dist_sm, sizeof(SecMap));
   update_SM_counts(dist_sm, new_sm);
   return new_sm;
}


/* --------------- Stats --------------- */

static Int   n_issued_SMs      = 0;
static Int   n_deissued_SMs    = 0;
static Int   n_noaccess_SMs    = N_PRIMARY_MAP; // start with many noaccess DSMs
static Int   n_undefined_SMs   = 0;
static Int   n_defined_SMs     = 0;
static Int   n_non_DSM_SMs     = 0;
static Int   max_noaccess_SMs  = 0;
static Int   max_undefined_SMs = 0;
static Int   max_defined_SMs   = 0;
static Int   max_non_DSM_SMs   = 0;

/* # searches initiated in auxmap_L1, and # base cmps required */
static ULong n_auxmap_L1_searches  = 0;
static ULong n_auxmap_L1_cmps      = 0;
/* # of searches that missed in auxmap_L1 and therefore had to
   be handed to auxmap_L2.  And the number of nodes inserted. */
static ULong n_auxmap_L2_searches  = 0;
static ULong n_auxmap_L2_nodes     = 0;

static Int   n_sanity_cheap     = 0;
static Int   n_sanity_expensive = 0;

static Int   n_secVBit_nodes   = 0;
static Int   max_secVBit_nodes = 0;

static void update_SM_counts(SecMap* oldSM, SecMap* newSM)
{
   if      (oldSM == &sm_distinguished[SM_DIST_NOACCESS ]) n_noaccess_SMs --;
   else if (oldSM == &sm_distinguished[SM_DIST_UNDEFINED]) n_undefined_SMs--;
   else if (oldSM == &sm_distinguished[SM_DIST_DEFINED  ]) n_defined_SMs  --;
   else                                                  { n_non_DSM_SMs  --;
                                                           n_deissued_SMs ++; }

   if      (newSM == &sm_distinguished[SM_DIST_NOACCESS ]) n_noaccess_SMs ++;
   else if (newSM == &sm_distinguished[SM_DIST_UNDEFINED]) n_undefined_SMs++;
   else if (newSM == &sm_distinguished[SM_DIST_DEFINED  ]) n_defined_SMs  ++;
   else                                                  { n_non_DSM_SMs  ++;
                                                           n_issued_SMs   ++; }

   if (n_noaccess_SMs  > max_noaccess_SMs ) max_noaccess_SMs  = n_noaccess_SMs;
   if (n_undefined_SMs > max_undefined_SMs) max_undefined_SMs = n_undefined_SMs;
   if (n_defined_SMs   > max_defined_SMs  ) max_defined_SMs   = n_defined_SMs;
   if (n_non_DSM_SMs   > max_non_DSM_SMs  ) max_non_DSM_SMs   = n_non_DSM_SMs;
}


/* --------------- Primary maps --------------- */

/* The main primary map.  This covers some initial part of the address
   space, addresses 0 .. (N_PRIMARY_MAP << 16)-1.  The rest of it is
   handled using the auxiliary primary map.
*/
static SecMap* primary_map[N_PRIMARY_MAP];


/* An entry in the auxiliary primary map.  base must be a 64k-aligned
   value, and sm points at the relevant secondary map.  As with the
   main primary map, the secondary may be either a real secondary, or
   one of the three distinguished secondaries.  DO NOT CHANGE THIS
   LAYOUT: the first word has to be the key for OSet fast lookups.
*/
typedef
   struct {
      Addr    base;
      SecMap* sm;
   }
   AuxMapEnt;

/* Tunable parameter: How big is the L1 queue? */
#define N_AUXMAP_L1 24

/* Tunable parameter: How far along the L1 queue to insert
   entries resulting from L2 lookups? */
#define AUXMAP_L1_INSERT_IX 12
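
/* Added commentary: the L1 is a small array kept in rough popularity
   order.  A hit at index i is swapped one slot towards the front (see
   maybe_find_in_auxmap below), and an entry freshly fetched from the L2
   tree is inserted in the middle, at AUXMAP_L1_INSERT_IX, rather than at
   the front:

      before:  [e0 e1 ... e11 e12 ... e22 e23]
      insert:  [e0 e1 ... e11  N  e12 ... e22]   // e23 falls off the end

   so a one-off lookup cannot immediately displace the hottest entries. */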

static struct {
          Addr       base;
          AuxMapEnt* ent; // pointer to the matching auxmap_L2 node
       }
       auxmap_L1[N_AUXMAP_L1];

static OSet* auxmap_L2 = NULL;

static void init_auxmap_L1_L2 ( void )
{
   Word i;
   for (i = 0; i < N_AUXMAP_L1; i++) {
      auxmap_L1[i].base = 0;
      auxmap_L1[i].ent  = NULL;
   }

   tl_assert(0 == offsetof(AuxMapEnt,base));
   tl_assert(sizeof(Addr) == sizeof(void*));
   auxmap_L2 = VG_(OSetGen_Create)( /*keyOff*/  offsetof(AuxMapEnt,base),
                                    /*fastCmp*/ NULL,
                                    VG_(malloc), "mc.iaLL.1", VG_(free) );
}

/* Check representation invariants; if OK return NULL; else a
   descriptive bit of text.  Also return the number of
   non-distinguished secondary maps referred to from the auxiliary
   primary map.
*/
static HChar* check_auxmap_L1_L2_sanity ( Word* n_secmaps_found )
{
   Word i, j;
   /* On a 32-bit platform, the L2 and L1 tables should
      both remain empty forever.

      On a 64-bit platform:
      In the L2 table:
       all .base & 0xFFFF == 0
       all .base > MAX_PRIMARY_ADDRESS
      In the L1 table:
       all (.base & 0xFFFF == 0
            and .base > MAX_PRIMARY_ADDRESS
            and .ent points to an AuxMapEnt with the same .base)
           or
           (.base == 0 and .ent == NULL)
   */
   *n_secmaps_found = 0;
   if (sizeof(void*) == 4) {
      /* 32-bit platform */
      if (VG_(OSetGen_Size)(auxmap_L2) != 0)
         return "32-bit: auxmap_L2 is non-empty";
      for (i = 0; i < N_AUXMAP_L1; i++)
         if (auxmap_L1[i].base != 0 || auxmap_L1[i].ent != NULL)
            return "32-bit: auxmap_L1 is non-empty";
   } else {
      /* 64-bit platform */
      UWord elems_seen = 0;
      AuxMapEnt *elem, *res;
      AuxMapEnt key;
      /* L2 table */
      VG_(OSetGen_ResetIter)(auxmap_L2);
      while ( (elem = VG_(OSetGen_Next)(auxmap_L2)) ) {
         elems_seen++;
         if (0 != (elem->base & (Addr)0xFFFF))
            return "64-bit: nonzero .base & 0xFFFF in auxmap_L2";
         if (elem->base <= MAX_PRIMARY_ADDRESS)
            return "64-bit: .base <= MAX_PRIMARY_ADDRESS in auxmap_L2";
         if (elem->sm == NULL)
            return "64-bit: .sm in _L2 is NULL";
         if (!is_distinguished_sm(elem->sm))
            (*n_secmaps_found)++;
      }
      if (elems_seen != n_auxmap_L2_nodes)
         return "64-bit: disagreement on number of elems in _L2";
      /* Check L1-L2 correspondence */
      for (i = 0; i < N_AUXMAP_L1; i++) {
         if (auxmap_L1[i].base == 0 && auxmap_L1[i].ent == NULL)
            continue;
         if (0 != (auxmap_L1[i].base & (Addr)0xFFFF))
            return "64-bit: nonzero .base & 0xFFFF in auxmap_L1";
         if (auxmap_L1[i].base <= MAX_PRIMARY_ADDRESS)
            return "64-bit: .base <= MAX_PRIMARY_ADDRESS in auxmap_L1";
         if (auxmap_L1[i].ent == NULL)
            return "64-bit: .ent is NULL in auxmap_L1";
         if (auxmap_L1[i].ent->base != auxmap_L1[i].base)
            return "64-bit: _L1 and _L2 bases are inconsistent";
         /* Look it up in auxmap_L2. */
         key.base = auxmap_L1[i].base;
         key.sm   = NULL;
         res = VG_(OSetGen_Lookup)(auxmap_L2, &key);
         if (res == NULL)
            return "64-bit: _L1 .base not found in _L2";
         if (res != auxmap_L1[i].ent)
            return "64-bit: _L1 .ent disagrees with _L2 entry";
      }
      /* Check L1 contains no duplicates */
      for (i = 0; i < N_AUXMAP_L1; i++) {
         if (auxmap_L1[i].base == 0)
            continue;
         for (j = i+1; j < N_AUXMAP_L1; j++) {
            if (auxmap_L1[j].base == 0)
               continue;
            if (auxmap_L1[j].base == auxmap_L1[i].base)
               return "64-bit: duplicate _L1 .base entries";
         }
      }
   }
   return NULL; /* ok */
}

static void insert_into_auxmap_L1_at ( Word rank, AuxMapEnt* ent )
{
   Word i;
   tl_assert(ent);
   tl_assert(rank >= 0 && rank < N_AUXMAP_L1);
   for (i = N_AUXMAP_L1-1; i > rank; i--)
      auxmap_L1[i] = auxmap_L1[i-1];
   auxmap_L1[rank].base = ent->base;
   auxmap_L1[rank].ent  = ent;
}

static INLINE AuxMapEnt* maybe_find_in_auxmap ( Addr a )
{
   AuxMapEnt  key;
   AuxMapEnt* res;
   Word       i;

   tl_assert(a > MAX_PRIMARY_ADDRESS);
   a &= ~(Addr)0xFFFF;

   /* First search the front-cache, which is a self-organising
      list containing the most popular entries. */

   if (LIKELY(auxmap_L1[0].base == a))
      return auxmap_L1[0].ent;
   if (LIKELY(auxmap_L1[1].base == a)) {
      Addr       t_base = auxmap_L1[0].base;
      AuxMapEnt* t_ent  = auxmap_L1[0].ent;
      auxmap_L1[0].base = auxmap_L1[1].base;
      auxmap_L1[0].ent  = auxmap_L1[1].ent;
      auxmap_L1[1].base = t_base;
      auxmap_L1[1].ent  = t_ent;
      return auxmap_L1[0].ent;
   }

   n_auxmap_L1_searches++;

   for (i = 0; i < N_AUXMAP_L1; i++) {
      if (auxmap_L1[i].base == a)
         break;
   }
   tl_assert(i >= 0 && i <= N_AUXMAP_L1);

   n_auxmap_L1_cmps += (ULong)(i+1);

   if (i < N_AUXMAP_L1) {
      if (i > 0) {
         /* Found it; nudge it one step closer to the front. */
         Addr       t_base = auxmap_L1[i-1].base;
         AuxMapEnt* t_ent  = auxmap_L1[i-1].ent;
         auxmap_L1[i-1].base = auxmap_L1[i-0].base;
         auxmap_L1[i-1].ent  = auxmap_L1[i-0].ent;
         auxmap_L1[i-0].base = t_base;
         auxmap_L1[i-0].ent  = t_ent;
         i--;
      }
      return auxmap_L1[i].ent;
   }

   n_auxmap_L2_searches++;

   /* First see if we already have it. */
   key.base = a;
   key.sm   = NULL;

   res = VG_(OSetGen_Lookup)(auxmap_L2, &key);
   if (res)
      insert_into_auxmap_L1_at( AUXMAP_L1_INSERT_IX, res );
   return res;
}

static AuxMapEnt* find_or_alloc_in_auxmap ( Addr a )
{
   AuxMapEnt *nyu, *res;

   /* First see if we already have it. */
   res = maybe_find_in_auxmap( a );
   if (LIKELY(res))
      return res;

   /* Ok, there's no entry in the secondary map, so we'll have
      to allocate one. */
   a &= ~(Addr)0xFFFF;

   nyu = (AuxMapEnt*) VG_(OSetGen_AllocNode)( auxmap_L2, sizeof(AuxMapEnt) );
   tl_assert(nyu);
   nyu->base = a;
   nyu->sm   = &sm_distinguished[SM_DIST_NOACCESS];
   VG_(OSetGen_Insert)( auxmap_L2, nyu );
   insert_into_auxmap_L1_at( AUXMAP_L1_INSERT_IX, nyu );
   n_auxmap_L2_nodes++;
   return nyu;
}


/* --------------- SecMap fundamentals --------------- */

// In all these, 'low' means it's definitely in the main primary map,
// 'high' means it's definitely in the auxiliary table.

static INLINE SecMap** get_secmap_low_ptr ( Addr a )
{
   UWord pm_off = a >> 16;
#  if VG_DEBUG_MEMORY >= 1
   tl_assert(pm_off < N_PRIMARY_MAP);
#  endif
   return &primary_map[ pm_off ];
}

static INLINE SecMap** get_secmap_high_ptr ( Addr a )
{
   AuxMapEnt* am = find_or_alloc_in_auxmap(a);
   return &am->sm;
}

static SecMap** get_secmap_ptr ( Addr a )
{
   return ( a <= MAX_PRIMARY_ADDRESS
          ? get_secmap_low_ptr(a)
          : get_secmap_high_ptr(a));
}

static INLINE SecMap* get_secmap_for_reading_low ( Addr a )
{
   return *get_secmap_low_ptr(a);
}

static INLINE SecMap* get_secmap_for_reading_high ( Addr a )
{
   return *get_secmap_high_ptr(a);
}

static INLINE SecMap* get_secmap_for_writing_low(Addr a)
{
   SecMap** p = get_secmap_low_ptr(a);
   if (UNLIKELY(is_distinguished_sm(*p)))
      *p = copy_for_writing(*p);
   return *p;
}

static INLINE SecMap* get_secmap_for_writing_high ( Addr a )
{
   SecMap** p = get_secmap_high_ptr(a);
   if (UNLIKELY(is_distinguished_sm(*p)))
      *p = copy_for_writing(*p);
   return *p;
}

/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may be a distinguished one as the caller will only want to
   be able to read it.
*/
static INLINE SecMap* get_secmap_for_reading ( Addr a )
{
   return ( a <= MAX_PRIMARY_ADDRESS
          ? get_secmap_for_reading_low (a)
          : get_secmap_for_reading_high(a) );
}

/* Produce the secmap for 'a', either from the primary map or by
   ensuring there is an entry for it in the aux primary map.  The
   secmap may not be a distinguished one, since the caller will want
   to be able to write it.  If it is a distinguished secondary, make a
   writable copy of it, install it, and return the copy instead.  (COW
   semantics).
*/
static SecMap* get_secmap_for_writing ( Addr a )
{
   return ( a <= MAX_PRIMARY_ADDRESS
          ? get_secmap_for_writing_low (a)
          : get_secmap_for_writing_high(a) );
}

/* If 'a' has a SecMap, produce it.  Else produce NULL.  But don't
   allocate one if one doesn't already exist.  This is used by the
   leak checker.
*/
static SecMap* maybe_get_secmap_for ( Addr a )
{
   if (a <= MAX_PRIMARY_ADDRESS) {
      return get_secmap_for_reading_low(a);
   } else {
      AuxMapEnt* am = maybe_find_in_auxmap(a);
      return am ? am->sm : NULL;
   }
}


/* --------------- Fundamental functions --------------- */

static INLINE
void insert_vabits2_into_vabits8 ( Addr a, UChar vabits2, UChar* vabits8 )
{
   UInt shift =  (a & 3)  << 1;        // shift by 0, 2, 4, or 6
   *vabits8  &= ~(0x3     << shift);   // mask out the two old bits
   *vabits8  |=  (vabits2 << shift);   // mask in the two new bits
}

static INLINE
void insert_vabits4_into_vabits8 ( Addr a, UChar vabits4, UChar* vabits8 )
{
   UInt shift;
   tl_assert(VG_IS_2_ALIGNED(a));      // Must be 2-aligned
   shift = (a & 2) << 1;               // shift by 0 or 4
   *vabits8 &= ~(0xf     << shift);    // mask out the four old bits
   *vabits8 |=  (vabits4 << shift);    // mask in the four new bits
}

static INLINE
UChar extract_vabits2_from_vabits8 ( Addr a, UChar vabits8 )
{
   UInt shift = (a & 3) << 1;          // shift by 0, 2, 4, or 6
   vabits8 >>= shift;                  // shift the two bits to the bottom
   return 0x3 & vabits8;               // mask out the rest
}

static INLINE
UChar extract_vabits4_from_vabits8 ( Addr a, UChar vabits8 )
{
   UInt shift;
   tl_assert(VG_IS_2_ALIGNED(a));      // Must be 2-aligned
   shift = (a & 2) << 1;               // shift by 0 or 4
   vabits8 >>= shift;                  // shift the four bits to the bottom
   return 0xf & vabits8;               // mask out the rest
}

// Note that these four are only used in slow cases.  The fast cases do
// clever things like combine the auxmap check (in
// get_secmap_{read,writ}able) with alignment checks.

// Any time this function is called, if it is possible that vabits2
// is equal to VA_BITS2_PARTDEFINED, then the corresponding entry in the
// sec-V-bits table must also be set!
static INLINE
void set_vabits2 ( Addr a, UChar vabits2 )
{
   SecMap* sm     = get_secmap_for_writing(a);
   UWord   sm_off = SM_OFF(a);
   insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
}

static INLINE
UChar get_vabits2 ( Addr a )
{
   SecMap* sm      = get_secmap_for_reading(a);
   UWord   sm_off  = SM_OFF(a);
   UChar   vabits8 = sm->vabits8[sm_off];
   return extract_vabits2_from_vabits8(a, vabits8);
}

// Any time this function is called, if it is possible that any of the
// 4 2-bit fields in vabits8 are equal to VA_BITS2_PARTDEFINED, then the
// corresponding entry(s) in the sec-V-bits table must also be set!
static INLINE
UChar get_vabits8_for_aligned_word32 ( Addr a )
{
   SecMap* sm      = get_secmap_for_reading(a);
   UWord   sm_off  = SM_OFF(a);
   UChar   vabits8 = sm->vabits8[sm_off];
   return vabits8;
}

static INLINE
void set_vabits8_for_aligned_word32 ( Addr a, UChar vabits8 )
{
   SecMap* sm     = get_secmap_for_writing(a);
   UWord   sm_off = SM_OFF(a);
   sm->vabits8[sm_off] = vabits8;
}


// Forward declarations
static UWord get_sec_vbits8(Addr a);
static void  set_sec_vbits8(Addr a, UWord vbits8);

// Returns False if there was an addressability error.
static INLINE
Bool set_vbits8 ( Addr a, UChar vbits8 )
{
   Bool  ok      = True;
   UChar vabits2 = get_vabits2(a);
   if ( VA_BITS2_NOACCESS != vabits2 ) {
      // Addressable.  Convert in-register format to in-memory format.
      // Also remove any existing sec V bit entry for the byte if no
      // longer necessary.
      if      ( V_BITS8_DEFINED   == vbits8 ) { vabits2 = VA_BITS2_DEFINED;   }
      else if ( V_BITS8_UNDEFINED == vbits8 ) { vabits2 = VA_BITS2_UNDEFINED; }
      else                                    { vabits2 = VA_BITS2_PARTDEFINED;
                                                set_sec_vbits8(a, vbits8);    }
      set_vabits2(a, vabits2);

   } else {
      // Unaddressable!  Do nothing -- when writing to unaddressable
      // memory it acts as a black hole, and the V bits can never be seen
      // again.  So we don't have to write them at all.
      ok = False;
   }
   return ok;
}

// Returns False if there was an addressability error.  In that case, we put
// all defined bits into vbits8.
static INLINE
Bool get_vbits8 ( Addr a, UChar* vbits8 )
{
   Bool  ok      = True;
   UChar vabits2 = get_vabits2(a);

   // Convert the in-memory format to in-register format.
   if      ( VA_BITS2_DEFINED   == vabits2 ) { *vbits8 = V_BITS8_DEFINED;   }
   else if ( VA_BITS2_UNDEFINED == vabits2 ) { *vbits8 = V_BITS8_UNDEFINED; }
   else if ( VA_BITS2_NOACCESS  == vabits2 ) {
      *vbits8 = V_BITS8_DEFINED;    // Make V bits defined!
      ok = False;
   } else {
      tl_assert( VA_BITS2_PARTDEFINED == vabits2 );
      *vbits8 = get_sec_vbits8(a);
   }
   return ok;
}


/* --------------- Secondary V bit table ------------ */

// This table holds the full V bit pattern for partially-defined bytes
// (PDBs) that are represented by VA_BITS2_PARTDEFINED in the main shadow
// memory.
//
// Note: the nodes in this table can become stale.  Eg. if you write a PDB,
// then overwrite the same address with a fully defined byte, the sec-V-bit
// node will not necessarily be removed.  This is because checking for
// whether removal is necessary would slow down the fast paths.
//
// To avoid the stale nodes building up too much, we periodically (once the
// table reaches a certain size) garbage collect (GC) the table by
// traversing it and evicting any "sufficiently stale" nodes, ie. nodes that
// are stale and haven't been touched for a certain number of collections.
// If more than a certain proportion of nodes survived, we increase the
// table size so that GCs occur less often.
//
// (So this is a bit different from a traditional GC, where you definitely
// want to remove any dead nodes.  It's more like we have a resizable cache
// and we're trying to find the right balance between how many elements to
// evict and how big to make the cache.)
//
// This policy is designed to avoid bad table bloat in the worst case where
// a program creates huge numbers of stale PDBs -- we would get this bloat
// if we had no GC -- while handling well the case where a node becomes
// stale but shortly afterwards is rewritten with a PDB and so becomes
// non-stale again (which happens quite often, eg. in perf/bz2).  If we just
// remove all stale nodes as soon as possible, we end up re-adding a lot of
// them later again.  The "sufficiently stale" approach avoids this.  (If a
// program has many live PDBs, performance will just suck, there's no way
// around that.)
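//
// Worked example of the eviction rule (added commentary): with
// MAX_STALE_AGE == 2 (defined below), a node whose last_touched is
// GCs_done - 3 survives a GC only if at least one of its
// BYTES_PER_SEC_VBIT_NODE bytes is still marked VA_BITS2_PARTDEFINED in
// shadow memory, i.e.
//
//    keep = (GCs_done - n->last_touched <= MAX_STALE_AGE)
//           || (some byte of n is still a PDB);
//
// see gcSecVBitTable() below for the real loop.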

static OSet* secVBitTable;

static ULong sec_vbits_new_nodes = 0;
static ULong sec_vbits_updates   = 0;

// This must be a power of two; this is checked in mc_pre_clo_init().
// The size chosen here is a trade-off:  if the nodes are bigger (ie. cover
// a larger address range) they take more space but we can get multiple
// partially-defined bytes in one if they are close to each other, reducing
// the number of total nodes.  In practice sometimes they are clustered (eg.
// perf/bz2 repeatedly writes then reads more than 20,000 in a contiguous
// row), but often not.  So we choose something intermediate.
#define BYTES_PER_SEC_VBIT_NODE     16
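
// Added commentary: each node therefore covers a 16-byte-aligned group of
// 16 bytes.  For an address a, the node key and the index within the node
// are computed as in get_sec_vbits8()/set_sec_vbits8() below:
//
//    Addr aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);  // node key
//    Int  amod     = a % BYTES_PER_SEC_VBIT_NODE;             // byte index
//
// e.g. a = 0x5013 gives aAligned = 0x5010 and amod = 3.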

// We make the table bigger if more than this many nodes survive a GC.
#define MAX_SURVIVOR_PROPORTION  0.5

// Each time we make the table bigger, we increase it by this much.
#define TABLE_GROWTH_FACTOR      2

// This defines "sufficiently stale" -- any node that hasn't been touched in
// this many GCs will be removed.
#define MAX_STALE_AGE            2

// We GC the table when it gets this many nodes in it, ie. it's effectively
// the table size.  It can change.
static Int  secVBitLimit = 1024;

// The number of GCs done, used to age sec-V-bit nodes for eviction.
// Because it's unsigned, wrapping doesn't matter -- the right answer will
// come out anyway.
static UInt GCs_done = 0;

typedef
   struct {
      Addr  a;
      UChar vbits8[BYTES_PER_SEC_VBIT_NODE];
      UInt  last_touched;
   }
   SecVBitNode;

static OSet* createSecVBitTable(void)
{
   return VG_(OSetGen_Create)( offsetof(SecVBitNode, a),
                               NULL, // use fast comparisons
                               VG_(malloc), "mc.cSVT.1 (sec VBit table)",
                               VG_(free) );
}

static void gcSecVBitTable(void)
{
   OSet*        secVBitTable2;
   SecVBitNode* n;
   Int          i, n_nodes = 0, n_survivors = 0;

   GCs_done++;

   // Create the new table.
   secVBitTable2 = createSecVBitTable();

   // Traverse the table, moving fresh nodes into the new table.
   VG_(OSetGen_ResetIter)(secVBitTable);
   while ( (n = VG_(OSetGen_Next)(secVBitTable)) ) {
      Bool keep = False;
      if ( (GCs_done - n->last_touched) <= MAX_STALE_AGE ) {
         // Keep node if it's been touched recently enough (regardless of
         // freshness/staleness).
         keep = True;
      } else {
         // Keep node if any of its bytes are non-stale.  Using
         // get_vabits2() for the lookup is not very efficient, but I don't
         // think it matters.
         for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
            if (VA_BITS2_PARTDEFINED == get_vabits2(n->a + i)) {
               keep = True;      // Found a non-stale byte, so keep.
               break;
            }
         }
      }

      if ( keep ) {
         // Insert a copy of the node into the new table.
         SecVBitNode* n2 =
            VG_(OSetGen_AllocNode)(secVBitTable2, sizeof(SecVBitNode));
         *n2 = *n;
         VG_(OSetGen_Insert)(secVBitTable2, n2);
      }
   }

   // Get the before and after sizes.
   n_nodes     = VG_(OSetGen_Size)(secVBitTable);
   n_survivors = VG_(OSetGen_Size)(secVBitTable2);

   // Destroy the old table, and put the new one in its place.
   VG_(OSetGen_Destroy)(secVBitTable);
   secVBitTable = secVBitTable2;

   if (VG_(clo_verbosity) > 1) {
      Char percbuf[7];
      VG_(percentify)(n_survivors, n_nodes, 1, 6, percbuf);
      VG_(message)(Vg_DebugMsg, "memcheck GC: %d nodes, %d survivors (%s)\n",
                   n_nodes, n_survivors, percbuf);
   }

   // Increase table size if necessary.
   if (n_survivors > (secVBitLimit * MAX_SURVIVOR_PROPORTION)) {
      secVBitLimit *= TABLE_GROWTH_FACTOR;
      if (VG_(clo_verbosity) > 1)
         VG_(message)(Vg_DebugMsg, "memcheck GC: increase table size to %d\n",
                      secVBitLimit);
   }
}

static UWord get_sec_vbits8(Addr a)
{
   Addr         aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
   Int          amod     = a % BYTES_PER_SEC_VBIT_NODE;
   SecVBitNode* n        = VG_(OSetGen_Lookup)(secVBitTable, &aAligned);
   UChar        vbits8;
   tl_assert2(n, "get_sec_vbits8: no node for address %p (%p)\n", aAligned, a);
   // Shouldn't be fully defined or fully undefined -- those cases shouldn't
   // make it to the secondary V bits table.
   vbits8 = n->vbits8[amod];
   tl_assert(V_BITS8_DEFINED != vbits8 && V_BITS8_UNDEFINED != vbits8);
   return vbits8;
}

static void set_sec_vbits8(Addr a, UWord vbits8)
{
   Addr         aAligned = VG_ROUNDDN(a, BYTES_PER_SEC_VBIT_NODE);
   Int          i, amod  = a % BYTES_PER_SEC_VBIT_NODE;
   SecVBitNode* n        = VG_(OSetGen_Lookup)(secVBitTable, &aAligned);
   // Shouldn't be fully defined or fully undefined -- those cases shouldn't
   // make it to the secondary V bits table.
   tl_assert(V_BITS8_DEFINED != vbits8 && V_BITS8_UNDEFINED != vbits8);
   if (n) {
      n->vbits8[amod] = vbits8;     // update
      n->last_touched = GCs_done;
      sec_vbits_updates++;
   } else {
      // New node:  assign the specific byte, make the rest invalid (they
      // should never be read as-is, but be cautious).
      n = VG_(OSetGen_AllocNode)(secVBitTable, sizeof(SecVBitNode));
      n->a = aAligned;
      for (i = 0; i < BYTES_PER_SEC_VBIT_NODE; i++) {
         n->vbits8[i] = V_BITS8_UNDEFINED;
      }
      n->vbits8[amod] = vbits8;
      n->last_touched = GCs_done;

      // Do a table GC if necessary.  Nb: do this before inserting the new
      // node, to avoid erroneously GC'ing the new node.
      if (secVBitLimit == VG_(OSetGen_Size)(secVBitTable)) {
         gcSecVBitTable();
      }

      // Insert the new node.
      VG_(OSetGen_Insert)(secVBitTable, n);
      sec_vbits_new_nodes++;

      n_secVBit_nodes = VG_(OSetGen_Size)(secVBitTable);
      if (n_secVBit_nodes > max_secVBit_nodes)
         max_secVBit_nodes = n_secVBit_nodes;
   }
}


/* --------------- Endianness helpers --------------- */

/* Returns the offset in memory of the byteno-th least significant byte
   in a wordszB-sized word, given the specified endianness. */
static INLINE UWord byte_offset_w ( UWord wordszB, Bool bigendian,
                                    UWord byteno ) {
   return bigendian ? (wordszB-1-byteno) : byteno;
}
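
/* Usage sketch (added commentary): for a 4-byte word, byteno counts from
   the least significant byte, so

      byte_offset_w(4, False, 3) == 3   // little-endian: MSB at highest addr
      byte_offset_w(4, True,  3) == 0   // big-endian:    MSB at lowest addr

   which is how mc_LOADVn_slow/mc_STOREVn_slow below walk a word's bytes in
   significance order regardless of host endianness. */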


/* --------------- Ignored address ranges --------------- */

#define M_IGNORE_RANGES 4

typedef
   struct {
      Int  used;
      Addr start[M_IGNORE_RANGES];
      Addr end[M_IGNORE_RANGES];
   }
   IgnoreRanges;

static IgnoreRanges ignoreRanges;

INLINE Bool MC_(in_ignored_range) ( Addr a )
{
   Int i;
   if (LIKELY(ignoreRanges.used == 0))
      return False;
   for (i = 0; i < ignoreRanges.used; i++) {
      if (a >= ignoreRanges.start[i] && a < ignoreRanges.end[i])
         return True;
   }
   return False;
}


/* Parse a 32- or 64-bit hex number, including leading 0x, from string
   starting at *ppc, putting result in *result, and return True.  Or
   fail, in which case *ppc and *result are undefined, and return
   False. */

static Bool isHex ( UChar c )
{
   return ((c >= '0' && c <= '9') ||
           (c >= 'a' && c <= 'f') ||
           (c >= 'A' && c <= 'F'));
}

static UInt fromHex ( UChar c )
{
   if (c >= '0' && c <= '9')
      return (UInt)c - (UInt)'0';
   if (c >= 'a' && c <= 'f')
      return 10 + (UInt)c - (UInt)'a';
   if (c >= 'A' && c <= 'F')
      return 10 + (UInt)c - (UInt)'A';
   tl_assert(0);
   /*NOTREACHED*/
   return 0;
}

static Bool parse_Addr ( UChar** ppc, Addr* result )
{
   Int used, limit = 2 * sizeof(Addr);
   if (**ppc != '0')
      return False;
   (*ppc)++;
   if (**ppc != 'x')
      return False;
   (*ppc)++;
   used = 0;
   *result = 0;
   while (isHex(**ppc)) {
      UInt d = fromHex(**ppc);
      if (used > limit) return False;
      *result = ((*result) << 4) | d;
      (*ppc)++;
      used++;
   }
   if (used == 0)
      return False;
   return True;
}

/* Parse two such numbers separated by a dash, or fail. */

static Bool parse_range ( UChar** ppc, Addr* result1, Addr* result2 )
{
   Bool ok = parse_Addr(ppc, result1);
   if (!ok)
      return False;
   if (**ppc != '-')
      return False;
   (*ppc)++;
   ok = parse_Addr(ppc, result2);
   if (!ok)
      return False;
   return True;
}

/* Parse a set of ranges separated by commas into 'ignoreRanges', or
   fail. */

static Bool parse_ignore_ranges ( UChar* str0 )
{
   Addr    start, end;
   Bool    ok;
   UChar*  str = str0;
   UChar** ppc = &str;
   ignoreRanges.used = 0;
   while (1) {
      ok = parse_range(ppc, &start, &end);
      if (!ok)
         return False;
      if (ignoreRanges.used >= M_IGNORE_RANGES)
         return False;
      ignoreRanges.start[ignoreRanges.used] = start;
      ignoreRanges.end[ignoreRanges.used] = end;
      ignoreRanges.used++;
      if (**ppc == 0)
         return True;
      if (**ppc != ',')
         return False;
      (*ppc)++;
   }
   /*NOTREACHED*/
   return False;
}


/* --------------- Load/store slow cases. --------------- */

static
#ifndef PERF_FAST_LOADV
INLINE
#endif
ULong mc_LOADVn_slow ( Addr a, SizeT nBits, Bool bigendian )
{
   /* Make up a 64-bit result V word, which contains the loaded data for
      valid addresses and Defined for invalid addresses.  Iterate over
      the bytes in the word, from the most significant down to the
      least. */
   ULong vbits64     = V_BITS64_UNDEFINED;
   SizeT szB         = nBits / 8;
   SSizeT i;          // Must be signed.
   SizeT n_addrs_bad = 0;
   Addr  ai;
   Bool  partial_load_exemption_applies;
   UChar vbits8;
   Bool  ok;

   PROF_EVENT(30, "mc_LOADVn_slow");

   /* ------------ BEGIN semi-fast cases ------------ */
   /* These deal quickly-ish with the common auxiliary primary map
      cases on 64-bit platforms.  Are merely a speedup hack; can be
      omitted without loss of correctness/functionality.  Note that in
      both cases the "sizeof(void*) == 8" causes these cases to be
      folded out by compilers on 32-bit platforms.  These are derived
      from LOADV64 and LOADV32.
   */
   if (LIKELY(sizeof(void*) == 8
                      && nBits == 64 && VG_IS_8_ALIGNED(a))) {
      SecMap* sm       = get_secmap_for_reading(a);
      UWord   sm_off16 = SM_OFF_16(a);
      UWord   vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
      if (LIKELY(vabits16 == VA_BITS16_DEFINED))
         return V_BITS64_DEFINED;
      if (LIKELY(vabits16 == VA_BITS16_UNDEFINED))
         return V_BITS64_UNDEFINED;
      /* else fall into the slow case */
   }
   if (LIKELY(sizeof(void*) == 8
                      && nBits == 32 && VG_IS_4_ALIGNED(a))) {
      SecMap* sm      = get_secmap_for_reading(a);
      UWord   sm_off  = SM_OFF(a);
      UWord   vabits8 = sm->vabits8[sm_off];
      if (LIKELY(vabits8 == VA_BITS8_DEFINED))
         return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_DEFINED);
      if (LIKELY(vabits8 == VA_BITS8_UNDEFINED))
         return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_UNDEFINED);
      /* else fall into slow case */
   }
   /* ------------ END semi-fast cases ------------ */

   tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);

   for (i = szB-1; i >= 0; i--) {
      PROF_EVENT(31, "mc_LOADVn_slow(loop)");
      ai = a + byte_offset_w(szB, bigendian, i);
      ok = get_vbits8(ai, &vbits8);
      if (!ok) n_addrs_bad++;
      vbits64 <<= 8;
      vbits64 |= vbits8;
   }

   /* This is a hack which avoids producing errors for code which
      insists in stepping along byte strings in aligned word-sized
      chunks, and there is a partially defined word at the end.  (eg,
      optimised strlen).  Such code is basically broken at least WRT
      semantics of ANSI C, but sometimes users don't have the option
      to fix it, and so this option is provided.  Note it is now
      defaulted to not-engaged.

      A load from a partially-addressable place is allowed if:
      - the command-line flag is set
      - it's a word-sized, word-aligned load
      - at least one of the addresses in the word *is* valid
   */
   partial_load_exemption_applies
      = MC_(clo_partial_loads_ok) && szB == VG_WORDSIZE
                                   && VG_IS_WORD_ALIGNED(a)
                                   && n_addrs_bad < VG_WORDSIZE;

   if (n_addrs_bad > 0 && !partial_load_exemption_applies)
      MC_(record_address_error)( VG_(get_running_tid)(), a, szB, False );

   return vbits64;
}


static
#ifndef PERF_FAST_STOREV
INLINE
#endif
void mc_STOREVn_slow ( Addr a, SizeT nBits, ULong vbytes, Bool bigendian )
{
   SizeT szB = nBits / 8;
   SizeT i, n_addrs_bad = 0;
   UChar vbits8;
   Addr  ai;
   Bool  ok;

   PROF_EVENT(35, "mc_STOREVn_slow");

   /* ------------ BEGIN semi-fast cases ------------ */
   /* These deal quickly-ish with the common auxiliary primary map
      cases on 64-bit platforms.  Are merely a speedup hack; can be
      omitted without loss of correctness/functionality.  Note that in
      both cases the "sizeof(void*) == 8" causes these cases to be
      folded out by compilers on 32-bit platforms.  These are derived
      from STOREV64 and STOREV32.
   */
   if (LIKELY(sizeof(void*) == 8
                      && nBits == 64 && VG_IS_8_ALIGNED(a))) {
      SecMap* sm       = get_secmap_for_reading(a);
      UWord   sm_off16 = SM_OFF_16(a);
      UWord   vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
      if (LIKELY( !is_distinguished_sm(sm) &&
                          (VA_BITS16_DEFINED   == vabits16 ||
                           VA_BITS16_UNDEFINED == vabits16) )) {
         /* Handle common case quickly: a is suitably aligned, */
         /* is mapped, and is addressable. */
         // Convert full V-bits in register to compact 2-bit form.
         if (LIKELY(V_BITS64_DEFINED == vbytes)) {
            ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
            return;
         } else if (V_BITS64_UNDEFINED == vbytes) {
            ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
            return;
         }
         /* else fall into the slow case */
      }
      /* else fall into the slow case */
   }
   if (LIKELY(sizeof(void*) == 8
                      && nBits == 32 && VG_IS_4_ALIGNED(a))) {
      SecMap* sm      = get_secmap_for_reading(a);
      UWord   sm_off  = SM_OFF(a);
      UWord   vabits8 = sm->vabits8[sm_off];
      if (LIKELY( !is_distinguished_sm(sm) &&
                          (VA_BITS8_DEFINED   == vabits8 ||
                           VA_BITS8_UNDEFINED == vabits8) )) {
         /* Handle common case quickly: a is suitably aligned, */
         /* is mapped, and is addressable. */
         // Convert full V-bits in register to compact 2-bit form.
         if (LIKELY(V_BITS32_DEFINED == (vbytes & 0xFFFFFFFF))) {
            sm->vabits8[sm_off] = VA_BITS8_DEFINED;
            return;
         } else if (V_BITS32_UNDEFINED == (vbytes & 0xFFFFFFFF)) {
            sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
            return;
         }
         /* else fall into the slow case */
      }
      /* else fall into the slow case */
   }
   /* ------------ END semi-fast cases ------------ */

   tl_assert(nBits == 64 || nBits == 32 || nBits == 16 || nBits == 8);

   /* Dump vbytes in memory, iterating from least to most significant
      byte.  At the same time establish addressability of the location. */
   for (i = 0; i < szB; i++) {
      PROF_EVENT(36, "mc_STOREVn_slow(loop)");
      ai     = a + byte_offset_w(szB, bigendian, i);
      vbits8 = vbytes & 0xff;
      ok     = set_vbits8(ai, vbits8);
      if (!ok) n_addrs_bad++;
      vbytes >>= 8;
   }

   /* If an address error has happened, report it. */
   if (n_addrs_bad > 0)
      MC_(record_address_error)( VG_(get_running_tid)(), a, szB, True );
}


/*------------------------------------------------------------*/
/*--- Setting permissions over address ranges.             ---*/
/*------------------------------------------------------------*/

static void set_address_range_perms ( Addr a, SizeT lenT, UWord vabits16,
                                      UWord dsm_num )
{
   UWord    sm_off, sm_off16;
   UWord    vabits2 = vabits16 & 0x3;
   SizeT    lenA, lenB, len_to_next_secmap;
   Addr     aNext;
   SecMap*  sm;
   SecMap** sm_ptr;
   SecMap*  example_dsm;

   PROF_EVENT(150, "set_address_range_perms");

   /* Check the V+A bits make sense. */
   tl_assert(VA_BITS16_NOACCESS  == vabits16 ||
             VA_BITS16_UNDEFINED == vabits16 ||
             VA_BITS16_DEFINED   == vabits16);

   // This code should never write PDBs; ensure this.  (See comment above
   // set_vabits2().)
   tl_assert(VA_BITS2_PARTDEFINED != vabits2);

   if (lenT == 0)
      return;

   if (lenT > 256 * 1024 * 1024) {
      if (VG_(clo_verbosity) > 0 && !VG_(clo_xml)) {
         Char* s = "unknown???";
         if (vabits16 == VA_BITS16_NOACCESS ) s = "noaccess";
         if (vabits16 == VA_BITS16_UNDEFINED) s = "undefined";
         if (vabits16 == VA_BITS16_DEFINED  ) s = "defined";
         VG_(message)(Vg_UserMsg, "Warning: set address range perms: "
                                  "large range [0x%lx, 0x%lx) (%s)\n",
                                  a, a + lenT, s);
      }
   }

#ifndef PERF_FAST_SARP
   /*------------------ debug-only case ------------------ */
   {
      // Endianness doesn't matter here because all bytes are being set to
      // the same value.
      // Nb: We don't have to worry about updating the sec-V-bits table
      // after these set_vabits2() calls because this code never writes
      // VA_BITS2_PARTDEFINED values.
      SizeT i;
      for (i = 0; i < lenT; i++) {
         set_vabits2(a + i, vabits2);
      }
      return;
   }
#endif

   /*------------------ standard handling ------------------ */

   /* Get the distinguished secondary that we might want
      to use (part of the space-compression scheme). */
   example_dsm = &sm_distinguished[dsm_num];

   // We have to handle ranges covering various combinations of partial and
   // whole sec-maps.  Here is how parts 1, 2 and 3 are used in each case.
   // Cases marked with a '*' are common.
   //
   //   TYPE                                             PARTS USED
   //   ----                                             ----------
   // * one partial sec-map                  (p)         1
   // - one whole sec-map                    (P)         2
   //
   // * two partial sec-maps                 (pp)        1,3
   // - one partial, one whole sec-map       (pP)        1,2
   // - one whole, one partial sec-map       (Pp)        2,3
   // - two whole sec-maps                   (PP)        2,2
   //
   // * one partial, one whole, one partial  (pPp)       1,2,3
   // - one partial, two whole               (pPP)       1,2,2
   // - two whole, one partial               (PPp)       2,2,3
   // - three whole                          (PPP)       2,2,2
   //
   // * one partial, N-2 whole, one partial  (pP...Pp)   1,2...2,3
   // - one partial, N-1 whole               (pP...PP)   1,2...2,2
   // - N-1 whole, one partial               (PP...Pp)   2,2...2,3
   // - N whole                              (PP...PP)   2,2...2,2

   // Break up total length (lenT) into two parts:  length in the first
   // sec-map (lenA), and the rest (lenB);  lenT == lenA + lenB.
   aNext = start_of_this_sm(a) + SM_SIZE;
   len_to_next_secmap = aNext - a;
   if ( lenT <= len_to_next_secmap ) {
      // Range entirely within one sec-map.  Covers almost all cases.
      PROF_EVENT(151, "set_address_range_perms-single-secmap");
      lenA = lenT;
      lenB = 0;
   } else if (is_start_of_sm(a)) {
      // Range spans at least one whole sec-map, and starts at the beginning
      // of a sec-map; skip to Part 2.
      PROF_EVENT(152, "set_address_range_perms-startof-secmap");
      lenA = 0;
      lenB = lenT;
      goto part2;
   } else {
      // Range spans two or more sec-maps, first one is partial.
      PROF_EVENT(153, "set_address_range_perms-multiple-secmaps");
      lenA = len_to_next_secmap;
      lenB = lenT - lenA;
   }
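
   // Worked example (added commentary): with 64kB sec-maps, a = 0x1fff0 and
   // lenT = 0x30 straddle the sec-map boundary at 0x20000, so
   // len_to_next_secmap = 0x10, giving lenA = 0x10 (handled by Part 1 in
   // the first sec-map) and lenB = 0x20 (handled by Parts 2/3).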

   //------------------------------------------------------------------------
   // Part 1: Deal with the first sec_map.  Most of the time the range will be
   // entirely within a sec_map and this part alone will suffice.  Also,
   // doing it this way lets us avoid repeatedly testing for the crossing of
   // a sec-map boundary within these loops.
   //------------------------------------------------------------------------

   // If it's distinguished, make it undistinguished if necessary.
   sm_ptr = get_secmap_ptr(a);
   if (is_distinguished_sm(*sm_ptr)) {
      if (*sm_ptr == example_dsm) {
         // Sec-map already has the V+A bits that we want, so skip.
         PROF_EVENT(154, "set_address_range_perms-dist-sm1-quick");
         a    = aNext;
         lenA = 0;
      } else {
         PROF_EVENT(155, "set_address_range_perms-dist-sm1");
         *sm_ptr = copy_for_writing(*sm_ptr);
      }
   }
   sm = *sm_ptr;

   // 1 byte steps
   while (True) {
      if (VG_IS_8_ALIGNED(a)) break;
      if (lenA < 1)           break;
      PROF_EVENT(156, "set_address_range_perms-loop1a");
      sm_off = SM_OFF(a);
      insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
      a    += 1;
      lenA -= 1;
   }
   // 8-aligned, 8 byte steps
   while (True) {
      if (lenA < 8) break;
      PROF_EVENT(157, "set_address_range_perms-loop8a");
      sm_off16 = SM_OFF_16(a);
      ((UShort*)(sm->vabits8))[sm_off16] = vabits16;
      a    += 8;
      lenA -= 8;
   }
   // 1 byte steps
   while (True) {
      if (lenA < 1) break;
      PROF_EVENT(158, "set_address_range_perms-loop1b");
      sm_off = SM_OFF(a);
      insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
      a    += 1;
      lenA -= 1;
   }

   // We've finished the first sec-map.  Is that it?
   if (lenB == 0)
      return;

   //------------------------------------------------------------------------
   // Part 2: Fast-set entire sec-maps at a time.
   //------------------------------------------------------------------------
  part2:
   // 64KB-aligned, 64KB steps.
   // Nb: we can reach here with lenB < SM_SIZE
   tl_assert(0 == lenA);
   while (True) {
      if (lenB < SM_SIZE) break;
      tl_assert(is_start_of_sm(a));
      PROF_EVENT(159, "set_address_range_perms-loop64K");
      sm_ptr = get_secmap_ptr(a);
      if (!is_distinguished_sm(*sm_ptr)) {
         PROF_EVENT(160, "set_address_range_perms-loop64K-free-dist-sm");
         // Free the non-distinguished sec-map that we're replacing.  This
         // case happens moderately often, enough to be worthwhile.
         VG_(am_munmap_valgrind)((Addr)*sm_ptr, sizeof(SecMap));
      }
      update_SM_counts(*sm_ptr, example_dsm);
      // Make the sec-map entry point to the example DSM
      *sm_ptr = example_dsm;
      lenB -= SM_SIZE;
      a    += SM_SIZE;
   }

   // We've finished the whole sec-maps.  Is that it?
   if (lenB == 0)
      return;

   //------------------------------------------------------------------------
   // Part 3: Finish off the final partial sec-map, if necessary.
   //------------------------------------------------------------------------

   tl_assert(is_start_of_sm(a) && lenB < SM_SIZE);

   // If it's distinguished, make it undistinguished if necessary.
   sm_ptr = get_secmap_ptr(a);
   if (is_distinguished_sm(*sm_ptr)) {
      if (*sm_ptr == example_dsm) {
         // Sec-map already has the V+A bits that we want, so stop.
         PROF_EVENT(161, "set_address_range_perms-dist-sm2-quick");
         return;
      } else {
         PROF_EVENT(162, "set_address_range_perms-dist-sm2");
         *sm_ptr = copy_for_writing(*sm_ptr);
      }
   }
   sm = *sm_ptr;

   // 8-aligned, 8 byte steps
   while (True) {
      if (lenB < 8) break;
      PROF_EVENT(163, "set_address_range_perms-loop8b");
      sm_off16 = SM_OFF_16(a);
      ((UShort*)(sm->vabits8))[sm_off16] = vabits16;
      a    += 8;
      lenB -= 8;
   }
   // 1 byte steps
   while (True) {
      if (lenB < 1) return;
      PROF_EVENT(164, "set_address_range_perms-loop1c");
      sm_off = SM_OFF(a);
      insert_vabits2_into_vabits8( a, vabits2, &(sm->vabits8[sm_off]) );
      a    += 1;
      lenB -= 1;
   }
}


/* --- Set permissions for arbitrary address ranges --- */

void MC_(make_mem_noaccess) ( Addr a, SizeT len )
{
   PROF_EVENT(40, "MC_(make_mem_noaccess)");
   DEBUG("MC_(make_mem_noaccess)(%p, %lu)\n", a, len);
   set_address_range_perms ( a, len, VA_BITS16_NOACCESS, SM_DIST_NOACCESS );
   if (UNLIKELY( MC_(clo_mc_level) == 3 ))
      ocache_sarp_Clear_Origins ( a, len );
}

static void make_mem_undefined ( Addr a, SizeT len )
{
   PROF_EVENT(41, "make_mem_undefined");
   DEBUG("make_mem_undefined(%p, %lu)\n", a, len);
   set_address_range_perms ( a, len, VA_BITS16_UNDEFINED, SM_DIST_UNDEFINED );
}

void MC_(make_mem_undefined_w_otag) ( Addr a, SizeT len, UInt otag )
{
   PROF_EVENT(41, "MC_(make_mem_undefined)");
   DEBUG("MC_(make_mem_undefined)(%p, %lu)\n", a, len);
   set_address_range_perms ( a, len, VA_BITS16_UNDEFINED, SM_DIST_UNDEFINED );
   if (UNLIKELY( MC_(clo_mc_level) == 3 ))
      ocache_sarp_Set_Origins ( a, len, otag );
}

static
void make_mem_undefined_w_tid_and_okind ( Addr a, SizeT len,
                                          ThreadId tid, UInt okind )
{
   UInt        ecu;
   ExeContext* here;
   /* VG_(record_ExeContext) checks for validity of tid, and asserts
      if it is invalid.  So no need to do it here. */
   tl_assert(okind <= 3);
   here = VG_(record_ExeContext)( tid, 0/*first_ip_delta*/ );
   tl_assert(here);
   ecu = VG_(get_ECU_from_ExeContext)(here);
   tl_assert(VG_(is_plausible_ECU)(ecu));
   MC_(make_mem_undefined_w_otag) ( a, len, ecu | okind );
}

static
void make_mem_undefined_w_tid ( Addr a, SizeT len, ThreadId tid ) {
   make_mem_undefined_w_tid_and_okind ( a, len, tid, MC_OKIND_UNKNOWN );
}

void MC_(make_mem_defined) ( Addr a, SizeT len )
{
   PROF_EVENT(42, "MC_(make_mem_defined)");
   DEBUG("MC_(make_mem_defined)(%p, %lu)\n", a, len);
   set_address_range_perms ( a, len, VA_BITS16_DEFINED, SM_DIST_DEFINED );
   if (UNLIKELY( MC_(clo_mc_level) == 3 ))
      ocache_sarp_Clear_Origins ( a, len );
}

/* For each byte in [a,a+len), if the byte is addressable, make it be
   defined, but if it isn't addressable, leave it alone.  In other
   words a version of MC_(make_mem_defined) that doesn't mess with
   addressability.  Low-performance implementation. */
static void make_mem_defined_if_addressable ( Addr a, SizeT len )
{
   SizeT i;
   UChar vabits2;
   DEBUG("make_mem_defined_if_addressable(%p, %llu)\n", a, (ULong)len);
   for (i = 0; i < len; i++) {
      vabits2 = get_vabits2( a+i );
      if (LIKELY(VA_BITS2_NOACCESS != vabits2)) {
         set_vabits2(a+i, VA_BITS2_DEFINED);
         if (UNLIKELY(MC_(clo_mc_level) >= 3)) {
            MC_(helperc_b_store1)( a+i, 0 );   /* clear the origin tag */
         }
      }
   }
}

/* Similarly (needed for mprotect handling ..) */
static void make_mem_defined_if_noaccess ( Addr a, SizeT len )
{
   SizeT i;
   UChar vabits2;
   DEBUG("make_mem_defined_if_noaccess(%p, %llu)\n", a, (ULong)len);
   for (i = 0; i < len; i++) {
      vabits2 = get_vabits2( a+i );
      if (LIKELY(VA_BITS2_NOACCESS == vabits2)) {
         set_vabits2(a+i, VA_BITS2_DEFINED);
         if (UNLIKELY(MC_(clo_mc_level) >= 3)) {
            MC_(helperc_b_store1)( a+i, 0 );   /* clear the origin tag */
         }
      }
   }
}

/* --- Block-copy permissions (needed for implementing realloc() and
       sys_mremap). --- */

void MC_(copy_address_range_state) ( Addr src, Addr dst, SizeT len )
{
   SizeT i, j;
   UChar vabits2, vabits8;
   Bool  aligned, nooverlap;

   DEBUG("MC_(copy_address_range_state)\n");
   PROF_EVENT(50, "MC_(copy_address_range_state)");

   if (len == 0 || src == dst)
      return;

   aligned   = VG_IS_4_ALIGNED(src) && VG_IS_4_ALIGNED(dst);
   nooverlap = src+len <= dst || dst+len <= src;

   if (nooverlap && aligned) {

      /* Vectorised fast case, when no overlap and suitably aligned */
      /* vector loop */
      i = 0;
      while (len >= 4) {
         vabits8 = get_vabits8_for_aligned_word32( src+i );
         set_vabits8_for_aligned_word32( dst+i, vabits8 );
         if (LIKELY(VA_BITS8_DEFINED == vabits8
                            || VA_BITS8_UNDEFINED == vabits8
                            || VA_BITS8_NOACCESS == vabits8)) {
            /* do nothing */
         } else {
            /* have to copy secondary map info */
            if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+0 ))
               set_sec_vbits8( dst+i+0, get_sec_vbits8( src+i+0 ) );
            if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+1 ))
               set_sec_vbits8( dst+i+1, get_sec_vbits8( src+i+1 ) );
            if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+2 ))
               set_sec_vbits8( dst+i+2, get_sec_vbits8( src+i+2 ) );
            if (VA_BITS2_PARTDEFINED == get_vabits2( src+i+3 ))
               set_sec_vbits8( dst+i+3, get_sec_vbits8( src+i+3 ) );
         }
         i   += 4;
         len -= 4;
      }
      /* fixup loop */
      while (len >= 1) {
         vabits2 = get_vabits2( src+i );
         set_vabits2( dst+i, vabits2 );
         if (VA_BITS2_PARTDEFINED == vabits2) {
            set_sec_vbits8( dst+i, get_sec_vbits8( src+i ) );
         }
         i++;
         len--;
      }

   } else {

      /* We have to do things the slow way */
      if (src < dst) {
         for (i = 0, j = len-1; i < len; i++, j--) {
            PROF_EVENT(51, "MC_(copy_address_range_state)(loop)");
            vabits2 = get_vabits2( src+j );
            set_vabits2( dst+j, vabits2 );
            if (VA_BITS2_PARTDEFINED == vabits2) {
               set_sec_vbits8( dst+j, get_sec_vbits8( src+j ) );
            }
         }
      }

      if (src > dst) {
         for (i = 0; i < len; i++) {
            PROF_EVENT(52, "MC_(copy_address_range_state)(loop)");
            vabits2 = get_vabits2( src+i );
            set_vabits2( dst+i, vabits2 );
            if (VA_BITS2_PARTDEFINED == vabits2) {
               set_sec_vbits8( dst+i, get_sec_vbits8( src+i ) );
            }
         }
      }
   }
}
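
/* Design note (added commentary): the two slow-path loops above mirror
   memmove's overlap handling.  E.g. with src = 100, dst = 102, len = 4,
   an ascending copy would read src+2 (which is dst+0) after it had already
   been overwritten; copying descending (j = len-1 .. 0) when src < dst
   avoids that, and ascending order is safe when src > dst. */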


/*------------------------------------------------------------*/
/*--- Origin tracking stuff - cache basics                 ---*/
/*------------------------------------------------------------*/

/* AN OVERVIEW OF THE ORIGIN TRACKING IMPLEMENTATION
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

   Note that this implementation draws inspiration from the "origin
   tracking by value piggybacking" scheme described in "Tracking Bad
   Apples: Reporting the Origin of Null and Undefined Value Errors"
   (Michael Bond, Nicholas Nethercote, Stephen Kent, Samuel Guyer,
   Kathryn McKinley, OOPSLA07, Montreal, Oct 2007) but in fact it is
   implemented completely differently.
1758 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1760 This implementation tracks the defining point of all uninitialised
1761 values using so called "origin tags", which are 32-bit integers,
1762 rather than using the values themselves to encode the origins. The
1763 latter, so-called value piggybacking", is what the OOPSLA07 paper
1766 Origin tags, as tracked by the machinery below, are 32-bit unsigned
1767 ints (UInts), regardless of the machine's word size. Each tag
1768 comprises an upper 30-bit ECU field and a lower 2-bit
1769 'kind' field. The ECU field is a number given out by m_execontext
1770 and has a 1-1 mapping with ExeContext*s. An ECU can be used
1771 directly as an origin tag (otag), but in fact we want to put
1772 additional information 'kind' field to indicate roughly where the
1773 tag came from. This helps print more understandable error messages
1774 for the user -- it has no other purpose. In summary:
1776 * Both ECUs and origin tags are represented as 32-bit words
1778 * m_execontext and the core-tool interface deal purely in ECUs.
1779 They have no knowledge of origin tags - that is a purely
1780 Memcheck-internal matter.
1782 * all valid ECUs have the lowest 2 bits zero and at least
1783 one of the upper 30 bits nonzero (see VG_(is_plausible_ECU))
1785 * to convert from an ECU to an otag, OR in one of the MC_OKIND_
1786 constants defined in mc_include.h.
1788 * to convert an otag back to an ECU, AND it with ~3
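
   Illustrative sketch of the conversions (added commentary, using the
   MC_OKIND_ constants from mc_include.h):

      UInt ecu  = VG_(get_ECU_from_ExeContext)(here);  // low 2 bits zero
      UInt otag = ecu | MC_OKIND_HEAP;   // tag it as a heap-block origin
      UInt back = otag & ~3;             // recovers the original ECU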

   One important fact is that no valid otag is zero.  A zero otag is
   used by the implementation to indicate "no origin", which could
   mean that either the value is defined, or it is undefined but the
   implementation somehow managed to lose the origin.

   The ECU used for memory created by malloc etc is derived from the
   stack trace at the time the malloc etc happens.  This means the
   mechanism can show the exact allocation point for heap-created
   uninitialised values.

   In contrast, it is simply too expensive to create a complete
   backtrace for each stack allocation.  Therefore we merely use a
   depth-1 backtrace for stack allocations, which can be done once at
   translation time, rather than N times at run time.  The result of
   this is that, for stack created uninitialised values, Memcheck can
   only show the allocating function, and not what called it.
   Furthermore, compilers tend to move the stack pointer just once at
   the start of the function, to allocate all locals, and so in fact
   the stack origin almost always simply points to the opening brace
   of the function.  Net result is, for stack origins, the mechanism
   can tell you in which function the undefined value was created, but
   that's all.  Users will need to carefully check all locals in the
   function.
1815 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1817 Memory is shadowed using a two level cache structure (ocacheL1 and
1818 ocacheL2). Memory references are first directed to ocacheL1. This
1819 is a traditional 2-way set associative cache with 32-byte lines and
1820 approximate LRU replacement within each set.
1822 A naive implementation would require storing one 32 bit otag for
1823 each byte of memory covered, a 4:1 space overhead. Instead, there
1824 is one otag for every 4 bytes of memory covered, plus a 4-bit mask
1825 that shows which of the 4 bytes have that shadow value and which
1826 have a shadow value of zero (indicating no origin). Hence a lot of
1827 space is saved, but the cost is that only one different origin per
1828 4 bytes of address space can be represented. This is a source of
   imprecision, but how much of a problem it really is remains to be
   seen.
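
   To make the encoding concrete (a sketch only, in terms of the
   OCacheLine fields defined further below): the otag for the byte at
   address a within a line is recovered as

      UWord i    = oc_line_offset(a);       // which 32-bit group
      UChar mask = 1 << (a & 3);            // which byte in that group
      UInt  otag = (line->descr[i] & mask) ? line->w32[i] : 0;

   so the 4-bit descr says which bytes genuinely carry the group's otag.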
1832 A cache line that contains all zeroes ("no origins") contains no
1833 useful information, and can be ejected from the L1 cache "for
1834 free", in the sense that a read miss on the L1 causes a line of
1835 zeroes to be installed. However, ejecting a line containing
1836 nonzeroes risks losing origin information permanently. In order to
1837 prevent such lossage, ejected nonzero lines are placed in a
1838 secondary cache (ocacheL2), which is an OSet (AVL tree) of cache
1839 lines. This can grow arbitrarily large, and so should ensure that
1840 Memcheck runs out of memory in preference to losing useful origin
1841 info due to cache size limitations.
1843 Shadowing registers is a bit tricky, because the shadow values are
1844 32 bits, regardless of the size of the register. That gives a
1845 problem for registers smaller than 32 bits. The solution is to
1846 find spaces in the guest state that are unused, and use those to
1847 shadow guest state fragments smaller than 32 bits. For example, on
1848 ppc32/64, each vector register is 16 bytes long. If 4 bytes of the
1849 shadow are allocated for the register's otag, then there are still
1850 12 bytes left over which could be used to shadow 3 other values.
1852 This implies there is some non-obvious mapping from guest state
1853 (start,length) pairs to the relevant shadow offset (for the origin
1854 tags). And it is unfortunately guest-architecture specific. The
   mapping is contained in mc_machine.c, which is quite lengthy but
   straightforward.
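
   (As a sketch, the lookup in mc_machine.c has roughly this shape:

      Int MC_(get_otrack_shadow_offset) ( Int offset, Int szB );

   returning the shadow offset holding the otag for the guest state
   slice [offset, offset+szB), or -1 if that slice is not tracked.)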
1858 Instrumenting the IR
1859 ~~~~~~~~~~~~~~~~~~~~
1861 Instrumentation is largely straightforward, and done by the
1862 functions schemeE and schemeS in mc_translate.c. These generate
1863 code for handling the origin tags of expressions (E) and statements
1864 (S) respectively. The rather strange names are a reference to the
1865 "compilation schemes" shown in Simon Peyton Jones' book "The
   Implementation of Functional Programming Languages" (Prentice Hall,
   1987; see
   http://research.microsoft.com/~simonpj/papers/slpj-book-1987/index.htm).
1870 schemeS merely arranges to move shadow values around the guest
1871 state to track the incoming IR. schemeE is largely trivial too.
1872 The only significant point is how to compute the otag corresponding
1873 to binary (or ternary, quaternary, etc) operator applications. The
1874 rule is simple: just take whichever value is larger (32-bit
1875 unsigned max). Constants get the special value zero. Hence this
1876 rule always propagates a nonzero (known) otag in preference to a
1877 zero (unknown, or more likely, value-is-defined) tag, as we want.
1878 If two different undefined values are inputs to a binary operator
1879 application, then which is propagated is arbitrary, but that
1880 doesn't matter, since the program is erroneous in using either of
   the values, and so there's no point in attempting to propagate
   both.
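
   Concretely, for any binary application the instrumentation computes

      otag(Op(x,y)) = Max32U( otag(x), otag(y) )

   so a defined operand, whose otag is zero, can never mask the origin
   carried by an undefined one.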
1884 Since constants are abstracted to (otag) zero, much of the
1885 instrumentation code can be folded out without difficulty by the
1886 generic post-instrumentation IR cleanup pass, using these rules:
   Max32U(0,x) -> x, Max32U(x,0) -> x, Max32U(x,y) where x and y are
   constants is evaluated at JIT time, followed by the usual dead code
   removal.  In practice this causes surprisingly few Max32Us to
1890 survive through to backend code generation.
1892 Integration with the V-bits machinery
1893 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1895 This is again largely straightforward. Mostly the otag and V bits
1896 stuff are independent. The only point of interaction is when the V
1897 bits instrumenter creates a call to a helper function to report an
1898 uninitialised value error -- in that case it must first use schemeE
1899 to get hold of the origin tag expression for the value, and pass
1900 that to the helper too.
1902 There is the usual stuff to do with setting address range
1903 permissions. When memory is painted undefined, we must also know
1904 the origin tag to paint with, which involves some tedious plumbing,
1905 particularly to do with the fast case stack handlers. When memory
   is painted defined or noaccess then the origin tags must be forced
   to zero.
1909 One of the goals of the implementation was to ensure that the
1910 non-origin tracking mode isn't slowed down at all. To do this,
1911 various functions to do with memory permissions setting (again,
   mostly pertaining to the stack) are duplicated for the with- and
   without-otag cases.
1915 Dealing with stack redzones, and the NIA cache
1916 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1918 This is one of the few non-obvious parts of the implementation.
1920 Some ABIs (amd64-ELF, ppc64-ELF, ppc32/64-XCOFF) define a small
1921 reserved area below the stack pointer, that can be used as scratch
1922 space by compiler generated code for functions. In the Memcheck
1923 sources this is referred to as the "stack redzone". The important
1924 thing here is that such redzones are considered volatile across
1925 function calls and returns. So Memcheck takes care to mark them as
1926 undefined for each call and return, on the afflicted platforms.
1927 Past experience shows this is essential in order to get reliable
1928 messages about uninitialised values that come from the stack.
1930 So the question is, when we paint a redzone undefined, what origin
1931 tag should we use for it? Consider a function f() calling g(). If
1932 we paint the redzone using an otag derived from the ExeContext of
1933 the CALL/BL instruction in f, then any errors in g causing it to
1934 use uninitialised values that happen to lie in the redzone, will be
1935 reported as having their origin in f. Which is highly confusing.
1937 The same applies for returns: if, on a return, we paint the redzone
   using an origin tag derived from the ExeContext of the RET/BLR
1939 instruction in g, then any later errors in f causing it to use
1940 uninitialised values in the redzone, will be reported as having
1941 their origin in g. Which is just as confusing.
1943 To do it right, in both cases we need to use an origin tag which
1944 pertains to the instruction which dynamically follows the CALL/BL
1945 or RET/BLR. In short, one derived from the NIA - the "next
1946 instruction address".
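
   Concretely: when f's CALL/BL executes, the instruction that
   dynamically follows it is g's entry point, so the redzone made
   volatile by the call is attributed to g; when g's RET/BLR executes,
   the NIA is the return address in f, so that repaint is attributed
   to f.  Either way the blame lands on the function that might later
   erroneously read the redzone.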
1948 To make this work, Memcheck's redzone-painting helper,
1949 MC_(helperc_MAKE_STACK_UNINIT), now takes a third argument, the
1950 NIA. It converts the NIA to a 1-element ExeContext, and uses that
1951 ExeContext's ECU as the basis for the otag used to paint the
1952 redzone. The expensive part of this is converting an NIA into an
1953 ECU, since this happens once for every call and every return. So
1954 we use a simple 511-line, 2-way set associative cache
   (nia_to_ecu_cache) to cache the mappings, and that knocks most of
   the cost of this out.
1958 Further background comments
1959 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
1961 > Question: why is otag a UInt? Wouldn't a UWord be better? Isn't
1962 > it really just the address of the relevant ExeContext?
1964 Well, it's not the address, but a value which has a 1-1 mapping
1965 with ExeContexts, and is guaranteed not to be zero, since zero
1966 denotes (to memcheck) "unknown origin or defined value". So these
1967 UInts are just numbers starting at 4 and incrementing by 4; each
1968 ExeContext is given a number when it is created. (*** NOTE this
1969 confuses otags and ECUs; see comments above ***).
1971 Making these otags 32-bit regardless of the machine's word size
1972 makes the 64-bit implementation easier (next para). And it doesn't
1973 really limit us in any way, since for the tags to overflow would
1974 require that the program somehow caused 2^30-1 different
1975 ExeContexts to be created, in which case it is probably in deep
1976 trouble. Not to mention V will have soaked up many tens of
1977 gigabytes of memory merely to store them all.
1979 So having 64-bit origins doesn't really buy you anything, and has
1980 the following downsides:
   Suppose that instead, an otag is a UWord.  This would mean that, on
   a 64-bit target:
1985 1. It becomes hard to shadow any element of guest state which is
1986 smaller than 8 bytes. To do so means you'd need to find some
1987 8-byte-sized hole in the guest state which you don't want to
1988 shadow, and use that instead to hold the otag. On ppc64, the
1989 condition code register(s) are split into 20 UChar sized pieces,
1990 all of which need to be tracked (guest_XER_SO .. guest_CR7_0)
      and so that would entail finding 160 bytes somewhere else in the
      guest state.
1994 Even on x86, I want to track origins for %AH .. %DH (bits 15:8
1995 of %EAX .. %EDX) that are separate from %AL .. %DL (bits 7:0 of
1996 same) and so I had to look for 4 untracked otag-sized areas in
1997 the guest state to make that possible.
1999 The same problem exists of course when origin tags are only 32
2000 bits, but it's less extreme.
2002 2. (More compelling) it doubles the size of the origin shadow
2003 memory. Given that the shadow memory is organised as a fixed
2004 size cache, and that accuracy of tracking is limited by origins
      falling out of the cache due to space conflicts, this isn't good.
2007 > Another question: is the origin tracking perfect, or are there
2008 > cases where it fails to determine an origin?
   It is imperfect for at least the following reasons, and possibly
   more:
2013 * Insufficient capacity in the origin cache. When a line is
2014 evicted from the cache it is gone forever, and so subsequent
2015 queries for the line produce zero, indicating no origin
2016 information. Interestingly, a line containing all zeroes can be
2017 evicted "free" from the cache, since it contains no useful
2018 information, so there is scope perhaps for some cleverer cache
     management schemes.  (*** NOTE, with the introduction of the
     second level origin tag cache, ocacheL2, this is no longer a
     problem. ***)
2023 * The origin cache only stores one otag per 32-bits of address
2024 space, plus 4 bits indicating which of the 4 bytes has that tag
2025 and which are considered defined. The result is that if two
2026 undefined bytes in the same word are stored in memory, the first
     stored byte's origin will be lost and replaced by the origin for
     the second byte.
2030 * Nonzero origin tags for defined values. Consider a binary
2031 operator application op(x,y). Suppose y is undefined (and so has
2032 a valid nonzero origin tag), and x is defined, but erroneously
2033 has a nonzero origin tag (defined values should have tag zero).
2034 If the erroneous tag has a numeric value greater than y's tag,
     then the rule for propagating origin tags through binary
2036 operations, which is simply to take the unsigned max of the two
2037 tags, will erroneously propagate x's tag rather than y's.
2039 * Some obscure uses of x86/amd64 byte registers can cause lossage
2040 or confusion of origins. %AH .. %DH are treated as different
2041 from, and unrelated to, their parent registers, %EAX .. %EDX.
     So some weird sequences like
2044 movb undefined-value, %AH
2045 movb defined-value, %AL
2046 .. use %AX or %EAX ..
2048 will cause the origin attributed to %AH to be ignored, since %AL,
2049 %AX, %EAX are treated as the same register, and %AH as a
2050 completely separate one.
   But having said all that, it actually seems to work fairly well in
   practice.
*/
2056 static UWord stats_ocacheL1_find = 0;
2057 static UWord stats_ocacheL1_found_at_1 = 0;
2058 static UWord stats_ocacheL1_found_at_N = 0;
2059 static UWord stats_ocacheL1_misses = 0;
2060 static UWord stats_ocacheL1_lossage = 0;
2061 static UWord stats_ocacheL1_movefwds = 0;
2063 static UWord stats__ocacheL2_refs = 0;
2064 static UWord stats__ocacheL2_misses = 0;
2065 static UWord stats__ocacheL2_n_nodes_max = 0;
2067 /* Cache of 32-bit values, one every 32 bits of address space */
2069 #define OC_BITS_PER_LINE 5
2070 #define OC_W32S_PER_LINE (1 << (OC_BITS_PER_LINE - 2))
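/* With OC_BITS_PER_LINE == 5, each line covers 2^5 == 32 bytes of
   address space, as OC_W32S_PER_LINE == 8 four-byte groups, each group
   having one otag plus a 4-bit descr mask saying which of its bytes
   the otag applies to. */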
static INLINE UWord oc_line_offset ( Addr a ) {
   return (a >> 2) & (OC_W32S_PER_LINE - 1);
}
static INLINE Bool is_valid_oc_tag ( Addr tag ) {
   return 0 == (tag & ((1 << OC_BITS_PER_LINE) - 1));
}
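/* Worked example: for a == 0x123B, oc_line_offset gives
   ((0x123B >> 2) & 7) == 6, ie the seventh 32-bit group within its
   line, while is_valid_oc_tag accepts only 32-byte-aligned values. */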
2079 #define OC_LINES_PER_SET 2
2081 #define OC_N_SET_BITS 20
2082 #define OC_N_SETS (1 << OC_N_SET_BITS)
/* These settings give:
   64 bit host: ocache:  100,663,296 sizeB  67,108,864 useful
   32 bit host: ocache:   92,274,688 sizeB  67,108,864 useful
*/
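/* Sanity check of those figures (64-bit case, assuming no struct
   padding): an OCacheLine is an 8-byte tag + 8 UInts (32 bytes) +
   8 UChars (8 bytes) == 48 bytes; 2^20 sets * 2 lines * 48 bytes
   == 100,663,296 bytes of storage, covering 2^20 * 2 * 32
   == 67,108,864 "useful" bytes of address space.  On a 32-bit host
   the tag is 4 bytes, giving 44-byte lines and hence 92,274,688. */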
2089 #define OC_MOVE_FORWARDS_EVERY_BITS 7
typedef
   struct {
      Addr  tag;
      UInt  w32[OC_W32S_PER_LINE];
      UChar descr[OC_W32S_PER_LINE];
   }
   OCacheLine;
2100 /* Classify and also sanity-check 'line'. Return 'e' (empty) if not
2101 in use, 'n' (nonzero) if it contains at least one valid origin tag,
2102 and 'z' if all the represented tags are zero. */
static UChar classify_OCacheLine ( OCacheLine* line )
{
   UWord i;
   if (line->tag == 1/*invalid*/)
      return 'e'; /* EMPTY */
   tl_assert(is_valid_oc_tag(line->tag));
   for (i = 0; i < OC_W32S_PER_LINE; i++) {
      tl_assert(0 == ((~0xF) & line->descr[i]));
      if (line->w32[i] > 0 && line->descr[i] > 0)
         return 'n'; /* NONZERO - contains useful info */
   }
   return 'z'; /* ZERO - no useful info */
}
typedef
   struct {
      OCacheLine line[OC_LINES_PER_SET];
   }
   OCacheSet;

typedef
   struct {
      OCacheSet set[OC_N_SETS];
   }
   OCache;
2129 static OCache* ocacheL1 = NULL;
2130 static UWord ocacheL1_event_ctr = 0;
2132 static void init_ocacheL2 ( void ); /* fwds */
static void init_OCache ( void )
{
   UWord line, set;
   tl_assert(MC_(clo_mc_level) >= 3);
   tl_assert(ocacheL1 == NULL);
   ocacheL1 = VG_(am_shadow_alloc)(sizeof(OCache));
   if (ocacheL1 == NULL) {
      VG_(out_of_memory_NORETURN)( "memcheck:allocating ocacheL1",
                                   sizeof(OCache) );
   }
   tl_assert(ocacheL1 != NULL);
   for (set = 0; set < OC_N_SETS; set++) {
      for (line = 0; line < OC_LINES_PER_SET; line++) {
         ocacheL1->set[set].line[line].tag = 1/*invalid*/;
      }
   }
   init_ocacheL2();
}
static void moveLineForwards ( OCacheSet* set, UWord lineno )
{
   OCacheLine tmp;
   stats_ocacheL1_movefwds++;
   tl_assert(lineno > 0 && lineno < OC_LINES_PER_SET);
   tmp = set->line[lineno-1];
   set->line[lineno-1] = set->line[lineno];
   set->line[lineno] = tmp;
}
static void zeroise_OCacheLine ( OCacheLine* line, Addr tag ) {
   UWord i;
   for (i = 0; i < OC_W32S_PER_LINE; i++) {
      line->w32[i]   = 0; /* NO ORIGIN */
      line->descr[i] = 0; /* REALLY REALLY NO ORIGIN! */
   }
   line->tag = tag;
}
2171 //////////////////////////////////////////////////////////////
2172 //// OCache backing store
2174 static OSet* ocacheL2 = NULL;
static void* ocacheL2_malloc ( HChar* cc, SizeT szB ) {
   return VG_(malloc)(cc, szB);
}
static void ocacheL2_free ( void* v ) {
   VG_(free)( v );
}
2183 /* Stats: # nodes currently in tree */
2184 static UWord stats__ocacheL2_n_nodes = 0;
static void init_ocacheL2 ( void )
{
   tl_assert(!ocacheL2);
   tl_assert(sizeof(Word) == sizeof(Addr)); /* since OCacheLine.tag :: Addr */
   tl_assert(0 == offsetof(OCacheLine,tag));
   ocacheL2
      = VG_(OSetGen_Create)( offsetof(OCacheLine,tag),
                             NULL, /* fast cmp */
                             ocacheL2_malloc, "mc.ioL2", ocacheL2_free );
   tl_assert(ocacheL2);
   stats__ocacheL2_n_nodes = 0;
}
2199 /* Find line with the given tag in the tree, or NULL if not found. */
static OCacheLine* ocacheL2_find_tag ( Addr tag )
{
   OCacheLine* line;
   tl_assert(is_valid_oc_tag(tag));
   stats__ocacheL2_refs++;
   line = VG_(OSetGen_Lookup)( ocacheL2, &tag );
   return line;
}
2209 /* Delete the line with the given tag from the tree, if it is present, and
2210 free up the associated memory. */
static void ocacheL2_del_tag ( Addr tag )
{
   OCacheLine* line;
   tl_assert(is_valid_oc_tag(tag));
   stats__ocacheL2_refs++;
   line = VG_(OSetGen_Remove)( ocacheL2, &tag );
   if (line) {
      VG_(OSetGen_FreeNode)(ocacheL2, line);
      tl_assert(stats__ocacheL2_n_nodes > 0);
      stats__ocacheL2_n_nodes--;
   }
}
/* Add a copy of the given line to the tree.  It must not already be
   present. */
static void ocacheL2_add_line ( OCacheLine* line )
{
   OCacheLine* copy;
   tl_assert(is_valid_oc_tag(line->tag));
   copy = VG_(OSetGen_AllocNode)( ocacheL2, sizeof(OCacheLine) );
   tl_assert(copy);
   *copy = *line;
   stats__ocacheL2_refs++;
   VG_(OSetGen_Insert)( ocacheL2, copy );
   stats__ocacheL2_n_nodes++;
   if (stats__ocacheL2_n_nodes > stats__ocacheL2_n_nodes_max)
      stats__ocacheL2_n_nodes_max = stats__ocacheL2_n_nodes;
}
2241 //////////////////////////////////////////////////////////////
__attribute__((noinline))
static OCacheLine* find_OCacheLine_SLOW ( Addr a )
{
   OCacheLine *victim, *inL2;
   UChar c;
   UWord line;
   UWord setno   = (a >> OC_BITS_PER_LINE) & (OC_N_SETS - 1);
   UWord tagmask = ~((1 << OC_BITS_PER_LINE) - 1);
   UWord tag     = a & tagmask;
   tl_assert(setno >= 0 && setno < OC_N_SETS);

   /* we already tried line == 0; skip therefore. */
   for (line = 1; line < OC_LINES_PER_SET; line++) {
      if (ocacheL1->set[setno].line[line].tag == tag) {
         if (line == 1) {
            stats_ocacheL1_found_at_1++;
         } else {
            stats_ocacheL1_found_at_N++;
         }
         if (UNLIKELY(0 == (ocacheL1_event_ctr++
                            & ((1<<OC_MOVE_FORWARDS_EVERY_BITS)-1)))) {
            moveLineForwards( &ocacheL1->set[setno], line );
            line--;
         }
         return &ocacheL1->set[setno].line[line];
      }
   }

   /* A miss.  Use the last slot.  Implicitly this means we're
      ejecting the line in the last slot. */
   stats_ocacheL1_misses++;
   tl_assert(line == OC_LINES_PER_SET);
   line--;
   tl_assert(line > 0);

   /* First, move the to-be-ejected line to the L2 cache. */
   victim = &ocacheL1->set[setno].line[line];
   c = classify_OCacheLine(victim);
   switch (c) {
      case 'e':
         /* the line is empty (has invalid tag); ignore it. */
         break;
      case 'z':
         /* line contains zeroes.  We must ensure the backing store is
            updated accordingly, either by copying the line there
            verbatim, or by ensuring it isn't present there.  We
            choose the latter on the basis that it reduces the size of
            the backing store. */
         ocacheL2_del_tag( victim->tag );
         break;
      case 'n':
         /* line contains at least one real, useful origin.  Copy it
            to the backing store. */
         stats_ocacheL1_lossage++;
         inL2 = ocacheL2_find_tag( victim->tag );
         if (inL2) {
            *inL2 = *victim;
         } else {
            ocacheL2_add_line( victim );
         }
         break;
      default:
         tl_assert(0);
   }

   /* Now we must reload the L1 cache from the backing tree, if
      possible. */
   tl_assert(tag != victim->tag); /* stay sane */
   inL2 = ocacheL2_find_tag( tag );
   if (inL2) {
      /* We're in luck.  It's in the L2. */
      ocacheL1->set[setno].line[line] = *inL2;
   } else {
      /* Missed at both levels of the cache hierarchy.  We have to
         declare it as full of zeroes (unknown origins). */
      stats__ocacheL2_misses++;
      zeroise_OCacheLine( &ocacheL1->set[setno].line[line], tag );
   }

   /* Move it one forwards */
   moveLineForwards( &ocacheL1->set[setno], line );
   line--;

   return &ocacheL1->set[setno].line[line];
}
static INLINE OCacheLine* find_OCacheLine ( Addr a )
{
   UWord setno   = (a >> OC_BITS_PER_LINE) & (OC_N_SETS - 1);
   UWord tagmask = ~((1 << OC_BITS_PER_LINE) - 1);
   UWord tag     = a & tagmask;

   stats_ocacheL1_find++;

   if (OC_ENABLE_ASSERTIONS) {
      tl_assert(setno >= 0 && setno < OC_N_SETS);
      tl_assert(0 == (tag & (4 * OC_W32S_PER_LINE - 1)));
   }

   if (LIKELY(ocacheL1->set[setno].line[0].tag == tag)) {
      return &ocacheL1->set[setno].line[0];
   }

   return find_OCacheLine_SLOW( a );
}
static INLINE void set_aligned_word64_Origin_to_undef ( Addr a, UInt otag )
{
   //// BEGIN inlined, specialised version of MC_(helperc_b_store8)
   //// Set the origins for a+0 .. a+7
   { OCacheLine* line;
     UWord lineoff = oc_line_offset(a);
     if (OC_ENABLE_ASSERTIONS) {
        tl_assert(lineoff >= 0
                  && lineoff < OC_W32S_PER_LINE -1/*'cos 8-aligned*/);
     }
     line = find_OCacheLine( a );
     line->descr[lineoff+0] = 0xF;
     line->descr[lineoff+1] = 0xF;
     line->w32[lineoff+0]   = otag;
     line->w32[lineoff+1]   = otag;
   }
   //// END inlined, specialised version of MC_(helperc_b_store8)
}
2369 /*------------------------------------------------------------*/
2370 /*--- Aligned fast case permission setters, ---*/
2371 /*--- for dealing with stacks ---*/
2372 /*------------------------------------------------------------*/
2374 /*--------------------- 32-bit ---------------------*/
2376 /* Nb: by "aligned" here we mean 4-byte aligned */
static INLINE void make_aligned_word32_undefined ( Addr a )
{
   PROF_EVENT(300, "make_aligned_word32_undefined");
#ifndef PERF_FAST_STACK2
   make_mem_undefined(a, 4);
#else
   {
      UWord sm_off; SecMap* sm;
      if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
         PROF_EVENT(301, "make_aligned_word32_undefined-slow1");
         make_mem_undefined(a, 4);
         return;
      }
      sm                  = get_secmap_for_writing_low(a);
      sm_off              = SM_OFF(a);
      sm->vabits8[sm_off] = VA_BITS8_UNDEFINED;
   }
#endif
}
static INLINE
void make_aligned_word32_undefined_w_otag ( Addr a, UInt otag )
{
   make_aligned_word32_undefined(a);
   //// BEGIN inlined, specialised version of MC_(helperc_b_store4)
   //// Set the origins for a+0 .. a+3
   { OCacheLine* line;
     UWord lineoff = oc_line_offset(a);
     if (OC_ENABLE_ASSERTIONS) {
        tl_assert(lineoff >= 0 && lineoff < OC_W32S_PER_LINE);
     }
     line = find_OCacheLine( a );
     line->descr[lineoff] = 0xF;
     line->w32[lineoff]   = otag;
   }
   //// END inlined, specialised version of MC_(helperc_b_store4)
}
static INLINE
void make_aligned_word32_noaccess ( Addr a )
{
   PROF_EVENT(310, "make_aligned_word32_noaccess");
#ifndef PERF_FAST_STACK2
   MC_(make_mem_noaccess)(a, 4);
#else
   {
      UWord sm_off; SecMap* sm;
      if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
         PROF_EVENT(311, "make_aligned_word32_noaccess-slow1");
         MC_(make_mem_noaccess)(a, 4);
         return;
      }
      sm                  = get_secmap_for_writing_low(a);
      sm_off              = SM_OFF(a);
      sm->vabits8[sm_off] = VA_BITS8_NOACCESS;

      //// BEGIN inlined, specialised version of MC_(helperc_b_store4)
      //// Clear the origins for a+0 .. a+3.
      if (UNLIKELY( MC_(clo_mc_level) == 3 )) {
         OCacheLine* line;
         UWord lineoff = oc_line_offset(a);
         if (OC_ENABLE_ASSERTIONS) {
            tl_assert(lineoff >= 0 && lineoff < OC_W32S_PER_LINE);
         }
         line = find_OCacheLine( a );
         line->descr[lineoff] = 0;
      }
      //// END inlined, specialised version of MC_(helperc_b_store4)
   }
#endif
}
2458 /*--------------------- 64-bit ---------------------*/
2460 /* Nb: by "aligned" here we mean 8-byte aligned */
static INLINE void make_aligned_word64_undefined ( Addr a )
{
   PROF_EVENT(320, "make_aligned_word64_undefined");
#ifndef PERF_FAST_STACK2
   make_mem_undefined(a, 8);
#else
   {
      UWord sm_off16; SecMap* sm;
      if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
         PROF_EVENT(321, "make_aligned_word64_undefined-slow1");
         make_mem_undefined(a, 8);
         return;
      }
      sm       = get_secmap_for_writing_low(a);
      sm_off16 = SM_OFF_16(a);
      ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_UNDEFINED;
   }
#endif
}
static INLINE
void make_aligned_word64_undefined_w_otag ( Addr a, UInt otag )
{
   make_aligned_word64_undefined(a);
   //// BEGIN inlined, specialised version of MC_(helperc_b_store8)
   //// Set the origins for a+0 .. a+7
   { OCacheLine* line;
     UWord lineoff = oc_line_offset(a);
     tl_assert(lineoff >= 0
               && lineoff < OC_W32S_PER_LINE -1/*'cos 8-aligned*/);
     line = find_OCacheLine( a );
     line->descr[lineoff+0] = 0xF;
     line->descr[lineoff+1] = 0xF;
     line->w32[lineoff+0]   = otag;
     line->w32[lineoff+1]   = otag;
   }
   //// END inlined, specialised version of MC_(helperc_b_store8)
}
static INLINE
void make_aligned_word64_noaccess ( Addr a )
{
   PROF_EVENT(330, "make_aligned_word64_noaccess");
#ifndef PERF_FAST_STACK2
   MC_(make_mem_noaccess)(a, 8);
#else
   {
      UWord sm_off16; SecMap* sm;
      if (UNLIKELY(a > MAX_PRIMARY_ADDRESS)) {
         PROF_EVENT(331, "make_aligned_word64_noaccess-slow1");
         MC_(make_mem_noaccess)(a, 8);
         return;
      }
      sm       = get_secmap_for_writing_low(a);
      sm_off16 = SM_OFF_16(a);
      ((UShort*)(sm->vabits8))[sm_off16] = VA_BITS16_NOACCESS;

      //// BEGIN inlined, specialised version of MC_(helperc_b_store8)
      //// Clear the origins for a+0 .. a+7.
      if (UNLIKELY( MC_(clo_mc_level) == 3 )) {
         OCacheLine* line;
         UWord lineoff = oc_line_offset(a);
         tl_assert(lineoff >= 0
                   && lineoff < OC_W32S_PER_LINE -1/*'cos 8-aligned*/);
         line = find_OCacheLine( a );
         line->descr[lineoff+0] = 0;
         line->descr[lineoff+1] = 0;
      }
      //// END inlined, specialised version of MC_(helperc_b_store8)
   }
#endif
}
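/* Note: the 64-bit variants above write one UShort -- 16 vabits,
   covering all 8 bytes -- into the secondary map in a single store,
   which is what makes them preferable to two 32-bit calls whenever
   the alignment checks allow. */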
2544 /*------------------------------------------------------------*/
2545 /*--- Stack pointer adjustment ---*/
2546 /*------------------------------------------------------------*/
#ifdef PERF_FAST_STACK
#  define MAYBE_USED
#else
#  define MAYBE_USED __attribute__((unused))
#endif
2554 /*--------------- adjustment by 4 bytes ---------------*/
MAYBE_USED
static void VG_REGPARM(2) mc_new_mem_stack_4_w_ECU(Addr new_SP, UInt ecu)
{
   UInt otag = ecu | MC_OKIND_STACK;
   PROF_EVENT(110, "new_mem_stack_4");
   if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP, otag );
   } else {
      MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + new_SP, 4, otag );
   }
}

MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_4(Addr new_SP)
{
   PROF_EVENT(110, "new_mem_stack_4");
   if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
   } else {
      make_mem_undefined ( -VG_STACK_REDZONE_SZB + new_SP, 4 );
   }
}

MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_4(Addr new_SP)
{
   PROF_EVENT(120, "die_mem_stack_4");
   if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
   } else {
      MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-4, 4 );
   }
}
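/* The same shape recurs for each specialised size below: bias the
   address by -VG_STACK_REDZONE_SZB so the redzone below SP is painted
   together with the frame, use the aligned word32/word64 fast cases
   when the alignment tests allow, and otherwise fall back to the
   general-purpose routines. */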
2590 /*--------------- adjustment by 8 bytes ---------------*/
MAYBE_USED
static void VG_REGPARM(2) mc_new_mem_stack_8_w_ECU(Addr new_SP, UInt ecu)
{
   UInt otag = ecu | MC_OKIND_STACK;
   PROF_EVENT(111, "new_mem_stack_8");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP, otag );
   } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP  , otag );
      make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+4, otag );
   } else {
      MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + new_SP, 8, otag );
   }
}

MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_8(Addr new_SP)
{
   PROF_EVENT(111, "new_mem_stack_8");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
   } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
      make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
   } else {
      make_mem_undefined ( -VG_STACK_REDZONE_SZB + new_SP, 8 );
   }
}

MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_8(Addr new_SP)
{
   PROF_EVENT(121, "die_mem_stack_8");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
   } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
      make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
   } else {
      MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-8, 8 );
   }
}
2635 /*--------------- adjustment by 12 bytes ---------------*/
MAYBE_USED
static void VG_REGPARM(2) mc_new_mem_stack_12_w_ECU(Addr new_SP, UInt ecu)
{
   UInt otag = ecu | MC_OKIND_STACK;
   PROF_EVENT(112, "new_mem_stack_12");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP  , otag );
      make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8, otag );
   } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* from previous test we don't have 8-alignment at offset +0,
         hence must have 8 alignment at offsets +4/-4.  Hence safe to
         do 4 at +0 and then 8 at +4/. */
      make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP  , otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+4, otag );
   } else {
      MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + new_SP, 12, otag );
   }
}

MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_12(Addr new_SP)
{
   PROF_EVENT(112, "new_mem_stack_12");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
      make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
   } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* from previous test we don't have 8-alignment at offset +0,
         hence must have 8 alignment at offsets +4/-4.  Hence safe to
         do 4 at +0 and then 8 at +4/. */
      make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
   } else {
      make_mem_undefined ( -VG_STACK_REDZONE_SZB + new_SP, 12 );
   }
}

MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_12(Addr new_SP)
{
   PROF_EVENT(122, "die_mem_stack_12");
   /* Note the -12 in the test */
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP-12 )) {
      /* We have 8-alignment at -12, hence ok to do 8 at -12 and 4 at
         -4. */
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
      make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4  );
   } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* We have 4-alignment at +0, but we don't have 8-alignment at
         -12.  So we must have 8-alignment at -8.  Hence do 4 at -12
         and then 8 at -8. */
      make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8  );
   } else {
      MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-12, 12 );
   }
}
2695 /*--------------- adjustment by 16 bytes ---------------*/
MAYBE_USED
static void VG_REGPARM(2) mc_new_mem_stack_16_w_ECU(Addr new_SP, UInt ecu)
{
   UInt otag = ecu | MC_OKIND_STACK;
   PROF_EVENT(113, "new_mem_stack_16");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* Have 8-alignment at +0, hence do 8 at +0 and 8 at +8. */
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP  , otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8, otag );
   } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* Have 4 alignment at +0 but not 8; hence 8 must be at +4.
         Hence do 4 at +0, 8 at +4, 4 at +12. */
      make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP   , otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+4 , otag );
      make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+12, otag );
   } else {
      MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + new_SP, 16, otag );
   }
}

MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_16(Addr new_SP)
{
   PROF_EVENT(113, "new_mem_stack_16");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* Have 8-alignment at +0, hence do 8 at +0 and 8 at +8. */
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
   } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* Have 4 alignment at +0 but not 8; hence 8 must be at +4.
         Hence do 4 at +0, 8 at +4, 4 at +12. */
      make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
      make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+12 );
   } else {
      make_mem_undefined ( -VG_STACK_REDZONE_SZB + new_SP, 16 );
   }
}

MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_16(Addr new_SP)
{
   PROF_EVENT(123, "die_mem_stack_16");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* Have 8-alignment at +0, hence do 8 at -16 and 8 at -8. */
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-8 );
   } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* 8 alignment must be at -12.  Do 4 at -16, 8 at -12, 4 at -4. */
      make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
      make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4  );
   } else {
      MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-16, 16 );
   }
}
2754 /*--------------- adjustment by 32 bytes ---------------*/
MAYBE_USED
static void VG_REGPARM(2) mc_new_mem_stack_32_w_ECU(Addr new_SP, UInt ecu)
{
   UInt otag = ecu | MC_OKIND_STACK;
   PROF_EVENT(114, "new_mem_stack_32");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* Straightforward */
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP   , otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8 , otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+16, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+24, otag );
   } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* 8 alignment must be at +4.  Hence do 8 at +4,+12,+20 and 4 at
         +0 and +28. */
      make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP   , otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+4 , otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+12, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+20, otag );
      make_aligned_word32_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+28, otag );
   } else {
      MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + new_SP, 32, otag );
   }
}

MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_32(Addr new_SP)
{
   PROF_EVENT(114, "new_mem_stack_32");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* Straightforward */
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
   } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* 8 alignment must be at +4.  Hence do 8 at +4,+12,+20 and 4 at
         +0 and +28. */
      make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+4 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+12 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+20 );
      make_aligned_word32_undefined ( -VG_STACK_REDZONE_SZB + new_SP+28 );
   } else {
      make_mem_undefined ( -VG_STACK_REDZONE_SZB + new_SP, 32 );
   }
}

MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_32(Addr new_SP)
{
   PROF_EVENT(124, "die_mem_stack_32");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* Straightforward */
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
   } else if (VG_IS_4_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      /* 8 alignment must be at -4 etc.  Hence do 8 at -12,-20,-28 and
         4 at -32 and -4. */
      make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-28 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-20 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-12 );
      make_aligned_word32_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-4 );
   } else {
      MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-32, 32 );
   }
}
2826 /*--------------- adjustment by 112 bytes ---------------*/
MAYBE_USED
static void VG_REGPARM(2) mc_new_mem_stack_112_w_ECU(Addr new_SP, UInt ecu)
{
   UInt otag = ecu | MC_OKIND_STACK;
   PROF_EVENT(115, "new_mem_stack_112");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP   , otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8 , otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+16, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+24, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+32, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+40, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+48, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+56, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+64, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+72, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+80, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+88, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+96, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+104, otag );
   } else {
      MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + new_SP, 112, otag );
   }
}

MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_112(Addr new_SP)
{
   PROF_EVENT(115, "new_mem_stack_112");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104 );
   } else {
      make_mem_undefined ( -VG_STACK_REDZONE_SZB + new_SP, 112 );
   }
}

MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_112(Addr new_SP)
{
   PROF_EVENT(125, "die_mem_stack_112");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
   } else {
      MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-112, 112 );
   }
}
2901 /*--------------- adjustment by 128 bytes ---------------*/
MAYBE_USED
static void VG_REGPARM(2) mc_new_mem_stack_128_w_ECU(Addr new_SP, UInt ecu)
{
   UInt otag = ecu | MC_OKIND_STACK;
   PROF_EVENT(116, "new_mem_stack_128");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP   , otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8 , otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+16, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+24, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+32, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+40, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+48, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+56, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+64, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+72, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+80, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+88, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+96, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+104, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+112, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+120, otag );
   } else {
      MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + new_SP, 128, otag );
   }
}

MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_128(Addr new_SP)
{
   PROF_EVENT(116, "new_mem_stack_128");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120 );
   } else {
      make_mem_undefined ( -VG_STACK_REDZONE_SZB + new_SP, 128 );
   }
}

MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_128(Addr new_SP)
{
   PROF_EVENT(126, "die_mem_stack_128");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
   } else {
      MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-128, 128 );
   }
}
2982 /*--------------- adjustment by 144 bytes ---------------*/
MAYBE_USED
static void VG_REGPARM(2) mc_new_mem_stack_144_w_ECU(Addr new_SP, UInt ecu)
{
   UInt otag = ecu | MC_OKIND_STACK;
   PROF_EVENT(117, "new_mem_stack_144");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+16, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+24, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+32, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+40, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+48, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+56, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+64, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+72, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+80, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+88, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+96, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+104, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+112, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+120, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+128, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+136, otag );
   } else {
      MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + new_SP, 144, otag );
   }
}

MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_144(Addr new_SP)
{
   PROF_EVENT(117, "new_mem_stack_144");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+128 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+136 );
   } else {
      make_mem_undefined ( -VG_STACK_REDZONE_SZB + new_SP, 144 );
   }
}

MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_144(Addr new_SP)
{
   PROF_EVENT(127, "die_mem_stack_144");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
   } else {
      MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-144, 144 );
   }
}
3069 /*--------------- adjustment by 160 bytes ---------------*/
MAYBE_USED
static void VG_REGPARM(2) mc_new_mem_stack_160_w_ECU(Addr new_SP, UInt ecu)
{
   UInt otag = ecu | MC_OKIND_STACK;
   PROF_EVENT(118, "new_mem_stack_160");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+8, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+16, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+24, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+32, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+40, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+48, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+56, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+64, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+72, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+80, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+88, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+96, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+104, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+112, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+120, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+128, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+136, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+144, otag );
      make_aligned_word64_undefined_w_otag ( -VG_STACK_REDZONE_SZB + new_SP+152, otag );
   } else {
      MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + new_SP, 160, otag );
   }
}

MAYBE_USED
static void VG_REGPARM(1) mc_new_mem_stack_160(Addr new_SP)
{
   PROF_EVENT(118, "new_mem_stack_160");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+8 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+16 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+24 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+32 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+40 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+48 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+56 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+64 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+72 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+80 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+88 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+96 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+104 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+112 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+120 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+128 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+136 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+144 );
      make_aligned_word64_undefined ( -VG_STACK_REDZONE_SZB + new_SP+152 );
   } else {
      make_mem_undefined ( -VG_STACK_REDZONE_SZB + new_SP, 160 );
   }
}

MAYBE_USED
static void VG_REGPARM(1) mc_die_mem_stack_160(Addr new_SP)
{
   PROF_EVENT(128, "die_mem_stack_160");
   if (VG_IS_8_ALIGNED( -VG_STACK_REDZONE_SZB + new_SP )) {
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-160);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-152);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-144);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-136);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-128);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-120);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-112);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-104);
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-96 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-88 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-80 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-72 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-64 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-56 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-48 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-40 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-32 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-24 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP-16 );
      make_aligned_word64_noaccess ( -VG_STACK_REDZONE_SZB + new_SP- 8 );
   } else {
      MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + new_SP-160, 160 );
   }
}

#endif /* ifdef PERF_FAST_STACK */
3162 /*--------------- adjustment by N bytes ---------------*/
static void mc_new_mem_stack_w_ECU ( Addr a, SizeT len, UInt ecu )
{
   UInt otag = ecu | MC_OKIND_STACK;
   PROF_EVENT(115, "new_mem_stack_w_otag");
   MC_(make_mem_undefined_w_otag) ( -VG_STACK_REDZONE_SZB + a, len, otag );
}

static void mc_new_mem_stack ( Addr a, SizeT len )
{
   PROF_EVENT(115, "new_mem_stack");
   make_mem_undefined ( -VG_STACK_REDZONE_SZB + a, len );
}

static void mc_die_mem_stack ( Addr a, SizeT len )
{
   PROF_EVENT(125, "die_mem_stack");
   MC_(make_mem_noaccess) ( -VG_STACK_REDZONE_SZB + a, len );
}
3184 /* The AMD64 ABI says:
3186 "The 128-byte area beyond the location pointed to by %rsp is considered
3187 to be reserved and shall not be modified by signal or interrupt
3188 handlers. Therefore, functions may use this area for temporary data
3189 that is not needed across function calls. In particular, leaf functions
3190 may use this area for their entire stack frame, rather than adjusting
   the stack pointer in the prologue and epilogue.  This area is known as
   the red zone."

   So after any call or return we need to mark this redzone as containing
   undefined values.
3197 Consider this: we're in function f. f calls g. g moves rsp down
3198 modestly (say 16 bytes) and writes stuff all over the red zone, making it
3199 defined. g returns. f is buggy and reads from parts of the red zone
3200 that it didn't write on. But because g filled that area in, f is going
3201 to be picking up defined V bits and so any errors from reading bits of
3202 the red zone it didn't write, will be missed. The only solution I could
3203 think of was to make the red zone undefined when g returns to f.
3205 This is in accordance with the ABI, which makes it clear the redzone
3206 is volatile across function calls.
3208 The problem occurs the other way round too: f could fill the RZ up
3209 with defined values and g could mistakenly read them. So the RZ
3210 also needs to be nuked on function calls.
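/* A concrete sketch of that scenario (ours, purely illustrative):

      g() { long tmp[2]; tmp[0] = 1; ... }    // tmp sits in the RZ
      f() { g(); use(word_below_rsp(40)); }   // buggy read of the RZ

   ('word_below_rsp' is a stand-in for whatever buggy address
   arithmetic f performs.)  If g's stores left those bytes "defined",
   f's read would pick up defined V bits and go unreported; marking
   the 128 bytes below %rsp undefined when g returns restores the
   error report. */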
3214 /* Here's a simple cache to hold nia -> ECU mappings. It could be
3215 improved so as to have a lower miss rate. */
3217 static UWord stats__nia_cache_queries = 0;
3218 static UWord stats__nia_cache_misses = 0;
3221 struct { UWord nia0; UWord ecu0; /* nia0 maps to ecu0 */
3222 UWord nia1; UWord ecu1; } /* nia1 maps to ecu1 */
3225 #define N_NIA_TO_ECU_CACHE 511
3227 static WCacheEnt nia_to_ecu_cache[N_NIA_TO_ECU_CACHE];
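/* So each of the 511 sets holds two (nia,ecu) pairs, most recently
   used first.  A query for address A probes set A % 511, checks
   nia0 then nia1, and promotes a nia1 hit to the front -- a tiny
   2-way pseudo-LRU, as convert_nia_to_ecu below spells out. */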
3229 static void init_nia_to_ecu_cache ( void )
3233 ExeContext* zero_ec;
3235 /* Fill all the slots with an entry for address zero, and the
3236 relevant otags accordingly. Hence the cache is initially filled
3237 with valid entries. */
3238 zero_ec = VG_(make_depth_1_ExeContext_from_Addr)(zero_addr);
3240 zero_ecu = VG_(get_ECU_from_ExeContext)(zero_ec);
3241 tl_assert(VG_(is_plausible_ECU)(zero_ecu));
3242 for (i = 0; i < N_NIA_TO_ECU_CACHE; i++) {
3243 nia_to_ecu_cache[i].nia0 = zero_addr;
3244 nia_to_ecu_cache[i].ecu0 = zero_ecu;
3245 nia_to_ecu_cache[i].nia1 = zero_addr;
3246 nia_to_ecu_cache[i].ecu1 = zero_ecu;
3250 static inline UInt convert_nia_to_ecu ( Addr nia )
3256 tl_assert( sizeof(nia_to_ecu_cache[0].nia1) == sizeof(nia) );
3258 stats__nia_cache_queries++;
3259 i = nia % N_NIA_TO_ECU_CACHE;
3260 tl_assert(i >= 0 && i < N_NIA_TO_ECU_CACHE);
3262 if (LIKELY( nia_to_ecu_cache[i].nia0 == nia ))
3263 return nia_to_ecu_cache[i].ecu0;
3265 if (LIKELY( nia_to_ecu_cache[i].nia1 == nia )) {
3266 # define SWAP(_w1,_w2) { UWord _t = _w1; _w1 = _w2; _w2 = _t; }
3267 SWAP( nia_to_ecu_cache[i].nia0, nia_to_ecu_cache[i].nia1 );
3268 SWAP( nia_to_ecu_cache[i].ecu0, nia_to_ecu_cache[i].ecu1 );
3270 return nia_to_ecu_cache[i].ecu0;
3273 stats__nia_cache_misses++;
3274 ec = VG_(make_depth_1_ExeContext_from_Addr)(nia);
3276 ecu = VG_(get_ECU_from_ExeContext)(ec);
3277 tl_assert(VG_(is_plausible_ECU)(ecu));
3279 nia_to_ecu_cache[i].nia1 = nia_to_ecu_cache[i].nia0;
3280 nia_to_ecu_cache[i].ecu1 = nia_to_ecu_cache[i].ecu0;
3282 nia_to_ecu_cache[i].nia0 = nia;
3283 nia_to_ecu_cache[i].ecu0 = (UWord)ecu;
3288 /* Note that this serves both the origin-tracking and
3289 no-origin-tracking modes. We assume that calls to it are
3290 sufficiently infrequent that it isn't worth specialising for the
3291 with/without origin-tracking cases. */
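/* Sketch of a typical use (ours; the real call is emitted by the
   instrumenter when it sees an AbiHint): on amd64, after a call or
   return, something equivalent to

      MC_(helperc_MAKE_STACK_UNINIT)( rsp - 128, 128, nia );

   runs, repainting the red zone as undefined.  'nia' (the next
   instruction address, used to derive an origin tag) only matters
   at --track-origins=yes; otherwise callers pass 0, as the
   assertion below insists. */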
3292 void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len, Addr nia )
3295 tl_assert(sizeof(UWord) == sizeof(SizeT));
3297 VG_(printf)("helperc_MAKE_STACK_UNINIT (%#lx,%lu,nia=%#lx)\n",
3300 if (UNLIKELY( MC_(clo_mc_level) == 3 )) {
3301 UInt ecu = convert_nia_to_ecu ( nia );
3302 tl_assert(VG_(is_plausible_ECU)(ecu));
3303 otag = ecu | MC_OKIND_STACK;
3305 tl_assert(nia == 0);
3310 /* Really slow version */
3311 MC_(make_mem_undefined)(base, len, otag);
3315 /* Slow(ish) version, which is fairly easily seen to be correct.
3317 if (LIKELY( VG_IS_8_ALIGNED(base) && len==128 )) {
3318 make_aligned_word64_undefined(base + 0, otag);
3319 make_aligned_word64_undefined(base + 8, otag);
3320 make_aligned_word64_undefined(base + 16, otag);
3321 make_aligned_word64_undefined(base + 24, otag);
3323 make_aligned_word64_undefined(base + 32, otag);
3324 make_aligned_word64_undefined(base + 40, otag);
3325 make_aligned_word64_undefined(base + 48, otag);
3326 make_aligned_word64_undefined(base + 56, otag);
3328 make_aligned_word64_undefined(base + 64, otag);
3329 make_aligned_word64_undefined(base + 72, otag);
3330 make_aligned_word64_undefined(base + 80, otag);
3331 make_aligned_word64_undefined(base + 88, otag);
3333 make_aligned_word64_undefined(base + 96, otag);
3334 make_aligned_word64_undefined(base + 104, otag);
3335 make_aligned_word64_undefined(base + 112, otag);
3336 make_aligned_word64_undefined(base + 120, otag);
3338 MC_(make_mem_undefined)(base, len, otag);
3342 /* Idea is: go fast when
3343 * 8-aligned and length is 128
3344 * the sm is available in the main primary map
3345 * the address range falls entirely within a single secondary map
3346 If all those conditions hold, just update the V+A bits by writing
3347 directly into the vabits array. (If the sm was distinguished, this
3348 will make a copy and then write to it.)
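/* Quick arithmetic cross-check (ours): each vabits8 byte packs 2
   V+A bits for each of 4 memory bytes, so one UShort of shadow
   covers 8 bytes of address space.  Hence the 128-byte case below
   takes exactly 128/8 = 16 UShort stores of VA_BITS16_UNDEFINED,
   and the 288-byte ppc64 case further down takes 288/8 = 36. */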
3351 if (LIKELY( len == 128 && VG_IS_8_ALIGNED(base) )) {
3352 /* Now we know the address range is suitably sized and aligned. */
3353 UWord a_lo = (UWord)(base);
3354 UWord a_hi = (UWord)(base + 128 - 1);
3355 tl_assert(a_lo < a_hi); // paranoia: detect overflow
3356 if (a_hi <= MAX_PRIMARY_ADDRESS) {
3357 // Now we know the entire range is within the main primary map.
3358 SecMap* sm = get_secmap_for_writing_low(a_lo);
3359 SecMap* sm_hi = get_secmap_for_writing_low(a_hi);
3360 /* Now we know that the entire address range falls within a
3361 single secondary map, and that that secondary 'lives' in
3362 the main primary map. */
3363 if (LIKELY(sm == sm_hi)) {
3364 // Finally, we know that the range is entirely within one secmap.
3365 UWord v_off = SM_OFF(a_lo);
3366 UShort* p = (UShort*)(&sm->vabits8[v_off]);
3367 p[ 0] = VA_BITS16_UNDEFINED;
3368 p[ 1] = VA_BITS16_UNDEFINED;
3369 p[ 2] = VA_BITS16_UNDEFINED;
3370 p[ 3] = VA_BITS16_UNDEFINED;
3371 p[ 4] = VA_BITS16_UNDEFINED;
3372 p[ 5] = VA_BITS16_UNDEFINED;
3373 p[ 6] = VA_BITS16_UNDEFINED;
3374 p[ 7] = VA_BITS16_UNDEFINED;
3375 p[ 8] = VA_BITS16_UNDEFINED;
3376 p[ 9] = VA_BITS16_UNDEFINED;
3377 p[10] = VA_BITS16_UNDEFINED;
3378 p[11] = VA_BITS16_UNDEFINED;
3379 p[12] = VA_BITS16_UNDEFINED;
3380 p[13] = VA_BITS16_UNDEFINED;
3381 p[14] = VA_BITS16_UNDEFINED;
3382 p[15] = VA_BITS16_UNDEFINED;
3383 if (UNLIKELY( MC_(clo_mc_level) == 3 )) {
3384 set_aligned_word64_Origin_to_undef( base + 8 * 0, otag );
3385 set_aligned_word64_Origin_to_undef( base + 8 * 1, otag );
3386 set_aligned_word64_Origin_to_undef( base + 8 * 2, otag );
3387 set_aligned_word64_Origin_to_undef( base + 8 * 3, otag );
3388 set_aligned_word64_Origin_to_undef( base + 8 * 4, otag );
3389 set_aligned_word64_Origin_to_undef( base + 8 * 5, otag );
3390 set_aligned_word64_Origin_to_undef( base + 8 * 6, otag );
3391 set_aligned_word64_Origin_to_undef( base + 8 * 7, otag );
3392 set_aligned_word64_Origin_to_undef( base + 8 * 8, otag );
3393 set_aligned_word64_Origin_to_undef( base + 8 * 9, otag );
3394 set_aligned_word64_Origin_to_undef( base + 8 * 10, otag );
3395 set_aligned_word64_Origin_to_undef( base + 8 * 11, otag );
3396 set_aligned_word64_Origin_to_undef( base + 8 * 12, otag );
3397 set_aligned_word64_Origin_to_undef( base + 8 * 13, otag );
3398 set_aligned_word64_Origin_to_undef( base + 8 * 14, otag );
3399 set_aligned_word64_Origin_to_undef( base + 8 * 15, otag );
3406 /* 288 bytes (36 ULongs) is the magic value for ELF ppc64. */
3407 if (LIKELY( len == 288 && VG_IS_8_ALIGNED(base) )) {
3408 /* Now we know the address range is suitably sized and aligned. */
3409 UWord a_lo = (UWord)(base);
3410 UWord a_hi = (UWord)(base + 288 - 1);
3411 tl_assert(a_lo < a_hi); // paranoia: detect overflow
3412 if (a_hi <= MAX_PRIMARY_ADDRESS) {
3413 // Now we know the entire range is within the main primary map.
3414 SecMap* sm = get_secmap_for_writing_low(a_lo);
3415 SecMap* sm_hi = get_secmap_for_writing_low(a_hi);
3416 /* Now we know that the entire address range falls within a
3417 single secondary map, and that that secondary 'lives' in
3418 the main primary map. */
3419 if (LIKELY(sm == sm_hi)) {
3420 // Finally, we know that the range is entirely within one secmap.
3421 UWord v_off = SM_OFF(a_lo);
3422 UShort* p = (UShort*)(&sm->vabits8[v_off]);
3423 p[ 0] = VA_BITS16_UNDEFINED;
3424 p[ 1] = VA_BITS16_UNDEFINED;
3425 p[ 2] = VA_BITS16_UNDEFINED;
3426 p[ 3] = VA_BITS16_UNDEFINED;
3427 p[ 4] = VA_BITS16_UNDEFINED;
3428 p[ 5] = VA_BITS16_UNDEFINED;
3429 p[ 6] = VA_BITS16_UNDEFINED;
3430 p[ 7] = VA_BITS16_UNDEFINED;
3431 p[ 8] = VA_BITS16_UNDEFINED;
3432 p[ 9] = VA_BITS16_UNDEFINED;
3433 p[10] = VA_BITS16_UNDEFINED;
3434 p[11] = VA_BITS16_UNDEFINED;
3435 p[12] = VA_BITS16_UNDEFINED;
3436 p[13] = VA_BITS16_UNDEFINED;
3437 p[14] = VA_BITS16_UNDEFINED;
3438 p[15] = VA_BITS16_UNDEFINED;
3439 p[16] = VA_BITS16_UNDEFINED;
3440 p[17] = VA_BITS16_UNDEFINED;
3441 p[18] = VA_BITS16_UNDEFINED;
3442 p[19] = VA_BITS16_UNDEFINED;
3443 p[20] = VA_BITS16_UNDEFINED;
3444 p[21] = VA_BITS16_UNDEFINED;
3445 p[22] = VA_BITS16_UNDEFINED;
3446 p[23] = VA_BITS16_UNDEFINED;
3447 p[24] = VA_BITS16_UNDEFINED;
3448 p[25] = VA_BITS16_UNDEFINED;
3449 p[26] = VA_BITS16_UNDEFINED;
3450 p[27] = VA_BITS16_UNDEFINED;
3451 p[28] = VA_BITS16_UNDEFINED;
3452 p[29] = VA_BITS16_UNDEFINED;
3453 p[30] = VA_BITS16_UNDEFINED;
3454 p[31] = VA_BITS16_UNDEFINED;
3455 p[32] = VA_BITS16_UNDEFINED;
3456 p[33] = VA_BITS16_UNDEFINED;
3457 p[34] = VA_BITS16_UNDEFINED;
3458 p[35] = VA_BITS16_UNDEFINED;
3459 if (UNLIKELY( MC_(clo_mc_level) == 3 )) {
3460 set_aligned_word64_Origin_to_undef( base + 8 * 0, otag );
3461 set_aligned_word64_Origin_to_undef( base + 8 * 1, otag );
3462 set_aligned_word64_Origin_to_undef( base + 8 * 2, otag );
3463 set_aligned_word64_Origin_to_undef( base + 8 * 3, otag );
3464 set_aligned_word64_Origin_to_undef( base + 8 * 4, otag );
3465 set_aligned_word64_Origin_to_undef( base + 8 * 5, otag );
3466 set_aligned_word64_Origin_to_undef( base + 8 * 6, otag );
3467 set_aligned_word64_Origin_to_undef( base + 8 * 7, otag );
3468 set_aligned_word64_Origin_to_undef( base + 8 * 8, otag );
3469 set_aligned_word64_Origin_to_undef( base + 8 * 9, otag );
3470 set_aligned_word64_Origin_to_undef( base + 8 * 10, otag );
3471 set_aligned_word64_Origin_to_undef( base + 8 * 11, otag );
3472 set_aligned_word64_Origin_to_undef( base + 8 * 12, otag );
3473 set_aligned_word64_Origin_to_undef( base + 8 * 13, otag );
3474 set_aligned_word64_Origin_to_undef( base + 8 * 14, otag );
3475 set_aligned_word64_Origin_to_undef( base + 8 * 15, otag );
3476 set_aligned_word64_Origin_to_undef( base + 8 * 16, otag );
3477 set_aligned_word64_Origin_to_undef( base + 8 * 17, otag );
3478 set_aligned_word64_Origin_to_undef( base + 8 * 18, otag );
3479 set_aligned_word64_Origin_to_undef( base + 8 * 19, otag );
3480 set_aligned_word64_Origin_to_undef( base + 8 * 20, otag );
3481 set_aligned_word64_Origin_to_undef( base + 8 * 21, otag );
3482 set_aligned_word64_Origin_to_undef( base + 8 * 22, otag );
3483 set_aligned_word64_Origin_to_undef( base + 8 * 23, otag );
3484 set_aligned_word64_Origin_to_undef( base + 8 * 24, otag );
3485 set_aligned_word64_Origin_to_undef( base + 8 * 25, otag );
3486 set_aligned_word64_Origin_to_undef( base + 8 * 26, otag );
3487 set_aligned_word64_Origin_to_undef( base + 8 * 27, otag );
3488 set_aligned_word64_Origin_to_undef( base + 8 * 28, otag );
3489 set_aligned_word64_Origin_to_undef( base + 8 * 29, otag );
3490 set_aligned_word64_Origin_to_undef( base + 8 * 30, otag );
3491 set_aligned_word64_Origin_to_undef( base + 8 * 31, otag );
3492 set_aligned_word64_Origin_to_undef( base + 8 * 32, otag );
3493 set_aligned_word64_Origin_to_undef( base + 8 * 33, otag );
3494 set_aligned_word64_Origin_to_undef( base + 8 * 34, otag );
3495 set_aligned_word64_Origin_to_undef( base + 8 * 35, otag );
3502 /* else fall into slow case */
3503 MC_(make_mem_undefined_w_otag)(base, len, otag);
3507 /*------------------------------------------------------------*/
3508 /*--- Checking memory ---*/
3509 /*------------------------------------------------------------*/
3520 /* Check permissions for address range. If inadequate permissions
3521 exist, *bad_addr is set to the offending address, so the caller can
3522 know what it is. */
3524 /* Returns True if [a .. a+len) is not addressible. Otherwise,
3525 returns False, and if bad_addr is non-NULL, sets *bad_addr to
3526 indicate the lowest failing address. Functions below are
3527 similar. */
3528 Bool MC_(check_mem_is_noaccess) ( Addr a, SizeT len, Addr* bad_addr )
3533 PROF_EVENT(60, "check_mem_is_noaccess");
3534 for (i = 0; i < len; i++) {
3535 PROF_EVENT(61, "check_mem_is_noaccess(loop)");
3536 vabits2 = get_vabits2(a);
3537 if (VA_BITS2_NOACCESS != vabits2) {
3538 if (bad_addr != NULL) *bad_addr = a;
3546 static Bool is_mem_addressable ( Addr a, SizeT len,
3547 /*OUT*/Addr* bad_addr )
3552 PROF_EVENT(62, "is_mem_addressable");
3553 for (i = 0; i < len; i++) {
3554 PROF_EVENT(63, "is_mem_addressable(loop)");
3555 vabits2 = get_vabits2(a);
3556 if (VA_BITS2_NOACCESS == vabits2) {
3557 if (bad_addr != NULL) *bad_addr = a;
3565 static MC_ReadResult is_mem_defined ( Addr a, SizeT len,
3566 /*OUT*/Addr* bad_addr,
3572 PROF_EVENT(64, "is_mem_defined");
3573 DEBUG("is_mem_defined\n");
3575 if (otag) *otag = 0;
3576 if (bad_addr) *bad_addr = 0;
3577 for (i = 0; i < len; i++) {
3578 PROF_EVENT(65, "is_mem_defined(loop)");
3579 vabits2 = get_vabits2(a);
3580 if (VA_BITS2_DEFINED != vabits2) {
3581 // Error! Nb: Report addressability errors in preference to
3582 // definedness errors. And don't report definedness errors unless
3583 // --undef-value-errors=yes.
3587 if (VA_BITS2_NOACCESS == vabits2) {
3590 if (MC_(clo_mc_level) >= 2) {
3591 if (otag && MC_(clo_mc_level) == 3) {
3592 *otag = MC_(helperc_b_load1)( a );
3603 /* Check a zero-terminated ascii string. Tricky -- don't want to
3604 examine the actual bytes, to find the end, until we're sure it is
3605 safe to do so. */
3607 static Bool mc_is_defined_asciiz ( Addr a, Addr* bad_addr, UInt* otag )
3611 PROF_EVENT(66, "mc_is_defined_asciiz");
3612 DEBUG("mc_is_defined_asciiz\n");
3614 if (otag) *otag = 0;
3615 if (bad_addr) *bad_addr = 0;
3617 PROF_EVENT(67, "mc_is_defined_asciiz(loop)");
3618 vabits2 = get_vabits2(a);
3619 if (VA_BITS2_DEFINED != vabits2) {
3620 // Error! Nb: Report addressability errors in preference to
3621 // definedness errors. And don't report definedness errors unless
3622 // --undef-value-errors=yes.
3626 if (VA_BITS2_NOACCESS == vabits2) {
3629 if (MC_(clo_mc_level) >= 2) {
3630 if (otag && MC_(clo_mc_level) == 3) {
3631 *otag = MC_(helperc_b_load1)( a );
3636 /* Ok, a is safe to read. */
3637 if (* ((UChar*)a) == 0) {
3645 /*------------------------------------------------------------*/
3646 /*--- Memory event handlers ---*/
3647 /*------------------------------------------------------------*/
3650 void check_mem_is_addressable ( CorePart part, ThreadId tid, Char* s,
3651 Addr base, SizeT size )
3654 Bool ok = is_mem_addressable ( base, size, &bad_addr );
3658 case Vg_CoreSysCall:
3659 MC_(record_memparam_error) ( tid, bad_addr,
3660 /*isAddrErr*/True, s, 0/*otag*/ );
3664 MC_(record_core_mem_error)( tid, s );
3668 VG_(tool_panic)("check_mem_is_addressable: unexpected CorePart");
3674 void check_mem_is_defined ( CorePart part, ThreadId tid, Char* s,
3675 Addr base, SizeT size )
3679 MC_ReadResult res = is_mem_defined ( base, size, &bad_addr, &otag );
3682 Bool isAddrErr = ( MC_AddrErr == res ? True : False );
3685 case Vg_CoreSysCall:
3686 MC_(record_memparam_error) ( tid, bad_addr, isAddrErr, s,
3687 isAddrErr ? 0 : otag );
3690 case Vg_CoreSysCallArgInMem:
3691 MC_(record_regparam_error) ( tid, s, otag );
3694 /* If we're being asked to jump to a silly address, record an error
3695 message before potentially crashing the entire system. */
3696 case Vg_CoreTranslate:
3697 MC_(record_jump_error)( tid, bad_addr );
3701 VG_(tool_panic)("check_mem_is_defined: unexpected CorePart");
3707 void check_mem_is_defined_asciiz ( CorePart part, ThreadId tid,
3711 Addr bad_addr = 0; // shut GCC up
3714 tl_assert(part == Vg_CoreSysCall);
3715 res = mc_is_defined_asciiz ( (Addr)str, &bad_addr, &otag );
3717 Bool isAddrErr = ( MC_AddrErr == res ? True : False );
3718 MC_(record_memparam_error) ( tid, bad_addr, isAddrErr, s,
3719 isAddrErr ? 0 : otag );
3723 /* Handling of mmap and mprotect is not as simple as it seems.
3725 The underlying semantics are that memory obtained from mmap is
3726 always initialised, but may be inaccessible. And changes to the
3727 protection of memory do not change its contents and hence not its
3728 definedness state. Problem is we can't model
3729 inaccessible-but-with-some-definedness state; once we mark memory
3730 as inaccessible we lose all info about definedness, and so can't
3731 restore that if it is later made accessible again.
3733 One obvious thing to do is this:
3735 mmap/mprotect NONE -> noaccess
3736 mmap/mprotect other -> defined
3738 The problem case here is: taking accessible memory, writing
3739 uninitialised data to it, mprotecting it NONE and later mprotecting
3740 it back to some accessible state causes the undefinedness to be
3741 lost.
3743 A better proposal is:
3745 (1) mmap NONE -> make noaccess
3746 (2) mmap other -> make defined
3748 (3) mprotect NONE -> # no change
3749 (4) mprotect other -> change any "noaccess" to "defined"
3751 (2) is OK because memory newly obtained from mmap really is defined
3752 (zeroed out by the kernel -- doing anything else would
3753 constitute a massive security hole.)
3755 (1) is OK because the only way to make the memory usable is via
3756 (4), in which case we also wind up correctly marking it all as
3757 defined.
3759 (3) is the weak case. We choose not to change memory state.
3760 (presumably the range is in some mixture of "defined" and
3761 "undefined", viz, accessible but with arbitrary V bits). Doing
3762 nothing means we retain the V bits, so that if the memory is
3763 later mprotected "other", the V bits remain unchanged, so there
3764 can be no false negatives. The bad effect is that if there's
3765 an access in the area, then MC cannot warn; but at least we'll
3766 get a SEGV to show, so it's better than nothing.
3768 Consider the sequence (3) followed by (4). Any memory that was
3769 "defined" or "undefined" previously retains its state (as
3770 required). Any memory that was "noaccess" before can only have
3771 been made that way by (1), and so it's OK to change it to
3772 "defined".
3774 See https://bugs.kde.org/show_bug.cgi?id=205541
3775 and https://bugs.kde.org/show_bug.cgi?id=210268
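/* For example, the weak case (3) in action (our sketch):

      p = mmap(NULL, len, PROT_READ|PROT_WRITE, ...);  // (2) defined
      p[0] = uninitialised_local;   // V bits now partly undefined
      mprotect(p, len, PROT_NONE);  // (3) shadow state untouched
      mprotect(p, len, PROT_READ);  // (4) only noaccess -> defined
      use(p[0]);                    // undefinedness still reported

   Under the rejected "mprotect other -> defined" scheme, that final
   use would have been a false negative. */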
3778 void mc_new_mem_mmap ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx,
3781 if (rr || ww || xx) {
3782 /* (2) mmap/mprotect other -> defined */
3783 MC_(make_mem_defined)(a, len);
3785 /* (1) mmap/mprotect NONE -> noaccess */
3786 MC_(make_mem_noaccess)(a, len);
3791 void mc_new_mem_mprotect ( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
3793 if (rr || ww || xx) {
3794 /* (4) mprotect other -> change any "noaccess" to "defined" */
3795 make_mem_defined_if_noaccess(a, len);
3797 /* (3) mprotect NONE -> # no change */
3804 void mc_new_mem_startup( Addr a, SizeT len,
3805 Bool rr, Bool ww, Bool xx, ULong di_handle )
3807 // Because code is defined, initialised variables get put in the data
3808 // segment and are defined, and uninitialised variables get put in the
3809 // bss segment and are auto-zeroed (and so defined).
3811 // It's possible that there will be padding between global variables.
3812 // This will also be auto-zeroed, and marked as defined by Memcheck. If
3813 // a program uses it, Memcheck will not complain. This is arguably a
3814 // false negative, but it's a grey area -- the behaviour is defined (the
3815 // padding is zeroed) but it's probably not what the user intended. And
3816 // we can't avoid it.
3818 // Note: we generally ignore RWX permissions, because we can't track them
3819 // without requiring more than one A bit which would slow things down a
3820 // lot. But on Darwin the 0th page is mapped but !R and !W and !X.
3821 // So we mark any such pages as "unaddressable".
3822 DEBUG("mc_new_mem_startup(%#lx, %llu, rr=%u, ww=%u, xx=%u)\n",
3823 a, (ULong)len, rr, ww, xx);
3824 mc_new_mem_mmap(a, len, rr, ww, xx, di_handle);
3828 void mc_post_mem_write(CorePart part, ThreadId tid, Addr a, SizeT len)
3830 MC_(make_mem_defined)(a, len);
3834 /*------------------------------------------------------------*/
3835 /*--- Register event handlers ---*/
3836 /*------------------------------------------------------------*/
3838 /* Try and get a nonzero origin for the guest state section of thread
3839 tid characterised by (offset,size). Return 0 if nothing to show
3840 for it. */
3841 static UInt mb_get_origin_for_guest_offset ( ThreadId tid,
3842 Int offset, SizeT size )
3847 sh2off = MC_(get_otrack_shadow_offset)( offset, size );
3849 return 0; /* This piece of guest state is not tracked */
3850 tl_assert(sh2off >= 0);
3851 tl_assert(0 == (sh2off % 4));
3852 area[0] = 0x31;
3853 area[5] = 0x27;
3854 VG_(get_shadow_regs_area)( tid, &area[1], 2/*shadowno*/,sh2off,4 );
3855 tl_assert(area[0] == 0x31);
3856 tl_assert(area[5] == 0x27);
3857 otag = *(UInt*)&area[1];
3862 /* When some chunk of guest state is written, mark the corresponding
3863 shadow area as valid. This is used to initialise arbitrarily large
3864 chunks of guest state, hence the _SIZE value, which has to be as
3865 big as the biggest guest state.
3867 static void mc_post_reg_write ( CorePart part, ThreadId tid,
3868 PtrdiffT offset, SizeT size)
3870 # define MAX_REG_WRITE_SIZE 1408
3871 UChar area[MAX_REG_WRITE_SIZE];
3872 tl_assert(size <= MAX_REG_WRITE_SIZE);
3873 VG_(memset)(area, V_BITS8_DEFINED, size);
3874 VG_(set_shadow_regs_area)( tid, 1/*shadowNo*/,offset,size, area );
3875 # undef MAX_REG_WRITE_SIZE
3879 void mc_post_reg_write_clientcall ( ThreadId tid,
3880 PtrdiffT offset, SizeT size, Addr f)
3882 mc_post_reg_write(/*dummy*/0, tid, offset, size);
3885 /* Look at the definedness of the guest's shadow state for
3886 [offset, offset+len). If any part of that is undefined, record
3887 a parameter error.
3888 */
3889 static void mc_pre_reg_read ( CorePart part, ThreadId tid, Char* s,
3890 PtrdiffT offset, SizeT size)
3897 tl_assert(size <= 16);
3899 VG_(get_shadow_regs_area)( tid, area, 1/*shadowNo*/,offset,size );
3902 for (i = 0; i < size; i++) {
3903 if (area[i] != V_BITS8_DEFINED) {
3912 /* We've found some undefinedness. See if we can also find an
3914 otag = mb_get_origin_for_guest_offset( tid, offset, size );
3915 MC_(record_regparam_error) ( tid, s, otag );
3919 /*------------------------------------------------------------*/
3920 /*--- Functions called directly from generated code: ---*/
3921 /*--- Load/store handlers. ---*/
3922 /*------------------------------------------------------------*/
3924 /* Types: LOADV32, LOADV16, LOADV8 are:
3925 UWord fn ( Addr a )
3926 so they return 32-bits on 32-bit machines and 64-bits on
3927 64-bit machines. Addr has the same size as a host word.
3929 LOADV64 is always ULong fn ( Addr a )
3931 Similarly for STOREV8, STOREV16, STOREV32, the supplied vbits
3932 are a UWord, and for STOREV64 they are a ULong.
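/* E.g. (our sketch of the compact -> expanded conversion): for an
   aligned 8-byte load, mc_LOADV64 below reads one UShort of shadow,
   i.e. eight 2-bit V+A entries.  If that equals VA_BITS16_DEFINED
   it returns V_BITS64_DEFINED (all 64 V bits clear = defined)
   without inspecting individual bytes; any mixed pattern falls
   through to mc_LOADVn_slow. */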
3935 /* If any part of '_a' indicated by the mask is 1, either '_a' is not
3936 naturally '_sz/8'-aligned, or it exceeds the range covered by the
3937 primary map. This is all very tricky (and important!), so let's
3938 work through the maths by hand (below), *and* assert for these
3939 values at startup. */
3940 #define MASK(_szInBytes) \
3941 ( ~((0x10000UL-(_szInBytes)) | ((N_PRIMARY_MAP-1) << 16)) )
3943 /* MASK only exists so as to define this macro. */
3944 #define UNALIGNED_OR_HIGH(_a,_szInBits) \
3945 ((_a) & MASK((_szInBits>>3)))
3947 /* On a 32-bit machine:
3949 N_PRIMARY_BITS == 16, so
3950 N_PRIMARY_MAP == 0x10000, so
3951 N_PRIMARY_MAP-1 == 0xFFFF, so
3952 (N_PRIMARY_MAP-1) << 16 == 0xFFFF0000, and so
3954 MASK(1) = ~ ( (0x10000 - 1) | 0xFFFF0000 )
3955         = ~ ( 0xFFFF | 0xFFFF0000 )
3956         = ~ 0xFFFF'FFFF
3957         = 0
3959 MASK(2) = ~ ( (0x10000 - 2) | 0xFFFF0000 )
3960         = ~ ( 0xFFFE | 0xFFFF0000 )
3961         = ~ 0xFFFF'FFFE
3962         = 1
3964 MASK(4) = ~ ( (0x10000 - 4) | 0xFFFF0000 )
3965         = ~ ( 0xFFFC | 0xFFFF0000 )
3966         = ~ 0xFFFF'FFFC
3967         = 3
3969 MASK(8) = ~ ( (0x10000 - 8) | 0xFFFF0000 )
3970         = ~ ( 0xFFF8 | 0xFFFF0000 )
3971         = ~ 0xFFFF'FFF8
3972         = 7
3974 Hence in the 32-bit case, "a & MASK(1/2/4/8)" is a nonzero value
3975 precisely when a is not 1/2/4/8-bytes aligned. And obviously, for
3976 the 1-byte alignment case, it is always a zero value, since MASK(1)
3977 is zero. All as expected.
3979 On a 64-bit machine, it's more complex, since we're testing
3980 simultaneously for misalignment and for the address being at or
3981 above 32G:
3983 N_PRIMARY_BITS == 19, so
3984 N_PRIMARY_MAP == 0x80000, so
3985 N_PRIMARY_MAP-1 == 0x7FFFF, so
3986 (N_PRIMARY_MAP-1) << 16 == 0x7FFFF'0000, and so
3988 MASK(1) = ~ ( (0x10000 - 1) | 0x7FFFF'0000 )
3989         = ~ ( 0xFFFF | 0x7FFFF'0000 )
3990         = ~ 0x7'FFFF'FFFF
3991         = 0xFFFF'FFF8'0000'0000
3993 MASK(2) = ~ ( (0x10000 - 2) | 0x7FFFF'0000 )
3994         = ~ ( 0xFFFE | 0x7FFFF'0000 )
3995         = ~ 0x7'FFFF'FFFE
3996         = 0xFFFF'FFF8'0000'0001
3998 MASK(4) = ~ ( (0x10000 - 4) | 0x7FFFF'0000 )
3999         = ~ ( 0xFFFC | 0x7FFFF'0000 )
4000         = ~ 0x7'FFFF'FFFC
4001         = 0xFFFF'FFF8'0000'0003
4003 MASK(8) = ~ ( (0x10000 - 8) | 0x7FFFF'0000 )
4004         = ~ ( 0xFFF8 | 0x7FFFF'0000 )
4005         = ~ 0x7'FFFF'FFF8
4006         = 0xFFFF'FFF8'0000'0007
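/* Hence a quick cross-check (ours) of what the fast paths test:

      UNALIGNED_OR_HIGH(a,64) == 0   iff   a is 8-aligned
                                           and a < 2^35

   2^35 being the span of the main primary map (0x80000 secondaries
   of 64KB each); anything else is punted to the slow routines. */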
4010 /* ------------------------ Size = 8 ------------------------ */
4013 ULong mc_LOADV64 ( Addr a, Bool isBigEndian )
4015 PROF_EVENT(200, "mc_LOADV64");
4017 #ifndef PERF_FAST_LOADV
4018 return mc_LOADVn_slow( a, 64, isBigEndian );
4021 UWord sm_off16, vabits16;
4024 if (UNLIKELY( UNALIGNED_OR_HIGH(a,64) )) {
4025 PROF_EVENT(201, "mc_LOADV64-slow1");
4026 return (ULong)mc_LOADVn_slow( a, 64, isBigEndian );
4029 sm = get_secmap_for_reading_low(a);
4030 sm_off16 = SM_OFF_16(a);
4031 vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
4033 // Handle common case quickly: a is suitably aligned, is mapped, and
4034 // addressible.
4035 // Convert V bits from compact memory form to expanded register form.
4036 if (LIKELY(vabits16 == VA_BITS16_DEFINED)) {
4037 return V_BITS64_DEFINED;
4038 } else if (LIKELY(vabits16 == VA_BITS16_UNDEFINED)) {
4039 return V_BITS64_UNDEFINED;
4041 /* Slow case: the 8 bytes are not all-defined or all-undefined. */
4042 PROF_EVENT(202, "mc_LOADV64-slow2");
4043 return mc_LOADVn_slow( a, 64, isBigEndian );
4049 VG_REGPARM(1) ULong MC_(helperc_LOADV64be) ( Addr a )
4051 return mc_LOADV64(a, True);
4053 VG_REGPARM(1) ULong MC_(helperc_LOADV64le) ( Addr a )
4055 return mc_LOADV64(a, False);
4060 void mc_STOREV64 ( Addr a, ULong vbits64, Bool isBigEndian )
4062 PROF_EVENT(210, "mc_STOREV64");
4064 #ifndef PERF_FAST_STOREV
4065 // XXX: this slow case seems to be marginally faster than the fast case!
4066 // Investigate further.
4067 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
4070 UWord sm_off16, vabits16;
4073 if (UNLIKELY( UNALIGNED_OR_HIGH(a,64) )) {
4074 PROF_EVENT(211, "mc_STOREV64-slow1");
4075 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
4079 sm = get_secmap_for_reading_low(a);
4080 sm_off16 = SM_OFF_16(a);
4081 vabits16 = ((UShort*)(sm->vabits8))[sm_off16];
4083 if (LIKELY( !is_distinguished_sm(sm) &&
4084 (VA_BITS16_DEFINED == vabits16 ||
4085 VA_BITS16_UNDEFINED == vabits16) ))
4087 /* Handle common case quickly: a is suitably aligned, */
4088 /* is mapped, and is addressible. */
4089 // Convert full V-bits in register to compact 2-bit form.
4090 if (V_BITS64_DEFINED == vbits64) {
4091 ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_DEFINED;
4092 } else if (V_BITS64_UNDEFINED == vbits64) {
4093 ((UShort*)(sm->vabits8))[sm_off16] = (UShort)VA_BITS16_UNDEFINED;
4095 /* Slow but general case -- writing partially defined bytes. */
4096 PROF_EVENT(212, "mc_STOREV64-slow2");
4097 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
4100 /* Slow but general case. */
4101 PROF_EVENT(213, "mc_STOREV64-slow3");
4102 mc_STOREVn_slow( a, 64, vbits64, isBigEndian );
4108 VG_REGPARM(1) void MC_(helperc_STOREV64be) ( Addr a, ULong vbits64 )
4110 mc_STOREV64(a, vbits64, True);
4112 VG_REGPARM(1) void MC_(helperc_STOREV64le) ( Addr a, ULong vbits64 )
4114 mc_STOREV64(a, vbits64, False);
4118 /* ------------------------ Size = 4 ------------------------ */
4121 UWord mc_LOADV32 ( Addr a, Bool isBigEndian )
4123 PROF_EVENT(220, "mc_LOADV32");
4125 #ifndef PERF_FAST_LOADV
4126 return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
4129 UWord sm_off, vabits8;
4132 if (UNLIKELY( UNALIGNED_OR_HIGH(a,32) )) {
4133 PROF_EVENT(221, "mc_LOADV32-slow1");
4134 return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
4137 sm = get_secmap_for_reading_low(a);
4139 vabits8 = sm->vabits8[sm_off];
4141 // Handle common case quickly: a is suitably aligned, is mapped, and the
4142 // entire word32 it lives in is addressible.
4143 // Convert V bits from compact memory form to expanded register form.
4144 // For 64-bit platforms, set the high 32 bits of retval to 1 (undefined).
4145 // Almost certainly not necessary, but be paranoid.
4146 if (LIKELY(vabits8 == VA_BITS8_DEFINED)) {
4147 return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_DEFINED);
4148 } else if (LIKELY(vabits8 == VA_BITS8_UNDEFINED)) {
4149 return ((UWord)0xFFFFFFFF00000000ULL | (UWord)V_BITS32_UNDEFINED);
4151 /* Slow case: the 4 bytes are not all-defined or all-undefined. */
4152 PROF_EVENT(222, "mc_LOADV32-slow2");
4153 return (UWord)mc_LOADVn_slow( a, 32, isBigEndian );
4159 VG_REGPARM(1) UWord MC_(helperc_LOADV32be) ( Addr a )
4161 return mc_LOADV32(a, True);
4163 VG_REGPARM(1) UWord MC_(helperc_LOADV32le) ( Addr a )
4165 return mc_LOADV32(a, False);
4170 void mc_STOREV32 ( Addr a, UWord vbits32, Bool isBigEndian )
4172 PROF_EVENT(230, "mc_STOREV32");
4174 #ifndef PERF_FAST_STOREV
4175 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
4178 UWord sm_off, vabits8;
4181 if (UNLIKELY( UNALIGNED_OR_HIGH(a,32) )) {
4182 PROF_EVENT(231, "mc_STOREV32-slow1");
4183 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
4187 sm = get_secmap_for_reading_low(a);
4189 vabits8 = sm->vabits8[sm_off];
4191 // Cleverness: sometimes we don't have to write the shadow memory at
4192 // all, if we can tell that what we want to write is the same as what is
4193 // already there. The 64/16/8 bit cases also have cleverness at this
4194 // point, but it works a little differently to the code below.
4195 if (V_BITS32_DEFINED == vbits32) {
4196 if (vabits8 == (UInt)VA_BITS8_DEFINED) {
4198 } else if (!is_distinguished_sm(sm) && VA_BITS8_UNDEFINED == vabits8) {
4199 sm->vabits8[sm_off] = (UInt)VA_BITS8_DEFINED;
4201 // not defined/undefined, or distinguished and changing state
4202 PROF_EVENT(232, "mc_STOREV32-slow2");
4203 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
4205 } else if (V_BITS32_UNDEFINED == vbits32) {
4206 if (vabits8 == (UInt)VA_BITS8_UNDEFINED) {
4208 } else if (!is_distinguished_sm(sm) && VA_BITS8_DEFINED == vabits8) {
4209 sm->vabits8[sm_off] = (UInt)VA_BITS8_UNDEFINED;
4211 // not defined/undefined, or distinguished and changing state
4212 PROF_EVENT(233, "mc_STOREV32-slow3");
4213 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
4216 // Partially defined word
4217 PROF_EVENT(234, "mc_STOREV32-slow4");
4218 mc_STOREVn_slow( a, 32, (ULong)vbits32, isBigEndian );
4224 VG_REGPARM(2) void MC_(helperc_STOREV32be) ( Addr a, UWord vbits32 )
4226 mc_STOREV32(a, vbits32, True);
4228 VG_REGPARM(2) void MC_(helperc_STOREV32le) ( Addr a, UWord vbits32 )
4230 mc_STOREV32(a, vbits32, False);
4234 /* ------------------------ Size = 2 ------------------------ */
4237 UWord mc_LOADV16 ( Addr a, Bool isBigEndian )
4239 PROF_EVENT(240, "mc_LOADV16");
4241 #ifndef PERF_FAST_LOADV
4242 return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
4245 UWord sm_off, vabits8;
4248 if (UNLIKELY( UNALIGNED_OR_HIGH(a,16) )) {
4249 PROF_EVENT(241, "mc_LOADV16-slow1");
4250 return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
4253 sm = get_secmap_for_reading_low(a);
4255 vabits8 = sm->vabits8[sm_off];
4256 // Handle common case quickly: a is suitably aligned, is mapped, and is
4257 // addressible.
4258 // Convert V bits from compact memory form to expanded register form
4259 if (vabits8 == VA_BITS8_DEFINED ) { return V_BITS16_DEFINED; }
4260 else if (vabits8 == VA_BITS8_UNDEFINED) { return V_BITS16_UNDEFINED; }
4262 // The 4 (yes, 4) bytes are not all-defined or all-undefined, check
4263 // the two sub-bytes.
4264 UChar vabits4 = extract_vabits4_from_vabits8(a, vabits8);
4265 if (vabits4 == VA_BITS4_DEFINED ) { return V_BITS16_DEFINED; }
4266 else if (vabits4 == VA_BITS4_UNDEFINED) { return V_BITS16_UNDEFINED; }
4268 /* Slow case: the two bytes are not all-defined or all-undefined. */
4269 PROF_EVENT(242, "mc_LOADV16-slow2");
4270 return (UWord)mc_LOADVn_slow( a, 16, isBigEndian );
4277 VG_REGPARM(1) UWord MC_(helperc_LOADV16be) ( Addr a )
4279 return mc_LOADV16(a, True);
4281 VG_REGPARM(1) UWord MC_(helperc_LOADV16le) ( Addr a )
4283 return mc_LOADV16(a, False);
4288 void mc_STOREV16 ( Addr a, UWord vbits16, Bool isBigEndian )
4290 PROF_EVENT(250, "mc_STOREV16");
4292 #ifndef PERF_FAST_STOREV
4293 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
4296 UWord sm_off, vabits8;
4299 if (UNLIKELY( UNALIGNED_OR_HIGH(a,16) )) {
4300 PROF_EVENT(251, "mc_STOREV16-slow1");
4301 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
4305 sm = get_secmap_for_reading_low(a);
4307 vabits8 = sm->vabits8[sm_off];
4308 if (LIKELY( !is_distinguished_sm(sm) &&
4309 (VA_BITS8_DEFINED == vabits8 ||
4310 VA_BITS8_UNDEFINED == vabits8) ))
4312 /* Handle common case quickly: a is suitably aligned, */
4313 /* is mapped, and is addressible. */
4314 // Convert full V-bits in register to compact 2-bit form.
4315 if (V_BITS16_DEFINED == vbits16) {
4316 insert_vabits4_into_vabits8( a, VA_BITS4_DEFINED ,
4317 &(sm->vabits8[sm_off]) );
4318 } else if (V_BITS16_UNDEFINED == vbits16) {
4319 insert_vabits4_into_vabits8( a, VA_BITS4_UNDEFINED,
4320 &(sm->vabits8[sm_off]) );
4322 /* Slow but general case -- writing partially defined bytes. */
4323 PROF_EVENT(252, "mc_STOREV16-slow2");
4324 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
4327 /* Slow but general case. */
4328 PROF_EVENT(253, "mc_STOREV16-slow3");
4329 mc_STOREVn_slow( a, 16, (ULong)vbits16, isBigEndian );
4335 VG_REGPARM(2) void MC_(helperc_STOREV16be) ( Addr a, UWord vbits16 )
4337 mc_STOREV16(a, vbits16, True);
4339 VG_REGPARM(2) void MC_(helperc_STOREV16le) ( Addr a, UWord vbits16 )
4341 mc_STOREV16(a, vbits16, False);
4345 /* ------------------------ Size = 1 ------------------------ */
4346 /* Note: endianness is irrelevant for size == 1 */
4349 UWord MC_(helperc_LOADV8) ( Addr a )
4351 PROF_EVENT(260, "mc_LOADV8");
4353 #ifndef PERF_FAST_LOADV
4354 return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
4357 UWord sm_off, vabits8;
4360 if (UNLIKELY( UNALIGNED_OR_HIGH(a,8) )) {
4361 PROF_EVENT(261, "mc_LOADV8-slow1");
4362 return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
4365 sm = get_secmap_for_reading_low(a);
4367 vabits8 = sm->vabits8[sm_off];
4368 // Convert V bits from compact memory form to expanded register form
4369 // Handle common case quickly: a is mapped, and the entire
4370 // word32 it lives in is addressible.
4371 if (vabits8 == VA_BITS8_DEFINED ) { return V_BITS8_DEFINED; }
4372 else if (vabits8 == VA_BITS8_UNDEFINED) { return V_BITS8_UNDEFINED; }
4374 // The 4 (yes, 4) bytes are not all-defined or all-undefined, check
4375 // the single byte.
4376 UChar vabits2 = extract_vabits2_from_vabits8(a, vabits8);
4377 if (vabits2 == VA_BITS2_DEFINED ) { return V_BITS8_DEFINED; }
4378 else if (vabits2 == VA_BITS2_UNDEFINED) { return V_BITS8_UNDEFINED; }
4380 /* Slow case: the byte is not all-defined or all-undefined. */
4381 PROF_EVENT(262, "mc_LOADV8-slow2");
4382 return (UWord)mc_LOADVn_slow( a, 8, False/*irrelevant*/ );
4391 void MC_(helperc_STOREV8) ( Addr a, UWord vbits8 )
4393 PROF_EVENT(270, "mc_STOREV8");
4395 #ifndef PERF_FAST_STOREV
4396 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
4399 UWord sm_off, vabits8;
4402 if (UNLIKELY( UNALIGNED_OR_HIGH(a,8) )) {
4403 PROF_EVENT(271, "mc_STOREV8-slow1");
4404 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
4408 sm = get_secmap_for_reading_low(a);
4410 vabits8 = sm->vabits8[sm_off];
4412 ( !is_distinguished_sm(sm) &&
4413 ( (VA_BITS8_DEFINED == vabits8 || VA_BITS8_UNDEFINED == vabits8)
4414 || (VA_BITS2_NOACCESS != extract_vabits2_from_vabits8(a, vabits8))
4419 /* Handle common case quickly: a is mapped, the entire word32 it
4420 lives in is addressible. */
4421 // Convert full V-bits in register to compact 2-bit form.
4422 if (V_BITS8_DEFINED == vbits8) {
4423 insert_vabits2_into_vabits8( a, VA_BITS2_DEFINED,
4424 &(sm->vabits8[sm_off]) );
4425 } else if (V_BITS8_UNDEFINED == vbits8) {
4426 insert_vabits2_into_vabits8( a, VA_BITS2_UNDEFINED,
4427 &(sm->vabits8[sm_off]) );
4429 /* Slow but general case -- writing partially defined bytes. */
4430 PROF_EVENT(272, "mc_STOREV8-slow2");
4431 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
4434 /* Slow but general case. */
4435 PROF_EVENT(273, "mc_STOREV8-slow3");
4436 mc_STOREVn_slow( a, 8, (ULong)vbits8, False/*irrelevant*/ );
4443 /*------------------------------------------------------------*/
4444 /*--- Functions called directly from generated code: ---*/
4445 /*--- Value-check failure handlers. ---*/
4446 /*------------------------------------------------------------*/
4448 /* Call these ones when an origin is available ... */
4450 void MC_(helperc_value_check0_fail_w_o) ( UWord origin ) {
4451 MC_(record_cond_error) ( VG_(get_running_tid)(), (UInt)origin );
4455 void MC_(helperc_value_check1_fail_w_o) ( UWord origin ) {
4456 MC_(record_value_error) ( VG_(get_running_tid)(), 1, (UInt)origin );
4460 void MC_(helperc_value_check4_fail_w_o) ( UWord origin ) {
4461 MC_(record_value_error) ( VG_(get_running_tid)(), 4, (UInt)origin );
4465 void MC_(helperc_value_check8_fail_w_o) ( UWord origin ) {
4466 MC_(record_value_error) ( VG_(get_running_tid)(), 8, (UInt)origin );
4470 void MC_(helperc_value_checkN_fail_w_o) ( HWord sz, UWord origin ) {
4471 MC_(record_value_error) ( VG_(get_running_tid)(), (Int)sz, (UInt)origin );
4474 /* ... and these when an origin isn't available. */
4477 void MC_(helperc_value_check0_fail_no_o) ( void ) {
4478 MC_(record_cond_error) ( VG_(get_running_tid)(), 0/*origin*/ );
4482 void MC_(helperc_value_check1_fail_no_o) ( void ) {
4483 MC_(record_value_error) ( VG_(get_running_tid)(), 1, 0/*origin*/ );
4487 void MC_(helperc_value_check4_fail_no_o) ( void ) {
4488 MC_(record_value_error) ( VG_(get_running_tid)(), 4, 0/*origin*/ );
4492 void MC_(helperc_value_check8_fail_no_o) ( void ) {
4493 MC_(record_value_error) ( VG_(get_running_tid)(), 8, 0/*origin*/ );
4497 void MC_(helperc_value_checkN_fail_no_o) ( HWord sz ) {
4498 MC_(record_value_error) ( VG_(get_running_tid)(), (Int)sz, 0/*origin*/ );
4502 /*------------------------------------------------------------*/
4503 /*--- Metadata get/set functions, for client requests. ---*/
4504 /*------------------------------------------------------------*/
4506 // Nb: this expands the V+A bits out into register-form V bits, even though
4507 // they're in memory. This is for backward compatibility, and because it's
4508 // probably what the user wants.
4510 /* Copy Vbits from/to address 'a'. Returns: 1 == OK, 2 == alignment
4511 error [no longer used], 3 == addressing error. */
4512 /* Nb: We used to issue various definedness/addressability errors from here,
4513 but we took them out because they ranged from not-very-helpful to
4514 downright annoying, and they complicated the error data structures. */
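/* Client-side view (a sketch; the real macros are in memcheck.h):

      UChar vbits[8];
      VALGRIND_GET_VBITS(&x, vbits, sizeof(x));  // 1-bits = undefined
      VALGRIND_SET_VBITS(&x, vbits, sizeof(x));  // reverse direction

   Both requests land here via VG_USERREQ__GET_VBITS and
   VG_USERREQ__SET_VBITS in the handler further below. */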
4515 static Int mc_get_or_set_vbits_for_client (
4516 Addr a,
4517 Addr vbits,
4518 SizeT szB,
4519 Bool setting /* True <=> set vbits, False <=> get vbits */
4526 /* Check that arrays are addressible before doing any getting/setting. */
4527 for (i = 0; i < szB; i++) {
4528 if (VA_BITS2_NOACCESS == get_vabits2(a + i) ||
4529 VA_BITS2_NOACCESS == get_vabits2(vbits + i)) {
4537 for (i = 0; i < szB; i++) {
4538 ok = set_vbits8(a + i, ((UChar*)vbits)[i]);
4543 for (i = 0; i < szB; i++) {
4544 ok = get_vbits8(a + i, &vbits8);
4546 ((UChar*)vbits)[i] = vbits8;
4548 // The bytes in vbits[] have now been set, so mark them as such.
4549 MC_(make_mem_defined)(vbits, szB);
4556 /*------------------------------------------------------------*/
4557 /*--- Detecting leaked (unreachable) malloc'd blocks. ---*/
4558 /*------------------------------------------------------------*/
4560 /* For the memory leak detector, say whether an entire 64k chunk of
4561 address space is possibly in use, or not. If in doubt return
4562 True.
4563 */
4564 Bool MC_(is_within_valid_secondary) ( Addr a )
4566 SecMap* sm = maybe_get_secmap_for ( a );
4567 if (sm == NULL || sm == &sm_distinguished[SM_DIST_NOACCESS]
4568 || MC_(in_ignored_range)(a)) {
4569 /* Definitely not in use. */
4577 /* For the memory leak detector, say whether or not a given word
4578 address is to be regarded as valid. */
4579 Bool MC_(is_valid_aligned_word) ( Addr a )
4581 tl_assert(sizeof(UWord) == 4 || sizeof(UWord) == 8);
4582 tl_assert(VG_IS_WORD_ALIGNED(a));
4583 if (is_mem_defined( a, sizeof(UWord), NULL, NULL) == MC_Ok
4584 && !MC_(in_ignored_range)(a)) {
4592 /*------------------------------------------------------------*/
4593 /*--- Initialisation ---*/
4594 /*------------------------------------------------------------*/
4596 static void init_shadow_memory ( void )
4601 tl_assert(V_BIT_UNDEFINED == 1);
4602 tl_assert(V_BIT_DEFINED == 0);
4603 tl_assert(V_BITS8_UNDEFINED == 0xFF);
4604 tl_assert(V_BITS8_DEFINED == 0);
4606 /* Build the 3 distinguished secondaries */
4607 sm = &sm_distinguished[SM_DIST_NOACCESS];
4608 for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_NOACCESS;
4610 sm = &sm_distinguished[SM_DIST_UNDEFINED];
4611 for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_UNDEFINED;
4613 sm = &sm_distinguished[SM_DIST_DEFINED];
4614 for (i = 0; i < SM_CHUNKS; i++) sm->vabits8[i] = VA_BITS8_DEFINED;
4616 /* Set up the primary map. */
4617 /* These entries gradually get overwritten as the used address
4618 space expands. */
4619 for (i = 0; i < N_PRIMARY_MAP; i++)
4620 primary_map[i] = &sm_distinguished[SM_DIST_NOACCESS];
4622 /* Auxiliary primary maps */
4623 init_auxmap_L1_L2();
4625 /* auxmap_size = auxmap_used = 0;
4626 no ... these are statically initialised */
4628 /* Secondary V bit table */
4629 secVBitTable = createSecVBitTable();
4633 /*------------------------------------------------------------*/
4634 /*--- Sanity check machinery (permanently engaged) ---*/
4635 /*------------------------------------------------------------*/
4637 static Bool mc_cheap_sanity_check ( void )
4640 PROF_EVENT(490, "cheap_sanity_check");
4641 /* Check for sane operating level */
4642 if (MC_(clo_mc_level) < 1 || MC_(clo_mc_level) > 3)
4644 /* nothing else useful we can rapidly check */
4648 static Bool mc_expensive_sanity_check ( void )
4651 Word n_secmaps_found;
4656 if (0) VG_(printf)("expensive sanity check\n");
4659 n_sanity_expensive++;
4660 PROF_EVENT(491, "expensive_sanity_check");
4662 /* Check for sane operating level */
4663 if (MC_(clo_mc_level) < 1 || MC_(clo_mc_level) > 3)
4666 /* Check that the 3 distinguished SMs are still as they should be. */
4668 /* Check noaccess DSM. */
4669 sm = &sm_distinguished[SM_DIST_NOACCESS];
4670 for (i = 0; i < SM_CHUNKS; i++)
4671 if (sm->vabits8[i] != VA_BITS8_NOACCESS)
4674 /* Check undefined DSM. */
4675 sm = &sm_distinguished[SM_DIST_UNDEFINED];
4676 for (i = 0; i < SM_CHUNKS; i++)
4677 if (sm->vabits8[i] != VA_BITS8_UNDEFINED)
4680 /* Check defined DSM. */
4681 sm = &sm_distinguished[SM_DIST_DEFINED];
4682 for (i = 0; i < SM_CHUNKS; i++)
4683 if (sm->vabits8[i] != VA_BITS8_DEFINED)
4687 VG_(printf)("memcheck expensive sanity: "
4688 "distinguished_secondaries have changed\n");
4692 /* If we're not checking for undefined value errors, the secondary V bit
4693 * table should be empty. */
4694 if (MC_(clo_mc_level) == 1) {
4695 if (0 != VG_(OSetGen_Size)(secVBitTable))
4699 /* check the auxiliary maps, very thoroughly */
4700 n_secmaps_found = 0;
4701 errmsg = check_auxmap_L1_L2_sanity( &n_secmaps_found );
4703 VG_(printf)("memcheck expensive sanity, auxmaps:\n\t%s", errmsg);
4707 /* n_secmaps_found is now the number referred to by the auxiliary
4708 primary map. Now add on the ones referred to by the main
4710 for (i = 0; i < N_PRIMARY_MAP; i++) {
4711 if (primary_map[i] == NULL) {
4714 if (!is_distinguished_sm(primary_map[i]))
4719 /* check that the number of secmaps issued matches the number that
4720 are reachable (iow, no secmap leaks) */
4721 if (n_secmaps_found != (n_issued_SMs - n_deissued_SMs))
4725 VG_(printf)("memcheck expensive sanity: "
4726 "apparent secmap leakage\n");
4731 VG_(printf)("memcheck expensive sanity: "
4732 "auxmap covers wrong address space\n");
4736 /* there is only one pointer to each secmap (expensive) */
4741 /*------------------------------------------------------------*/
4742 /*--- Command line args ---*/
4743 /*------------------------------------------------------------*/
4745 Bool MC_(clo_partial_loads_ok) = False;
4746 Long MC_(clo_freelist_vol) = 10*1000*1000LL;
4747 LeakCheckMode MC_(clo_leak_check) = LC_Summary;
4748 VgRes MC_(clo_leak_resolution) = Vg_HighRes;
4749 Bool MC_(clo_show_reachable) = False;
4750 Bool MC_(clo_show_possibly_lost) = True;
4751 Bool MC_(clo_workaround_gcc296_bugs) = False;
4752 Int MC_(clo_malloc_fill) = -1;
4753 Int MC_(clo_free_fill) = -1;
4754 Int MC_(clo_mc_level) = 2;
4756 static Bool mc_process_cmd_line_options(Char* arg)
4760 tl_assert( MC_(clo_mc_level) >= 1 && MC_(clo_mc_level) <= 3 );
4762 /* Set MC_(clo_mc_level):
4763 1 = A bit tracking only
4764 2 = A and V bit tracking, but no V bit origins
4765 3 = A and V bit tracking, and V bit origins
4767 Do this by inspecting --undef-value-errors= and
4768 --track-origins=. Reject the case --undef-value-errors=no
4769 --track-origins=yes as meaningless.
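/* For example (our summary of the rules implemented below):

      (defaults)                                   -> level 2
      --undef-value-errors=no                      -> level 1
      --track-origins=yes                          -> level 3
      --undef-value-errors=no --track-origins=yes  -> rejected
*/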
4771 if (0 == VG_(strcmp)(arg, "--undef-value-errors=no")) {
4772 if (MC_(clo_mc_level) == 3) {
4775 MC_(clo_mc_level) = 1;
4779 if (0 == VG_(strcmp)(arg, "--undef-value-errors=yes")) {
4780 if (MC_(clo_mc_level) == 1)
4781 MC_(clo_mc_level) = 2;
4784 if (0 == VG_(strcmp)(arg, "--track-origins=no")) {
4785 if (MC_(clo_mc_level) == 3)
4786 MC_(clo_mc_level) = 2;
4789 if (0 == VG_(strcmp)(arg, "--track-origins=yes")) {
4790 if (MC_(clo_mc_level) == 1) {
4793 MC_(clo_mc_level) = 3;
4798 if VG_BOOL_CLO(arg, "--partial-loads-ok", MC_(clo_partial_loads_ok)) {}
4799 else if VG_BOOL_CLO(arg, "--show-reachable", MC_(clo_show_reachable)) {}
4800 else if VG_BOOL_CLO(arg, "--show-possibly-lost",
4801 MC_(clo_show_possibly_lost)) {}
4802 else if VG_BOOL_CLO(arg, "--workaround-gcc296-bugs",
4803 MC_(clo_workaround_gcc296_bugs)) {}
4805 else if VG_BINT_CLO(arg, "--freelist-vol", MC_(clo_freelist_vol),
4806 0, 10*1000*1000*1000LL) {}
4808 else if VG_XACT_CLO(arg, "--leak-check=no",
4809 MC_(clo_leak_check), LC_Off) {}
4810 else if VG_XACT_CLO(arg, "--leak-check=summary",
4811 MC_(clo_leak_check), LC_Summary) {}
4812 else if VG_XACT_CLO(arg, "--leak-check=yes",
4813 MC_(clo_leak_check), LC_Full) {}
4814 else if VG_XACT_CLO(arg, "--leak-check=full",
4815 MC_(clo_leak_check), LC_Full) {}
4817 else if VG_XACT_CLO(arg, "--leak-resolution=low",
4818 MC_(clo_leak_resolution), Vg_LowRes) {}
4819 else if VG_XACT_CLO(arg, "--leak-resolution=med",
4820 MC_(clo_leak_resolution), Vg_MedRes) {}
4821 else if VG_XACT_CLO(arg, "--leak-resolution=high",
4822 MC_(clo_leak_resolution), Vg_HighRes) {}
4824 else if VG_STR_CLO(arg, "--ignore-ranges", tmp_str) {
4826 Bool ok = parse_ignore_ranges(tmp_str);
4829 tl_assert(ignoreRanges.used >= 0);
4830 tl_assert(ignoreRanges.used < M_IGNORE_RANGES);
4831 for (i = 0; i < ignoreRanges.used; i++) {
4832 Addr s = ignoreRanges.start[i];
4833 Addr e = ignoreRanges.end[i];
4834 Addr limit = 0x4000000; /* 64M - entirely arbitrary limit */
4836 VG_(message)(Vg_DebugMsg,
4837 "ERROR: --ignore-ranges: end <= start in range:\n");
4838 VG_(message)(Vg_DebugMsg,
4839 " 0x%lx-0x%lx\n", s, e);
4842 if (e - s > limit) {
4843 VG_(message)(Vg_DebugMsg,
4844 "ERROR: --ignore-ranges: suspiciously large range:\n");
4845 VG_(message)(Vg_DebugMsg,
4846 " 0x%lx-0x%lx (size %ld)\n", s, e, (UWord)(e-s));
4852 else if VG_BHEX_CLO(arg, "--malloc-fill", MC_(clo_malloc_fill), 0x00,0xFF) {}
4853 else if VG_BHEX_CLO(arg, "--free-fill", MC_(clo_free_fill), 0x00,0xFF) {}
4856 return VG_(replacement_malloc_process_cmd_line_option)(arg);
4862 VG_(fmsg_bad_option)(arg,
4863 "--track-origins=yes has no effect when --undef-value-errors=no.\n");
4866 static void mc_print_usage(void)
4869 " --leak-check=no|summary|full search for memory leaks at exit? [summary]\n"
4870 " --leak-resolution=low|med|high differentiation of leak stack traces [high]\n"
4871 " --show-reachable=no|yes show reachable blocks in leak check? [no]\n"
4872 " --show-possibly-lost=no|yes show possibly lost blocks in leak check?\n"
4874 " --undef-value-errors=no|yes check for undefined value errors [yes]\n"
4875 " --track-origins=no|yes show origins of undefined values? [no]\n"
4876 " --partial-loads-ok=no|yes too hard to explain here; see manual [no]\n"
4877 " --freelist-vol=<number> volume of freed blocks queue [10000000]\n"
4878 " --workaround-gcc296-bugs=no|yes self explanatory [no]\n"
4879 " --ignore-ranges=0xPP-0xQQ[,0xRR-0xSS] assume given addresses are OK\n"
4880 " --malloc-fill=<hexnumber> fill malloc'd areas with given value\n"
4881 " --free-fill=<hexnumber> fill free'd areas with given value\n"
4885 static void mc_print_debug_usage(void)
4893 /*------------------------------------------------------------*/
4894 /*--- Client blocks ---*/
4895 /*------------------------------------------------------------*/
4897 /* Client block management:
4899 This is managed as an expanding array of client block descriptors.
4900 Indices of live descriptors are issued to the client, so it can ask
4901 to free them later. Therefore we cannot slide live entries down
4902 over dead ones. Instead we must use free/inuse flags and scan for
4903 an empty slot at allocation time. This in turn means allocation is
4904 relatively expensive, so we hope this does not happen too often.
4906 An unused block has start == size == 0
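/* Client-side sketch (ours; the macros live in memcheck.h):

      int id = VALGRIND_CREATE_BLOCK(buf, 64, "my buffer");
      ...
      VALGRIND_DISCARD(id);   // slot becomes start == size == 0 again

   CREATE_BLOCK hands back the descriptor's index, which DISCARD
   later uses to locate and free the slot (see the request handler
   below). */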
4909 /* type CGenBlock is defined in mc_include.h */
4911 /* This subsystem is self-initialising. */
4912 static UWord cgb_size = 0;
4913 static UWord cgb_used = 0;
4914 static CGenBlock* cgbs = NULL;
4916 /* Stats for this subsystem. */
4917 static ULong cgb_used_MAX = 0; /* Max in use. */
4918 static ULong cgb_allocs = 0; /* Number of allocs. */
4919 static ULong cgb_discards = 0; /* Number of discards. */
4920 static ULong cgb_search = 0; /* Number of searches. */
4923 /* Get access to the client block array. */
4924 void MC_(get_ClientBlock_array)( /*OUT*/CGenBlock** blocks,
4925 /*OUT*/UWord* nBlocks )
4928 *nBlocks = cgb_used;
4933 Int alloc_client_block ( void )
4936 CGenBlock* cgbs_new;
4940 for (i = 0; i < cgb_used; i++) {
4942 if (cgbs[i].start == 0 && cgbs[i].size == 0)
4946 /* Not found. Try to allocate one at the end. */
4947 if (cgb_used < cgb_size) {
4952 /* Ok, we have to allocate a new one. */
4953 tl_assert(cgb_used == cgb_size);
4954 sz_new = (cgbs == NULL) ? 10 : (2 * cgb_size);
4956 cgbs_new = VG_(malloc)( "mc.acb.1", sz_new * sizeof(CGenBlock) );
4957 for (i = 0; i < cgb_used; i++)
4958 cgbs_new[i] = cgbs[i];
4966 if (cgb_used > cgb_used_MAX)
4967 cgb_used_MAX = cgb_used;
4972 static void show_client_block_stats ( void )
4974 VG_(message)(Vg_DebugMsg,
4975 "general CBs: %llu allocs, %llu discards, %llu maxinuse, %llu search\n",
4976 cgb_allocs, cgb_discards, cgb_used_MAX, cgb_search
4981 /*------------------------------------------------------------*/
4982 /*--- Client requests ---*/
4983 /*------------------------------------------------------------*/
4985 static Bool mc_handle_client_request ( ThreadId tid, UWord* arg, UWord* ret )
4991 if (!VG_IS_TOOL_USERREQ('M','C',arg[0])
4992 && VG_USERREQ__MALLOCLIKE_BLOCK != arg[0]
4993 && VG_USERREQ__FREELIKE_BLOCK != arg[0]
4994 && VG_USERREQ__CREATE_MEMPOOL != arg[0]
4995 && VG_USERREQ__DESTROY_MEMPOOL != arg[0]
4996 && VG_USERREQ__MEMPOOL_ALLOC != arg[0]
4997 && VG_USERREQ__MEMPOOL_FREE != arg[0]
4998 && VG_USERREQ__MEMPOOL_TRIM != arg[0]
4999 && VG_USERREQ__MOVE_MEMPOOL != arg[0]
5000 && VG_USERREQ__MEMPOOL_CHANGE != arg[0]
5001 && VG_USERREQ__MEMPOOL_EXISTS != arg[0])
5005 case VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE:
5006 ok = is_mem_addressable ( arg[1], arg[2], &bad_addr );
5008 MC_(record_user_error) ( tid, bad_addr, /*isAddrErr*/True, 0 );
5009 *ret = ok ? (UWord)NULL : bad_addr;
5012 case VG_USERREQ__CHECK_MEM_IS_DEFINED: {
5015 res = is_mem_defined ( arg[1], arg[2], &bad_addr, &otag );
5016 if (MC_AddrErr == res)
5017 MC_(record_user_error) ( tid, bad_addr, /*isAddrErr*/True, 0 );
5018 else if (MC_ValueErr == res)
5019 MC_(record_user_error) ( tid, bad_addr, /*isAddrErr*/False, otag );
5020 *ret = ( res==MC_Ok ? (UWord)NULL : bad_addr );
5024 case VG_USERREQ__DO_LEAK_CHECK:
5025 MC_(detect_memory_leaks)(tid, arg[1] ? LC_Summary : LC_Full);
5026 *ret = 0; /* return value is meaningless */
5029 case VG_USERREQ__MAKE_MEM_NOACCESS:
5030 MC_(make_mem_noaccess) ( arg[1], arg[2] );
5034 case VG_USERREQ__MAKE_MEM_UNDEFINED:
5035 make_mem_undefined_w_tid_and_okind ( arg[1], arg[2], tid,
5040 case VG_USERREQ__MAKE_MEM_DEFINED:
5041 MC_(make_mem_defined) ( arg[1], arg[2] );
5045 case VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE:
5046 make_mem_defined_if_addressable ( arg[1], arg[2] );
5050 case VG_USERREQ__CREATE_BLOCK: /* describe a block */
5051 if (arg[1] != 0 && arg[2] != 0) {
5052 i = alloc_client_block();
5053 /* VG_(printf)("allocated %d %p\n", i, cgbs); */
5054 cgbs[i].start = arg[1];
5055 cgbs[i].size = arg[2];
5056 cgbs[i].desc = VG_(strdup)("mc.mhcr.1", (Char *)arg[3]);
5057 cgbs[i].where = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
5063 case VG_USERREQ__DISCARD: /* discard */
5065 || arg[2] >= cgb_used ||
5066 (cgbs[arg[2]].start == 0 && cgbs[arg[2]].size == 0)) {
5069 tl_assert(arg[2] >= 0 && arg[2] < cgb_used);
5070 cgbs[arg[2]].start = cgbs[arg[2]].size = 0;
5071 VG_(free)(cgbs[arg[2]].desc);
5077 case VG_USERREQ__GET_VBITS:
5078 *ret = mc_get_or_set_vbits_for_client
5079 ( arg[1], arg[2], arg[3], False /* get them */ );
5082 case VG_USERREQ__SET_VBITS:
5083 *ret = mc_get_or_set_vbits_for_client
5084 ( arg[1], arg[2], arg[3], True /* set them */ );
5087 case VG_USERREQ__COUNT_LEAKS: { /* count leaked bytes */
5088 UWord** argp = (UWord**)arg;
5089 // MC_(bytes_leaked) et al were set by the last leak check (or zero
5090 // if no prior leak checks performed).
5091 *argp[1] = MC_(bytes_leaked) + MC_(bytes_indirect);
5092 *argp[2] = MC_(bytes_dubious);
5093 *argp[3] = MC_(bytes_reachable);
5094 *argp[4] = MC_(bytes_suppressed);
5095 // there is no argp[5]
5096 //*argp[5] = MC_(bytes_indirect);
5097 // XXX need to make *argp[1-4] defined; currently done in the
5098 // VALGRIND_COUNT_LEAKS_MACRO by initialising them to zero.
5102 case VG_USERREQ__COUNT_LEAK_BLOCKS: { /* count leaked blocks */
5103 UWord** argp = (UWord**)arg;
5104 // MC_(blocks_leaked) et al were set by the last leak check (or zero
5105 // if no prior leak checks performed).
5106 *argp[1] = MC_(blocks_leaked) + MC_(blocks_indirect);
5107 *argp[2] = MC_(blocks_dubious);
5108 *argp[3] = MC_(blocks_reachable);
5109 *argp[4] = MC_(blocks_suppressed);
5110 // there is no argp[5]
5111 //*argp[5] = MC_(blocks_indirect);
5112 // XXX need to make *argp[1-4] defined; currently done in the
5113 // VALGRIND_COUNT_LEAK_BLOCKS_MACRO by initialising them to zero.
5117 case VG_USERREQ__MALLOCLIKE_BLOCK: {
5118 Addr p = (Addr)arg[1];
5119 SizeT sizeB = arg[2];
5120 //UInt rzB = arg[3]; XXX: unused!
5121 Bool is_zeroed = (Bool)arg[4];
5123 MC_(new_block) ( tid, p, sizeB, /*ignored*/0, is_zeroed,
5124 MC_AllocCustom, MC_(malloc_list) );
5127 case VG_USERREQ__FREELIKE_BLOCK: {
5128 Addr p = (Addr)arg[1];
5131 MC_(handle_free) ( tid, p, rzB, MC_AllocCustom );
5135 case _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR: {
5136 Char* s = (Char*)arg[1];
5137 Addr dst = (Addr) arg[2];
5138 Addr src = (Addr) arg[3];
5139 SizeT len = (SizeT)arg[4];
5140 MC_(record_overlap_error)(tid, s, src, dst, len);
5144 case VG_USERREQ__CREATE_MEMPOOL: {
5145 Addr pool = (Addr)arg[1];
5147 Bool is_zeroed = (Bool)arg[3];
5149 MC_(create_mempool) ( pool, rzB, is_zeroed );
5153 case VG_USERREQ__DESTROY_MEMPOOL: {
5154 Addr pool = (Addr)arg[1];
5156 MC_(destroy_mempool) ( pool );
5160 case VG_USERREQ__MEMPOOL_ALLOC: {
5161 Addr pool = (Addr)arg[1];
5162 Addr addr = (Addr)arg[2];
5165 MC_(mempool_alloc) ( tid, pool, addr, size );
5169 case VG_USERREQ__MEMPOOL_FREE: {
5170 Addr pool = (Addr)arg[1];
5171 Addr addr = (Addr)arg[2];
5173 MC_(mempool_free) ( pool, addr );
5177 case VG_USERREQ__MEMPOOL_TRIM: {
5178 Addr pool = (Addr)arg[1];
5179 Addr addr = (Addr)arg[2];
5182 MC_(mempool_trim) ( pool, addr, size );
5186 case VG_USERREQ__MOVE_MEMPOOL: {
5187 Addr poolA = (Addr)arg[1];
5188 Addr poolB = (Addr)arg[2];
5190 MC_(move_mempool) ( poolA, poolB );
5194 case VG_USERREQ__MEMPOOL_CHANGE: {
5195 Addr pool = (Addr)arg[1];
5196 Addr addrA = (Addr)arg[2];
5197 Addr addrB = (Addr)arg[3];
5200 MC_(mempool_change) ( pool, addrA, addrB, size );
5204 case VG_USERREQ__MEMPOOL_EXISTS: {
5205 Addr pool = (Addr)arg[1];
5207 *ret = (UWord) MC_(mempool_exists) ( pool );
5215 "Warning: unknown memcheck client request code %llx\n",
/*------------------------------------------------------------*/
/*--- Crude profiling machinery.                           ---*/
/*------------------------------------------------------------*/

// We track a number of interesting events (using PROF_EVENT)
// if MC_PROFILE_MEMORY is defined.
#ifdef MC_PROFILE_MEMORY

UInt   MC_(event_ctr)[N_PROF_EVENTS];
HChar* MC_(event_ctr_name)[N_PROF_EVENTS];

static void init_prof_mem ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++) {
      MC_(event_ctr)[i] = 0;
      MC_(event_ctr_name)[i] = NULL;
   }
}

static void done_prof_mem ( void )
{
   Int  i;
   Bool spaced = False;
   for (i = 0; i < N_PROF_EVENTS; i++) {
      if (!spaced && (i % 10) == 0) {
         VG_(printf)("\n");
         spaced = True;
      }
      if (MC_(event_ctr)[i] > 0) {
         spaced = False;
         VG_(printf)( "prof mem event %3d: %9d   %s\n",
                      i, MC_(event_ctr)[i],
                      MC_(event_ctr_name)[i]
                         ? MC_(event_ctr_name)[i] : "unnamed");
      }
   }
}

#else

static void init_prof_mem ( void ) { }
static void done_prof_mem ( void ) { }

#endif
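/* A PROF_EVENT call site elsewhere in this file looks like the sketch
   below (the event number and name here are made up for illustration):

      PROF_EVENT(66, "helperc_LOADV8");

   When MC_PROFILE_MEMORY is defined this bumps MC_(event_ctr)[66] and
   records the name, and done_prof_mem() prints the non-zero counters at
   exit; otherwise PROF_EVENT compiles away to nothing. */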
/*------------------------------------------------------------*/
/*--- Origin tracking stuff                                ---*/
/*------------------------------------------------------------*/

/*--------------------------------------------*/
/*--- Origin tracking: load handlers       ---*/
/*--------------------------------------------*/
static INLINE UInt merge_origins ( UInt or1, UInt or2 ) {
   return or1 > or2 ? or1 : or2;
}
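/* Taking the numerical max is an arbitrary but cheap way to combine two
   otags deterministically; in particular it guarantees that a real otag
   always beats zero ("no origin"): merge_origins(0, ot) == ot. */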
UWord VG_REGPARM(1) MC_(helperc_b_load1)( Addr a ) {
   OCacheLine* line;
   UChar descr;
   UWord lineoff = oc_line_offset(a);
   UWord byteoff = a & 3; /* 0, 1, 2 or 3 */

   if (OC_ENABLE_ASSERTIONS) {
      tl_assert(lineoff >= 0 && lineoff < OC_W32S_PER_LINE);
   }

   line = find_OCacheLine( a );

   descr = line->descr[lineoff];
   if (OC_ENABLE_ASSERTIONS) {
      tl_assert(descr < 0x10);
   }

   if (LIKELY(0 == (descr & (1 << byteoff)))) {
      return 0;
   } else {
      return line->w32[lineoff];
   }
}
UWord VG_REGPARM(1) MC_(helperc_b_load2)( Addr a ) {
   OCacheLine* line;
   UChar descr;
   UWord lineoff, byteoff;

   if (UNLIKELY(a & 1)) {
      /* Handle misaligned case, slowly. */
      UInt oLo = (UInt)MC_(helperc_b_load1)( a + 0 );
      UInt oHi = (UInt)MC_(helperc_b_load1)( a + 1 );
      return merge_origins(oLo, oHi);
   }

   lineoff = oc_line_offset(a);
   byteoff = a & 3; /* 0 or 2 */

   if (OC_ENABLE_ASSERTIONS) {
      tl_assert(lineoff >= 0 && lineoff < OC_W32S_PER_LINE);
   }

   line = find_OCacheLine( a );

   descr = line->descr[lineoff];
   if (OC_ENABLE_ASSERTIONS) {
      tl_assert(descr < 0x10);
   }

   if (LIKELY(0 == (descr & (3 << byteoff)))) {
      return 0;
   } else {
      return line->w32[lineoff];
   }
}
UWord VG_REGPARM(1) MC_(helperc_b_load4)( Addr a ) {
   OCacheLine* line;
   UChar descr;
   UWord lineoff;

   if (UNLIKELY(a & 3)) {
      /* Handle misaligned case, slowly. */
      UInt oLo = (UInt)MC_(helperc_b_load2)( a + 0 );
      UInt oHi = (UInt)MC_(helperc_b_load2)( a + 2 );
      return merge_origins(oLo, oHi);
   }

   lineoff = oc_line_offset(a);

   if (OC_ENABLE_ASSERTIONS) {
      tl_assert(lineoff >= 0 && lineoff < OC_W32S_PER_LINE);
   }

   line = find_OCacheLine( a );

   descr = line->descr[lineoff];
   if (OC_ENABLE_ASSERTIONS) {
      tl_assert(descr < 0x10);
   }

   if (LIKELY(0 == descr)) {
      return 0;
   } else {
      return line->w32[lineoff];
   }
}
UWord VG_REGPARM(1) MC_(helperc_b_load8)( Addr a ) {
   OCacheLine* line;
   UChar descrLo, descrHi, descr;
   UWord lineoff;

   if (UNLIKELY(a & 7)) {
      /* Handle misaligned case, slowly. */
      UInt oLo = (UInt)MC_(helperc_b_load4)( a + 0 );
      UInt oHi = (UInt)MC_(helperc_b_load4)( a + 4 );
      return merge_origins(oLo, oHi);
   }

   lineoff = oc_line_offset(a);
   if (OC_ENABLE_ASSERTIONS) {
      tl_assert(lineoff == (lineoff & 6)); /*0,2,4,6*//*since 8-aligned*/
   }

   line = find_OCacheLine( a );

   descrLo = line->descr[lineoff + 0];
   descrHi = line->descr[lineoff + 1];
   descr   = descrLo | descrHi;
   if (OC_ENABLE_ASSERTIONS) {
      tl_assert(descr < 0x10);
   }

   if (LIKELY(0 == descr)) {
      return 0; /* both 32-bit chunks are defined */
   } else {
      UInt oLo = descrLo == 0 ? 0 : line->w32[lineoff + 0];
      UInt oHi = descrHi == 0 ? 0 : line->w32[lineoff + 1];
      return merge_origins(oLo, oHi);
   }
}
UWord VG_REGPARM(1) MC_(helperc_b_load16)( Addr a ) {
   UInt oLo   = (UInt)MC_(helperc_b_load8)( a + 0 );
   UInt oHi   = (UInt)MC_(helperc_b_load8)( a + 8 );
   UInt oBoth = merge_origins(oLo, oHi);
   return (UWord)oBoth;
}
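/* Worked example of the descr scheme used by the load handlers above.
   Each 32-bit word in an OCacheLine has a 4-bit descr, one bit per byte;
   a set bit means "this byte's origin is described by w32[lineoff]".
   So with descr == 0x6 (binary 0110) and w32[lineoff] == ot:

      MC_(helperc_b_load1)(a+0)  -> 0    (bit 0 clear: no origin info)
      MC_(helperc_b_load1)(a+1)  -> ot   (bit 1 set)
      MC_(helperc_b_load2)(a+0)  -> ot   (tests bits 0..1; one is set)
      MC_(helperc_b_load4)(a+0)  -> ot   (descr != 0)
*/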
/*--------------------------------------------*/
/*--- Origin tracking: store handlers      ---*/
/*--------------------------------------------*/
void VG_REGPARM(2) MC_(helperc_b_store1)( Addr a, UWord d32 ) {
   OCacheLine* line;
   UWord lineoff = oc_line_offset(a);
   UWord byteoff = a & 3; /* 0, 1, 2 or 3 */

   if (OC_ENABLE_ASSERTIONS) {
      tl_assert(lineoff >= 0 && lineoff < OC_W32S_PER_LINE);
   }

   line = find_OCacheLine( a );

   if (d32 == 0) {
      line->descr[lineoff] &= ~(1 << byteoff);
   } else {
      line->descr[lineoff] |= (1 << byteoff);
      line->w32[lineoff] = d32;
   }
}
void VG_REGPARM(2) MC_(helperc_b_store2)( Addr a, UWord d32 ) {
   OCacheLine* line;
   UWord lineoff, byteoff;

   if (UNLIKELY(a & 1)) {
      /* Handle misaligned case, slowly. */
      MC_(helperc_b_store1)( a + 0, d32 );
      MC_(helperc_b_store1)( a + 1, d32 );
      return;
   }

   lineoff = oc_line_offset(a);
   byteoff = a & 3; /* 0 or 2 */

   if (OC_ENABLE_ASSERTIONS) {
      tl_assert(lineoff >= 0 && lineoff < OC_W32S_PER_LINE);
   }

   line = find_OCacheLine( a );

   if (d32 == 0) {
      line->descr[lineoff] &= ~(3 << byteoff);
   } else {
      line->descr[lineoff] |= (3 << byteoff);
      line->w32[lineoff] = d32;
   }
}
void VG_REGPARM(2) MC_(helperc_b_store4)( Addr a, UWord d32 ) {
   OCacheLine* line;
   UWord lineoff;

   if (UNLIKELY(a & 3)) {
      /* Handle misaligned case, slowly. */
      MC_(helperc_b_store2)( a + 0, d32 );
      MC_(helperc_b_store2)( a + 2, d32 );
      return;
   }

   lineoff = oc_line_offset(a);

   if (OC_ENABLE_ASSERTIONS) {
      tl_assert(lineoff >= 0 && lineoff < OC_W32S_PER_LINE);
   }

   line = find_OCacheLine( a );

   if (d32 == 0) {
      line->descr[lineoff] = 0;
   } else {
      line->descr[lineoff] = 0xF;
      line->w32[lineoff] = d32;
   }
}
void VG_REGPARM(2) MC_(helperc_b_store8)( Addr a, UWord d32 ) {
   OCacheLine* line;
   UWord lineoff;

   if (UNLIKELY(a & 7)) {
      /* Handle misaligned case, slowly. */
      MC_(helperc_b_store4)( a + 0, d32 );
      MC_(helperc_b_store4)( a + 4, d32 );
      return;
   }

   lineoff = oc_line_offset(a);
   if (OC_ENABLE_ASSERTIONS) {
      tl_assert(lineoff == (lineoff & 6)); /*0,2,4,6*//*since 8-aligned*/
   }

   line = find_OCacheLine( a );

   if (d32 == 0) {
      line->descr[lineoff + 0] = 0;
      line->descr[lineoff + 1] = 0;
   } else {
      line->descr[lineoff + 0] = 0xF;
      line->descr[lineoff + 1] = 0xF;
      line->w32[lineoff + 0] = d32;
      line->w32[lineoff + 1] = d32;
   }
}
void VG_REGPARM(2) MC_(helperc_b_store16)( Addr a, UWord d32 ) {
   MC_(helperc_b_store8)( a + 0, d32 );
   MC_(helperc_b_store8)( a + 8, d32 );
}
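/* Note the asymmetry in the store handlers above: storing otag 0 means
   "no origin information", so it only clears descr bits and leaves the
   stale w32 value behind; a non-zero otag sets the descr bits and
   overwrites w32.  For example:

      MC_(helperc_b_store4)( a, ot );  // descr[lineoff] = 0xF, w32 = ot
      MC_(helperc_b_store4)( a, 0  );  // descr[lineoff] = 0, w32 unchanged
      MC_(helperc_b_load4) ( a );      // -> 0, since descr == 0
*/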
/*--------------------------------------------*/
/*--- Origin tracking: sarp handlers       ---*/
/*--------------------------------------------*/
__attribute__((noinline))
static void ocache_sarp_Set_Origins ( Addr a, UWord len, UInt otag ) {
   if ((a & 1) && len >= 1) {
      MC_(helperc_b_store1)( a, otag );
      a++;
      len--;
   }
   if ((a & 2) && len >= 2) {
      MC_(helperc_b_store2)( a, otag );
      a += 2;
      len -= 2;
   }
   if (len >= 4)
      tl_assert(0 == (a & 3));
   while (len >= 4) {
      MC_(helperc_b_store4)( a, otag );
      a += 4;
      len -= 4;
   }
   if (len >= 2) {
      MC_(helperc_b_store2)( a, otag );
      a += 2;
      len -= 2;
   }
   if (len >= 1) {
      MC_(helperc_b_store1)( a, otag );
      //a++;
      len--;
   }
   tl_assert(len == 0);
}
__attribute__((noinline))
static void ocache_sarp_Clear_Origins ( Addr a, UWord len ) {
   if ((a & 1) && len >= 1) {
      MC_(helperc_b_store1)( a, 0 );
      a++;
      len--;
   }
   if ((a & 2) && len >= 2) {
      MC_(helperc_b_store2)( a, 0 );
      a += 2;
      len -= 2;
   }
   if (len >= 4)
      tl_assert(0 == (a & 3));
   while (len >= 4) {
      MC_(helperc_b_store4)( a, 0 );
      a += 4;
      len -= 4;
   }
   if (len >= 2) {
      MC_(helperc_b_store2)( a, 0 );
      a += 2;
      len -= 2;
   }
   if (len >= 1) {
      MC_(helperc_b_store1)( a, 0 );
      //a++;
      len--;
   }
   tl_assert(len == 0);
}
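/* Worked example of the alignment peeling in the two sarp handlers
   above, for a = 0x1001, len = 7:

      a & 1          -> store1 at 0x1001   (a = 0x1002, len = 6)
      a & 2          -> store2 at 0x1002   (a = 0x1004, len = 4)
      while len >= 4 -> store4 at 0x1004   (a = 0x1008, len = 0)

   i.e. peel leading bytes until 4-aligned, do the bulk with the cheap
   4-byte store, then peel any trailing bytes. */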
/*------------------------------------------------------------*/
/*--- Setup and finalisation                               ---*/
/*------------------------------------------------------------*/
static void mc_post_clo_init ( void )
{
   /* If we've been asked to emit XML, mash around various other
      options so as to constrain the output somewhat. */
   if (VG_(clo_xml)) {
      /* Extract as much info as possible from the leak checker. */
      /* MC_(clo_show_reachable) = True; */
      MC_(clo_leak_check) = LC_Full;
   }

   tl_assert( MC_(clo_mc_level) >= 1 && MC_(clo_mc_level) <= 3 );

   if (MC_(clo_mc_level) == 3) {
      /* We're doing origin tracking. */
#     ifdef PERF_FAST_STACK
      VG_(track_new_mem_stack_4_w_ECU)   ( mc_new_mem_stack_4_w_ECU   );
      VG_(track_new_mem_stack_8_w_ECU)   ( mc_new_mem_stack_8_w_ECU   );
      VG_(track_new_mem_stack_12_w_ECU)  ( mc_new_mem_stack_12_w_ECU  );
      VG_(track_new_mem_stack_16_w_ECU)  ( mc_new_mem_stack_16_w_ECU  );
      VG_(track_new_mem_stack_32_w_ECU)  ( mc_new_mem_stack_32_w_ECU  );
      VG_(track_new_mem_stack_112_w_ECU) ( mc_new_mem_stack_112_w_ECU );
      VG_(track_new_mem_stack_128_w_ECU) ( mc_new_mem_stack_128_w_ECU );
      VG_(track_new_mem_stack_144_w_ECU) ( mc_new_mem_stack_144_w_ECU );
      VG_(track_new_mem_stack_160_w_ECU) ( mc_new_mem_stack_160_w_ECU );
#     endif
      VG_(track_new_mem_stack_w_ECU)     ( mc_new_mem_stack_w_ECU     );
   } else {
      /* Not doing origin tracking */
#     ifdef PERF_FAST_STACK
      VG_(track_new_mem_stack_4)   ( mc_new_mem_stack_4   );
      VG_(track_new_mem_stack_8)   ( mc_new_mem_stack_8   );
      VG_(track_new_mem_stack_12)  ( mc_new_mem_stack_12  );
      VG_(track_new_mem_stack_16)  ( mc_new_mem_stack_16  );
      VG_(track_new_mem_stack_32)  ( mc_new_mem_stack_32  );
      VG_(track_new_mem_stack_112) ( mc_new_mem_stack_112 );
      VG_(track_new_mem_stack_128) ( mc_new_mem_stack_128 );
      VG_(track_new_mem_stack_144) ( mc_new_mem_stack_144 );
      VG_(track_new_mem_stack_160) ( mc_new_mem_stack_160 );
#     endif
      VG_(track_new_mem_stack)     ( mc_new_mem_stack );
   }

   /* This origin tracking cache is huge (~100M), so only initialise
      it if we need to. */
   if (MC_(clo_mc_level) >= 3) {
      init_OCache();
      tl_assert(ocacheL1 != NULL);
      tl_assert(ocacheL2 != NULL);
   } else {
      tl_assert(ocacheL1 == NULL);
      tl_assert(ocacheL2 == NULL);
   }
}
static void print_SM_info(char* type, int n_SMs)
{
   VG_(message)(Vg_DebugMsg,
      " memcheck: SMs: %s = %d (%ldk, %ldM)\n",
      type,
      n_SMs,
      n_SMs * sizeof(SecMap) / 1024UL,
      n_SMs * sizeof(SecMap) / (1024 * 1024UL) );
}
static void mc_fini ( Int exitcode )
{
   MC_(print_malloc_stats)();

   if (MC_(clo_leak_check) != LC_Off) {
      MC_(detect_memory_leaks)(1/*bogus ThreadId*/, MC_(clo_leak_check));
   } else {
      if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
         VG_(umsg)(
            "For a detailed leak analysis, rerun with: --leak-check=full\n");
      }
   }

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, rerun with: -v\n");
   }

   if (MC_(any_value_errors) && !VG_(clo_xml) && VG_(clo_verbosity) >= 1
       && MC_(clo_mc_level) == 2) {
      VG_(message)(Vg_UserMsg,
                   "Use --track-origins=yes to see where "
                   "uninitialised values come from\n");
   }

   done_prof_mem();

   if (VG_(clo_stats)) {
      SizeT max_secVBit_szB, max_SMs_szB, max_shmem_szB;

      VG_(message)(Vg_DebugMsg,
         " memcheck: sanity checks: %d cheap, %d expensive\n",
         n_sanity_cheap, n_sanity_expensive );
      VG_(message)(Vg_DebugMsg,
         " memcheck: auxmaps: %lld auxmap entries (%lldk, %lldM) in use\n",
         n_auxmap_L2_nodes,
         n_auxmap_L2_nodes * 64,
         n_auxmap_L2_nodes / 16 );
      VG_(message)(Vg_DebugMsg,
         " memcheck: auxmaps_L1: %lld searches, %lld cmps, ratio %lld:10\n",
         n_auxmap_L1_searches, n_auxmap_L1_cmps,
         (10ULL * n_auxmap_L1_cmps)
            / (n_auxmap_L1_searches ? n_auxmap_L1_searches : 1)
      );
      VG_(message)(Vg_DebugMsg,
         " memcheck: auxmaps_L2: %lld searches, %lld nodes\n",
         n_auxmap_L2_searches, n_auxmap_L2_nodes
      );

      print_SM_info("n_issued     ", n_issued_SMs);
      print_SM_info("n_deissued   ", n_deissued_SMs);
      print_SM_info("max_noaccess ", max_noaccess_SMs);
      print_SM_info("max_undefined", max_undefined_SMs);
      print_SM_info("max_defined  ", max_defined_SMs);
      print_SM_info("max_non_DSM  ", max_non_DSM_SMs);

      // Three DSMs, plus the non-DSM ones
      max_SMs_szB = (3 + max_non_DSM_SMs) * sizeof(SecMap);
      // The 3*sizeof(Word) bytes is the AVL node metadata size.
      // The 4*sizeof(Word) bytes is the malloc metadata size.
      // Hardwiring these sizes in sucks, but I don't see how else to do it.
      max_secVBit_szB = max_secVBit_nodes *
            (sizeof(SecVBitNode) + 3*sizeof(Word) + 4*sizeof(Word));
      max_shmem_szB = sizeof(primary_map) + max_SMs_szB + max_secVBit_szB;

      VG_(message)(Vg_DebugMsg,
         " memcheck: max sec V bit nodes: %d (%ldk, %ldM)\n",
         max_secVBit_nodes, max_secVBit_szB / 1024,
         max_secVBit_szB / (1024 * 1024));
      VG_(message)(Vg_DebugMsg,
         " memcheck: set_sec_vbits8 calls: %llu (new: %llu, updates: %llu)\n",
         sec_vbits_new_nodes + sec_vbits_updates,
         sec_vbits_new_nodes, sec_vbits_updates );
      VG_(message)(Vg_DebugMsg,
         " memcheck: max shadow mem size: %ldk, %ldM\n",
         max_shmem_szB / 1024, max_shmem_szB / (1024 * 1024));

      if (MC_(clo_mc_level) >= 3) {
         VG_(message)(Vg_DebugMsg,
                      " ocacheL1: %'12lu refs   %'12lu misses (%'lu lossage)\n",
                      stats_ocacheL1_find,
                      stats_ocacheL1_misses,
                      stats_ocacheL1_lossage );
         VG_(message)(Vg_DebugMsg,
                      " ocacheL1: %'12lu at 0   %'12lu at 1\n",
                      stats_ocacheL1_find - stats_ocacheL1_misses
                         - stats_ocacheL1_found_at_1
                         - stats_ocacheL1_found_at_N,
                      stats_ocacheL1_found_at_1 );
         VG_(message)(Vg_DebugMsg,
                      " ocacheL1: %'12lu at 2+  %'12lu move-fwds\n",
                      stats_ocacheL1_found_at_N,
                      stats_ocacheL1_movefwds );
         VG_(message)(Vg_DebugMsg,
                      " ocacheL1: %'12lu sizeB  %'12u useful\n",
                      (UWord)sizeof(OCache),
                      4 * OC_W32S_PER_LINE * OC_LINES_PER_SET * OC_N_SETS );
         VG_(message)(Vg_DebugMsg,
                      " ocacheL2: %'12lu refs   %'12lu misses\n",
                      stats__ocacheL2_refs,
                      stats__ocacheL2_misses );
         VG_(message)(Vg_DebugMsg,
                      " ocacheL2: %'9lu max nodes %'9lu curr nodes\n",
                      stats__ocacheL2_n_nodes_max,
                      stats__ocacheL2_n_nodes );
         VG_(message)(Vg_DebugMsg,
                      " niacache: %'12lu refs   %'12lu misses\n",
                      stats__nia_cache_queries, stats__nia_cache_misses);
      } else {
         tl_assert(ocacheL1 == NULL);
         tl_assert(ocacheL2 == NULL);
      }
   }

   if (0) {
      VG_(message)(Vg_DebugMsg,
         "------ Valgrind's client block stats follow ---------------\n" );
      show_client_block_stats();
   }
}
static void mc_pre_clo_init(void)
{
   VG_(details_name)            ("Memcheck");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a memory error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2002-2010, and GNU GPL'd, by Julian Seward et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 556 );

   VG_(basic_tool_funcs)          (mc_post_clo_init,
                                   MC_(instrument),
                                   mc_fini);

   VG_(needs_final_IR_tidy_pass)  ( MC_(final_tidy) );

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (MC_(eq_Error),
                                   MC_(before_pp_Error),
                                   MC_(pp_Error),
                                   True,/*show TIDs for errors*/
                                   MC_(update_Error_extra),
                                   MC_(is_recognised_suppression),
                                   MC_(read_extra_suppression_info),
                                   MC_(error_matches_suppression),
                                   MC_(get_error_name),
                                   MC_(get_extra_suppression_info));
   VG_(needs_libc_freeres)        ();
   VG_(needs_command_line_options)(mc_process_cmd_line_options,
                                   mc_print_usage,
                                   mc_print_debug_usage);
   VG_(needs_client_requests)     (mc_handle_client_request);
   VG_(needs_sanity_checks)       (mc_cheap_sanity_check,
                                   mc_expensive_sanity_check);
   VG_(needs_malloc_replacement)  (MC_(malloc),
                                   MC_(__builtin_new),
                                   MC_(__builtin_vec_new),
                                   MC_(memalign),
                                   MC_(calloc),
                                   MC_(free),
                                   MC_(__builtin_delete),
                                   MC_(__builtin_vec_delete),
                                   MC_(realloc),
                                   MC_(malloc_usable_size),
                                   MC_MALLOC_REDZONE_SZB );

   VG_(needs_xml_output)          ();

   VG_(track_new_mem_startup)     ( mc_new_mem_startup );
   VG_(track_new_mem_stack_signal)( make_mem_undefined_w_tid );
   // We assume that brk()/sbrk() does not initialise new memory.  Is this
   // accurate?  John Reiser says:
   //
   //   0) sbrk() can *decrease* process address space.  No zero fill is done
   //   for a decrease, not even the fragment on the high end of the last page
   //   that is beyond the new highest address.  For maximum safety and
   //   portability, then the bytes in the last page that reside above [the
   //   new] sbrk(0) should be considered to be uninitialized, but in practice
   //   it is exceedingly likely that they will retain their previous
   //   contents.
   //
   //   1) If an increase is large enough to require new whole pages, then
   //   those new whole pages (like all new pages) are zero-filled by the
   //   operating system.  So if sbrk(0) already is page aligned, then
   //   sbrk(PAGE_SIZE) *does* zero-fill the new memory.
   //
   //   2) Any increase that lies within an existing allocated page is not
   //   changed.  So if (x = sbrk(0)) is not page aligned, then
   //   sbrk(PAGE_SIZE) yields ((PAGE_SIZE -1) & -x) bytes which keep their
   //   existing contents, and an additional PAGE_SIZE bytes which are zeroed.
   //   ((PAGE_SIZE -1) & x) of them are "covered" by the sbrk(), and the rest
   //   of them come along for the ride because the operating system deals
   //   only in whole pages.  Again, for maximum safety and portability, then
   //   anything that lives above [the new] sbrk(0) should be considered
   //   uninitialized, but in practice will retain previous contents [zero in
   //   this case].
   //
   //   A key property of sbrk/brk is that new whole pages that are supplied
   //   by the operating system *do* get initialized to zero.
   //
   //   As for the portability of all this:
   //
   //   sbrk and brk are not POSIX.  However, any system that is a derivative
   //   of *nix has sbrk and brk because there is too much software (such as
   //   the Bourne shell) which relies on the traditional memory map (.text,
   //   .data+.bss, stack) and the existence of sbrk/brk.
   //
   // So we should arguably observe all this.  However:
   // - The current inaccuracy has caused maybe one complaint in seven years(?)
   // - Relying on the zeroed-ness of whole brk'd pages is pretty grotty... I
   //   doubt most programmers know the above information.
   // So I'm not terribly unhappy with marking it as undefined. --njn.
   //
   // [More: I think most of what John said only applies to sbrk().  It seems
   // that brk() always deals in whole pages.  And since this event deals
   // directly with brk(), not with sbrk(), perhaps it would be reasonable to
   // just mark all memory it allocates as defined.]
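   //
   // Illustrative client-side sketch of point 2) above (not part of this
   // file; assumes a Unix-ish libc with <unistd.h> sbrk and a hypothetical
   // PAGE_SIZE constant):
   //
   //    char* x = (char*)sbrk(0);   // current break, possibly unaligned
   //    sbrk(PAGE_SIZE);            // grow the break by one page
   //    // Bytes from x up to the end of its page keep whatever contents
   //    // they had; bytes in the freshly supplied page read as zero.
   //    // Memcheck, per the decision above, nevertheless marks the whole
   //    // new range undefined.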
   VG_(track_new_mem_brk)         ( make_mem_undefined_w_tid );

   // Handling of mmap and mprotect isn't simple (well, it is simple,
   // but the justification isn't.)  See comments above, just prior to
   // mc_new_mem_mmap.
   VG_(track_new_mem_mmap)        ( mc_new_mem_mmap );
   VG_(track_change_mem_mprotect) ( mc_new_mem_mprotect );

   VG_(track_copy_mem_remap)      ( MC_(copy_address_range_state) );

   VG_(track_die_mem_stack_signal)( MC_(make_mem_noaccess) );
   VG_(track_die_mem_brk)         ( MC_(make_mem_noaccess) );
   VG_(track_die_mem_munmap)      ( MC_(make_mem_noaccess) );
   /* Defer the specification of the new_mem_stack functions to the
      post_clo_init function, since we need to first parse the command
      line before deciding which set to use. */

#  ifdef PERF_FAST_STACK
   VG_(track_die_mem_stack_4)     ( mc_die_mem_stack_4   );
   VG_(track_die_mem_stack_8)     ( mc_die_mem_stack_8   );
   VG_(track_die_mem_stack_12)    ( mc_die_mem_stack_12  );
   VG_(track_die_mem_stack_16)    ( mc_die_mem_stack_16  );
   VG_(track_die_mem_stack_32)    ( mc_die_mem_stack_32  );
   VG_(track_die_mem_stack_112)   ( mc_die_mem_stack_112 );
   VG_(track_die_mem_stack_128)   ( mc_die_mem_stack_128 );
   VG_(track_die_mem_stack_144)   ( mc_die_mem_stack_144 );
   VG_(track_die_mem_stack_160)   ( mc_die_mem_stack_160 );
#  endif
   VG_(track_die_mem_stack)       ( mc_die_mem_stack );

   VG_(track_ban_mem_stack)       ( MC_(make_mem_noaccess) );

   VG_(track_pre_mem_read)        ( check_mem_is_defined );
   VG_(track_pre_mem_read_asciiz) ( check_mem_is_defined_asciiz );
   VG_(track_pre_mem_write)       ( check_mem_is_addressable );
   VG_(track_post_mem_write)      ( mc_post_mem_write );

   if (MC_(clo_mc_level) >= 2)
      VG_(track_pre_reg_read)     ( mc_pre_reg_read );

   VG_(track_post_reg_write)                  ( mc_post_reg_write );
   VG_(track_post_reg_write_clientcall_return)( mc_post_reg_write_clientcall );
   init_shadow_memory();
   MC_(malloc_list)  = VG_(HT_construct)( "MC_(malloc_list)" );
   MC_(mempool_list) = VG_(HT_construct)( "MC_(mempool_list)" );
   init_prof_mem();

   tl_assert( mc_expensive_sanity_check() );

   // {LOADV,STOREV}[8421] will all fail horribly if this isn't true.
   tl_assert(sizeof(UWord) == sizeof(Addr));
   // Call me paranoid.  I don't care.
   tl_assert(sizeof(void*) == sizeof(Addr));

   // BYTES_PER_SEC_VBIT_NODE must be a power of two.
   tl_assert(-1 != VG_(log2)(BYTES_PER_SEC_VBIT_NODE));

   /* This is small.  Always initialise it. */
   init_nia_to_ecu_cache();

   /* We can't initialise ocacheL1/ocacheL2 yet, since we don't know
      if we need to, since the command line args haven't been
      processed yet.  Hence defer it to mc_post_clo_init. */
   tl_assert(ocacheL1 == NULL);
   tl_assert(ocacheL2 == NULL);

   /* Check some important stuff.  See extensive comments above
      re UNALIGNED_OR_HIGH for background. */
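   /* Worked example of the values asserted below (a sketch; MASK and
      MAX_PRIMARY_ADDRESS are defined earlier in this file).  On a 64-bit
      target the primary map covers [0, MAX_PRIMARY_ADDRESS], i.e. 2^35
      bytes, so ~MAX_PRIMARY_ADDRESS == 0xFFFFFFF800000000.  MASK(szB)
      also folds in the alignment bits (szB-1), hence for example

         MASK(4) == 0xFFFFFFF800000000 | 3 == 0xFFFFFFF800000003

      and (a & MASK(4)) != 0 exactly when a 4-byte access is misaligned
      or lies above the primary map: the UNALIGNED_OR_HIGH fast-path
      test.  On 32-bit targets the primary map covers the whole address
      space, so MASK reduces to just the alignment bits. */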
#  if VG_WORDSIZE == 4
   tl_assert(sizeof(void*) == 4);
   tl_assert(sizeof(Addr)  == 4);
   tl_assert(sizeof(UWord) == 4);
   tl_assert(sizeof(Word)  == 4);
   tl_assert(MAX_PRIMARY_ADDRESS == 0xFFFFFFFFUL);
   tl_assert(MASK(1) == 0UL);
   tl_assert(MASK(2) == 1UL);
   tl_assert(MASK(4) == 3UL);
   tl_assert(MASK(8) == 7UL);
#  else
   tl_assert(VG_WORDSIZE == 8);
   tl_assert(sizeof(void*) == 8);
   tl_assert(sizeof(Addr)  == 8);
   tl_assert(sizeof(UWord) == 8);
   tl_assert(sizeof(Word)  == 8);
   tl_assert(MAX_PRIMARY_ADDRESS == 0x7FFFFFFFFULL);
   tl_assert(MASK(1) == 0xFFFFFFF800000000ULL);
   tl_assert(MASK(2) == 0xFFFFFFF800000001ULL);
   tl_assert(MASK(4) == 0xFFFFFFF800000003ULL);
   tl_assert(MASK(8) == 0xFFFFFFF800000007ULL);
#  endif
}
VG_DETERMINE_INTERFACE_VERSION(mc_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                mc_main.c ---*/
/*--------------------------------------------------------------------*/