2 // Simplified version of mempool.c, that is more oriented towards
3 // checking that the description of invalid addresses is correct.
7 #include "tests/sys_mman.h"
11 #include "../memcheck.h"
13 #define SUPERBLOCK_SIZE 100000
14 #define REDZONE_SIZE 8
16 typedef struct _level_list
// One node per pool "level" (created by push(), destroyed by pop()).
// NOTE(review): the opening brace and a 'where' member are elided from this
// excerpt; push()/pop() clearly dereference an 'l->where' field — confirm
// against the full file.
18 struct _level_list *next;
20 // Padding ensures the struct is the same size on 32-bit and 64-bit
22 char padding[16 - 2*sizeof(char*)];
25 typedef struct _pool {
// Bump-allocator arena. Members referenced by the functions below but
// elided from this excerpt: 'mem' (base of the superblock), 'where'
// (current bump pointer), 'size', 'left' (bytes remaining), and 'levels'
// (stack of level_list nodes) — confirm against the full file.
30 // Padding ensures the struct is the same size on 32-bit and 64-bit
32 char padding[24 - 3*sizeof(char*)];
35 pool *make_pool( int use_mmap )
// Create a pool whose header and SUPERBLOCK_SIZE superblock are backed
// either by anonymous mmap (use_mmap != 0) or by malloc. The whole
// superblock is marked NOACCESS so memcheck reports any touch that was
// not explicitly handed out via VALGRIND_MEMPOOL_ALLOC.
// NOTE(review): the if/else control lines and the return statement are
// elided from this excerpt; mmap/malloc results are used unchecked,
// which is acceptable for a test fixture but would be a bug elsewhere.
40 p = (pool *)mmap(0, sizeof(pool), PROT_READ|PROT_WRITE|PROT_EXEC,
41 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
42 p->where = p->mem = (char *)mmap(NULL, SUPERBLOCK_SIZE,
43 PROT_READ|PROT_WRITE|PROT_EXEC,
44 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
46 p = (pool *)malloc(sizeof(pool));
47 p->where = p->mem = (char *)malloc(SUPERBLOCK_SIZE);
// Bump pointer starts at the base; the full superblock is still free.
50 p->size = p->left = SUPERBLOCK_SIZE;
// Poison the arena: only allocate() should un-poison ranges from it.
52 VALGRIND_MAKE_MEM_NOACCESS(p->where, SUPERBLOCK_SIZE);
56 void push(pool *p, int use_mmap )
// Start a new level: allocate a level_list node (mmap- or malloc-backed
// to match the pool) and register a fresh valgrind mempool anchored at
// l->where. The lines that set l->where and link l into p->levels are
// elided from this excerpt — presumably l->where = p->where; confirm.
61 l = (level_list *)mmap(0, sizeof(level_list),
62 PROT_READ|PROT_WRITE|PROT_EXEC,
63 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
65 l = (level_list *)malloc(sizeof(level_list));
// The mempool handle is the level's anchor address, not the pool struct.
69 VALGRIND_CREATE_MEMPOOL(l->where, REDZONE_SIZE, 0);
73 void pop(pool *p, int use_mmap)
// Tear down the most recent level: destroy its valgrind mempool and
// re-poison everything allocated since the level began.
75 level_list *l = p->levels;
77 VALGRIND_DESTROY_MEMPOOL(l->where);
// Poison the span handed out during this level: [l->where, p->where).
78 VALGRIND_MAKE_MEM_NOACCESS(l->where, p->where-l->where);
// NOTE(review): the lines unlinking l from p->levels and restoring
// p->where, plus the malloc-backed free path, are elided from this
// excerpt; only the mmap release of the node itself is visible.
81 munmap(l, sizeof(level_list));
86 void destroy_pool(pool *p, int use_mmap)
// Release the superblock and then the pool header itself.
88 level_list *l = p->levels;
// NOTE(review): only the mmap-backed branch is visible here; the
// malloc-backed path and any draining of remaining levels (via l) are
// elided from this excerpt — confirm against the full file.
94 munmap(p->mem, SUPERBLOCK_SIZE);
95 munmap(p, sizeof(pool));
102 char *allocate(pool *p, int size)
// Bump-allocate 'size' usable bytes flanked by two REDZONE_SIZE guard
// zones, and notify memcheck so reads past either edge of the returned
// block are reported as mempool errors. (The declaration of 'where' and
// the return statement are elided from this excerpt.)
105 p->left -= size + (REDZONE_SIZE*2);
// Skip the leading redzone; the caller's usable range is [where, where+size).
106 where = p->where + REDZONE_SIZE;
107 p->where += size + (REDZONE_SIZE*2);
// Register against the current level's anchor, not the pool struct itself.
108 VALGRIND_MEMPOOL_ALLOC(p->levels->where, where, size);
// NOTE(review): p->left is never checked against 0, so exhausting the
// superblock would silently overflow — harmless for this fixed-size test.
112 //-------------------------------------------------------------------------
114 //-------------------------------------------------------------------------
// Driver body (the enclosing main() signature and several statements are
// outside this excerpt). It builds one malloc-backed and one mmap-backed
// pool, then deliberately provokes each error class named in the banners
// below, so the test's expected output can check how memcheck describes
// the invalid addresses in each backing-store case.
121 // p1 is a malloc-backed pool
122 pool *p1 = make_pool(0);
124 // p2 is a mmap-backed pool
125 pool *p2 = make_pool(1);
130 x1 = allocate(p1, 10);
131 x2 = allocate(p2, 20);
134 "\n------ out of range reads in malloc-backed pool ------\n\n");
139 "\n------ out of range reads in mmap-backed pool ------\n\n");
// One byte before and one byte past the 20-byte allocation: both land in
// the redzones that allocate() left poisoned.
140 res += x2[-1]; // invalid
141 res += x2[20]; // invalid
144 "\n------ read free in malloc-backed pool ------\n\n");
// NOTE(review): the free handle here is the pool pointer p1, while
// allocate() registered the block against p->levels->where — confirm
// which handle is intended (a deliberate mismatch is plausible in a
// test that exercises memcheck's error reporting).
145 VALGRIND_MEMPOOL_FREE(p1, x1);
149 "\n------ read free in mmap-backed pool ------\n\n");
150 VALGRIND_MEMPOOL_FREE(p2, x2);
154 "\n------ double free in malloc-backed pool ------\n\n");
// Freeing the same chunk a second time: the "double free" scenarios.
155 VALGRIND_MEMPOOL_FREE(p1, x1);
158 "\n------ double free in mmap-backed pool ------\n\n");
159 VALGRIND_MEMPOOL_FREE(p2, x2);
161 // claim res is used, so gcc can't nuke this all
162 __asm__ __volatile__("" : : "r"(res));
165 "\n------ done ------\n\n");