1 #define _GNU_SOURCE /* See feature_test_macros(7) */
#define SYS_memguard 793

/*
 * Layout of the 64-bit memguard() syscall return value (bit positions):
 * 0-31 - Sum of memory events
 * 61 - Memory budget overrun
 * (descriptions of the remaining fields are missing from this fragment)
 */
#define MGRET_MEM_POS 0
#define MGRET_TIM_POS 32
#define MGRET_OVER_MEM_POS 61
#define MGRET_OVER_TIM_POS 62
#define MGRET_ERROR_POS 63

/* Fix: use the 'ull' suffix so the shifted constants are at least 64 bits
 * wide. With the original 'ul' suffix, the shifts by 32..63 are undefined
 * behavior on targets where unsigned long is 32 bits (C11 6.5.7p3).
 * NOTE(review): MGRET_TIM_MASK covers only 24 bits (bits 32..55); bits
 * 56..60 appear unused — confirm against the kernel ABI. */
#define MGRET_TIM_MASK (0x00FFFFFFull << MGRET_TIM_POS)
#define MGRET_MEM_MASK (0xFFFFFFFFull << MGRET_MEM_POS)
#define MGRET_OVER_MEM_MASK (1ull << MGRET_OVER_MEM_POS)
#define MGRET_OVER_TIM_MASK (1ull << MGRET_OVER_TIM_POS)
#define MGRET_ERROR_MASK (1ull << MGRET_ERROR_POS)
40 char memory[128*10204*1024];
42 static pthread_barrier_t barrier;
/* Decode the packed 64-bit memguard() return value into a struct mg_ret:
 * elapsed time, memory-event count, and the two per-counter overflow flags.
 * NOTE(review): this fragment is missing the opening brace, the
 * `return mgr;` statement and the closing brace of the function. */
51 static struct mg_ret mgret(uint64_t retval)
53 struct mg_ret mgr = {};
54 mgr.time = (retval & MGRET_TIM_MASK) >> MGRET_TIM_POS;
55 mgr.mem = (retval & MGRET_MEM_MASK) >> MGRET_MEM_POS;
56 mgr.time_ovf = (retval >> MGRET_OVER_TIM_POS) & 1;
57 mgr.mem_ovf = (retval >> MGRET_OVER_MEM_POS) & 1;
/* Busy-wait (pure CPU spin, no memory traffic) for roughly time_us
 * microseconds, polling CLOCK_MONOTONIC until the deadline passes.
 * NOTE(review): this fragment is missing the `struct timespec ts;`
 * declaration, the opening `do {` and the function braces. */
61 void compute_kernel(int time_us)
64 uint64_t current_us, end_us;
65 clock_gettime(CLOCK_MONOTONIC, &ts);
66 end_us = ts.tv_sec * 1000000 + ts.tv_nsec / 1000 + time_us;
68 clock_gettime(CLOCK_MONOTONIC, &ts);
69 current_us = ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
70 } while (current_us < end_us);
/* Sequentially read `lines` cache lines (64-byte stride) from the shared
 * `memory` buffer, wrapping modulo its size; the running sum is stored into
 * a volatile so the compiler cannot optimize the loop away.
 * NOTE(review): `i < lines` compares uint64_t against long — a negative
 * `lines` converts to a huge unsigned value; confirm callers always pass
 * non-negative counts. The `sum` declaration and closing braces are
 * missing from this fragment. */
73 void read_memory(long lines)
77 for (uint64_t i = 0; i < lines; i++) {
78 sum += memory[(i * 64) % sizeof(memory)];
80 /* Do not optimize this function out */
81 volatile uint64_t x = sum;
/* Touch `lines` cache lines of `memory` for writing (64-byte stride,
 * wrapping modulo the buffer size).
 * NOTE(review): the declaration of `ptr`, the actual store through it, and
 * the function braces are missing from this fragment. */
85 void write_memory(long lines)
88 for (uint64_t i = 0; i < lines; i++) {
89 ptr = (void*)&memory[(i * 64) % sizeof(memory)];
/* Read `lines` pseudo-randomly chosen bytes from `memory`; the index is
 * advanced by the add-multiply step below — presumably to generate
 * cache/prefetch-unfriendly access patterns (confirm intent with author).
 * The sum goes into a volatile so the loop is not optimized out.
 * NOTE(review): the declarations of `rnd` and `sum` and the closing braces
 * are missing from this fragment. */
94 void read_memory_rnd(long lines)
99 for (long i = 0; i < lines; i++) {
100 rnd = (rnd + 523) * 253573;
101 sum += memory[rnd % sizeof(memory)];
103 /* Do not optimize this function out */
104 volatile uint64_t x = sum;
/* Flag bits accepted by the memguard() syscall. */
109 #define MGF_PERIODIC \
110 (1 << 0) /* Chooses between periodic or one-shot budget replenishment */
111 #define MGF_MASK_INT \
113 << 1) /* Mask (disable) low priority interrupts until next memguard call */

/* Thin wrapper around the raw memguard syscall (number SYS_memguard);
 * returns the packed 64-bit value that mgret() decodes.
 * NOTE(review): the third parameter line (`flags`, judging by the body) and
 * the function braces are missing from this fragment; line "113" also lacks
 * the `(1` that should open the MGF_MASK_INT value. */
115 long memguard(unsigned long timeout, unsigned long memory_budget,
118 return syscall(SYS_memguard, timeout, memory_budget, flags);
/* Print one WvTest-protocol result line ("! file:line condition ok|FAILURE")
 * for the given condition. The WVPASS macro wraps it, capturing the call
 * site and the stringified condition expression.
 * NOTE(review): the function braces are missing from this fragment. */
121 void wvtest_pass(bool cond, const char* file, int line, const char* str)
123 printf("! %s:%d %s %s\n", file, line, str, cond ? "ok" : "FAILURE");
126 #define WVPASS(cond) wvtest_pass(cond, __FILE__, __LINE__, #cond)
/* Pretty-print one memguard test: the call parameters, the code under test,
 * and the decoded time/memory counters ('!' marks an overflowed counter),
 * then assert via WVPASS that the error bit of the return value is clear.
 * NOTE(review): the `call` buffer declaration, the closing `);` of both the
 * snprintf and the printf (plus printf's final file-name argument), and the
 * function braces are missing from this fragment. Also note %lu is used for
 * uint64_t arguments — correct only where unsigned long is 64-bit; PRIu64
 * would be portable. */
128 static void print_test_info(uint64_t timeout_us, uint64_t mem_budget, uint64_t flags,
129 int64_t retval, const char *code)
132 snprintf(call, sizeof(call), "memguard(%luus, %lumiss, %c%c):",
133 timeout_us, mem_budget,
134 flags & MGF_PERIODIC ? 'P' : '-',
135 flags & MGF_MASK_INT ? 'I' : '-'
137 struct mg_ret r = mgret(retval);
138 printf("Testing \"CPU%d: %-40s %-25s ⇒ time:%8u%c mem:%8u%c\" in %s:\n",
139 sched_getcpu(), call, code,
140 r.time, r.time_ovf ? '!' : ' ',
141 r.mem, r.mem_ovf ? '!' : ' ',
143 WVPASS((retval & MGRET_ERROR_MASK) == 0);
/* Run `code` under a memguard budget: arm memguard with the given timeout
 * and memory budget, execute the code, then call memguard(0, 0, 0) to read
 * back the counters, and report via print_test_info(). Callers assign the
 * result to a struct mg_ret, so the full macro presumably ends with mgret()
 * — the surrounding statement-expression lines are missing from this
 * fragment. (No comments may be placed between the `\`-continued lines.) */
146 #define MGTEST(timeout_us, mem_budget, flags, code) \
149 retval = memguard(timeout_us, mem_budget, flags); \
151 retval = memguard(0, 0, 0); \
152 print_test_info(timeout_us, mem_budget, flags, retval, #code); \
/* Per-CPU test body: pin the thread to its CPU, synchronize with all other
 * test threads on the barrier, then run the test matrix for every flag
 * combination (0..3) followed by a final set of throttling tests.
 * NOTE(review): many interior lines (declarations of `set` and `r`, the
 * CPU_SET setup, braces, and the return) are missing from this fragment. */
156 void *test_thread(void *ptr)
159 int cpu = (intptr_t)ptr;
161 /* Ensure that our test thread does not migrate to another CPU
162 * during memguarding */
165 if (sched_setaffinity(0, sizeof(set), &set) < 0)
166 err(1, "sched_setaffinity");
/* Start all per-CPU threads at the same time. */
168 pthread_barrier_wait(&barrier);
/* Exercise all four combinations of MGF_PERIODIC | MGF_MASK_INT. */
172 for (uint64_t flags = 0; flags < 4; flags++) {
173 compute_kernel(1); /* warm up */
175 r = MGTEST(500, 10000, flags, compute_kernel(17*1000*1000));
/* Time-budget tests: pure CPU spinning, the reported time must be
 * within ~10% of the requested spin duration. */
178 ///////////////////////////////////////////////////////
179 r = MGTEST(5000, 10000, flags, compute_kernel(1000));
181 WVPASS(r.time > 900);
183 r = MGTEST(500, 10000, flags, compute_kernel(1000));
185 WVPASS(r.time > 900);
187 r = MGTEST(5000, 10000, flags, compute_kernel(2000));
189 WVPASS(r.time > 1900);
191 r = MGTEST(500, 10000, flags, compute_kernel(2000));
193 WVPASS(r.time > 1900);
/* Memory-budget tests, 100k lines: the counted memory events must be
 * within ~10% of the number of issued accesses. */
195 ///////////////////////////////////////////////////////
196 r = MGTEST(100000, 500000, flags, read_memory(100000));
198 WVPASS(r.mem >= 90000);
200 r = MGTEST(100000, 50000, flags, read_memory(100000));
202 WVPASS(r.mem >= 90000);
204 r = MGTEST(100000, 500000, flags, read_memory_rnd(100000));
206 WVPASS(r.mem >= 90000);
208 r = MGTEST(100000, 50000, flags, read_memory_rnd(100000));
210 WVPASS(r.mem >= 90000);
212 r = MGTEST(100000, 500000, flags, write_memory(100000));
214 WVPASS(r.mem >= 90000);
216 r = MGTEST(100000, 50000, flags, write_memory(100000));
218 WVPASS(r.mem >= 90000);
/* Same again with 1M lines. */
221 ///////////////////////////////////////////////////////
222 r = MGTEST(100000, 5000000, flags, read_memory(1000000));
224 WVPASS(r.mem >= 900000);
226 r = MGTEST(100000, 500000, flags, read_memory(1000000));
228 WVPASS(r.mem >= 900000);
230 r = MGTEST(100000, 5000000, flags, read_memory_rnd(1000000));
232 WVPASS(r.mem >= 900000);
234 r = MGTEST(100000, 500000, flags, read_memory_rnd(1000000));
236 WVPASS(r.mem >= 900000);
238 r = MGTEST(100000, 5000000, flags, write_memory(1000000));
240 WVPASS(r.mem >= 900000);
242 r = MGTEST(100000, 500000, flags, write_memory(1000000));
244 WVPASS(r.mem >= 900000);
/* Throttling tests: shrinking the periodic budget must prolong
 * execution — each run is expected to take more than twice as long
 * as the previous, better-funded one. */
249 /* Throttling tests */
250 struct mg_ret r1, r2, r3;
251 r1 = MGTEST(10000, 9000, MGF_PERIODIC | MGF_MASK_INT, read_memory(100000));
252 WVPASS(r1.time > 100*1000);
253 r2 = MGTEST(10000, 3000, MGF_PERIODIC | MGF_MASK_INT, read_memory(100000));
254 WVPASS(r2.time > 2 * r1.time);
255 r3 = MGTEST(10000, 1000, MGF_PERIODIC | MGF_MASK_INT, read_memory(100000));
256 WVPASS(r3.time > 2 * r2.time);
/* Entry point: argv[1] is a hexadecimal CPU mask; spawn one test_thread per
 * selected CPU (synchronized by the barrier) and join them all.
 * NOTE(review): interior lines (argc check, the warm-up loop body, the
 * cpu_count accumulation, braces, return) are missing from this fragment.
 * `i < sizeof(memory)` compares int against size_t — harmless at 128 MiB,
 * but a size_t loop variable would be cleaner. */
261 int main(int argc, char *argv[])
265 pthread_t threads[MAX_CORES];
267 /* TODO: currently shared memory */
/* 64-byte stride over the whole buffer — presumably pre-faults every
 * page/cache line before the tests; loop body not visible. */
268 for (int i = 0; i < sizeof(memory); i += 64)
272 cpu_mask = strtol(argv[1], NULL, 16);
/* Count the CPUs selected by the mask (loop body not visible). */
274 for (int i = 0; i < MAX_CORES; i++) {
275 if (cpu_mask & (1 << i)) {
280 printf("CPU count:%d CPU mask:%#x\n", cpu_count, cpu_mask);
281 int s = pthread_barrier_init(&barrier, NULL, cpu_count);
283 error(1, s, "pthread_barrier_init");
/* One thread per selected CPU; the CPU number travels as the void* arg. */
285 for (intptr_t i = 0; i < MAX_CORES; i++) {
286 if (cpu_mask & (1 << i))
287 pthread_create(&threads[i], NULL, test_thread, (void *)i);
290 for (int i = 0; i < MAX_CORES; i++) {
291 if (cpu_mask & (1 << i)) {
292 pthread_join(threads[i], NULL);