3 * Dynamic memory manager
8 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
11 * Redistribution and use in source and binary forms, with or without modification,
12 * are permitted provided that the following conditions are met:
14 * 1. Redistributions of source code must retain the above copyright notice,
15 * this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright notice,
17 * this list of conditions and the following disclaimer in the documentation
18 * and/or other materials provided with the distribution.
19 * 3. The name of the author may not be used to endorse or promote products
20 * derived from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
25 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
26 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
27 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
30 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
33 * This file is part of the lwIP TCP/IP stack.
35 * Author: Adam Dunkels <adam@sics.se>
42 #if !MEM_LIBC_MALLOC /* don't build if not configured for use in lwipopts.h */
47 #include "lwip/stats.h"
52 /* lwIP head implemented with different sized pools */
55 * This structure is used to save the pool one element came from.
63 * Allocate memory: determine the smallest pool that is big enough
64 * to contain an element of 'size' and get an element from that pool.
66 * @param size the size in bytes of the memory needed
67 * @return a pointer to the allocated memory or NULL if the pool is empty
70 mem_malloc(mem_size_t size)
72 struct mem_helper *element;
75 for (poolnr = MEMP_POOL_START; poolnr <= MEMP_POOL_END; poolnr++) {
76 if ((size + sizeof(struct mem_helper)) <= memp_sizes[poolnr]) {
80 if (poolnr == MEMP_MAX) {
81 LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
84 element = (struct mem_helper*)memp_malloc(poolnr);
85 if (element == NULL) {
86 /* No need to DEBUGF or ASSERT: This error is already
87 taken care of in memp.c */
88 /** @todo: we could try a bigger pool if this one is empty! */
92 element->poolnr = poolnr;
99 * Free memory previously allocated by mem_malloc. Loads the pool number
100 * and calls memp_free with that pool number to put the element back into
103 * @param rmem the memory element to free
108 struct mem_helper *hmem = (struct mem_helper*)rmem;
110 LWIP_ASSERT("rmem != NULL", (rmem != NULL));
111 LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));
115 LWIP_ASSERT("hmem != NULL", (hmem != NULL));
116 LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
117 LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));
119 memp_free(hmem->poolnr, hmem);
122 #else /* MEM_USE_POOLS */
123 /* lwIP replacement for your libc malloc() */
125 /* This does not have to be aligned since for getting its size,
126 * we only use the macro SIZEOF_STRUCT_MEM, which automatically alignes.
134 /* All allocated blocks will be MIN_SIZE bytes big, at least!
135 * MIN_SIZE can be overridden to suit your needs. Smaller values save space,
136 * larger values could prevent too small blocks to fragment the RAM too much. */
139 #endif /* MIN_SIZE */
140 #define MIN_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
141 #define SIZEOF_STRUCT_MEM LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
142 #define MEM_SIZE_ALIGNED LWIP_MEM_ALIGN_SIZE(MEM_SIZE)
144 static struct mem *ram_end;
145 /* the heap. we need one struct mem at the end and some room for alignment */
146 static u8_t ram_heap[MEM_SIZE_ALIGNED + (2*SIZEOF_STRUCT_MEM) + MEM_ALIGNMENT];
147 static u8_t *ram; /* for alignment, ram is now a pointer instead of an array */
148 static struct mem *lfree; /* pointer to the lowest free block */
149 static sys_sem_t mem_sem; /* concurrent access protection */
152 * "Plug holes" by combining adjacent empty struct mems.
153 * After this function is through, there should not exist
154 * one empty struct mem pointing to another empty struct mem.
156 * @param mem this points to a struct mem which just has been freed
157 * @internal this function is only called by mem_free() and mem_realloc()
159 * This assumes access to the heap is protected by the calling function
163 plug_holes(struct mem *mem)
168 LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
169 LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
170 LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);
172 /* plug hole forward */
173 LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);
175 nmem = (struct mem *)&ram[mem->next];
176 if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
177 /* if mem->next is unused and not end of ram, combine mem and mem->next */
181 mem->next = nmem->next;
182 ((struct mem *)&ram[nmem->next])->prev = (u8_t *)mem - ram;
185 /* plug hole backward */
186 pmem = (struct mem *)&ram[mem->prev];
187 if (pmem != mem && pmem->used == 0) {
188 /* if mem->prev is unused, combine mem and mem->prev */
192 pmem->next = mem->next;
193 ((struct mem *)&ram[mem->next])->prev = (u8_t *)pmem - ram;
198 * Zero the heap and initialize start, end and lowest-free
205 LWIP_ASSERT("Sanity check alignment",
206 (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT-1)) == 0);
209 memset(ram_heap, 0, sizeof(ram_heap));
210 ram = LWIP_MEM_ALIGN(ram_heap);
211 /* initialize the start of the heap */
212 mem = (struct mem *)ram;
213 mem->next = MEM_SIZE_ALIGNED;
216 /* initialize the end of the heap */
217 ram_end = (struct mem *)&ram[MEM_SIZE_ALIGNED];
219 ram_end->next = MEM_SIZE_ALIGNED;
220 ram_end->prev = MEM_SIZE_ALIGNED;
222 mem_sem = sys_sem_new(1);
224 /* initialize the lowest-free pointer to the start of the heap */
225 lfree = (struct mem *)ram;
228 lwip_stats.mem.avail = MEM_SIZE_ALIGNED;
229 #endif /* MEM_STATS */
232 /* Put a struct mem back on the heap
233 * @param rmem is the data portion of a struct mem as returned by a previous
234 * call to mem_malloc()
242 LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | 2, ("mem_free(p == NULL) was called.\n"));
245 LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0);
247 /* protect the heap from concurrent access */
248 sys_arch_sem_wait(mem_sem, 0);
250 LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
251 (u8_t *)rmem < (u8_t *)ram_end);
253 if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
254 LWIP_DEBUGF(MEM_DEBUG | 3, ("mem_free: illegal memory\n"));
256 ++lwip_stats.mem.err;
257 #endif /* MEM_STATS */
258 sys_sem_signal(mem_sem);
261 /* Get the corresponding struct mem ... */
262 mem = (struct mem *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
263 /* ... which has to be in a used state ... */
264 LWIP_ASSERT("mem_free: mem->used", mem->used);
265 /* ... and is now unused. */
269 /* the newly freed struct is now the lowest */
274 lwip_stats.mem.used -= mem->next - ((u8_t *)mem - ram);
275 #endif /* MEM_STATS */
277 /* finally, see if prev or next are free also */
279 sys_sem_signal(mem_sem);
282 /* In contrast to its name, mem_realloc can only shrink memory, not expand it.
283 * Since the only use (for now) is in pbuf_realloc (which also can only shrink),
284 * this shouldn't be a problem!
287 mem_realloc(void *rmem, mem_size_t newsize)
290 mem_size_t ptr, ptr2;
291 struct mem *mem, *mem2;
293 /* Expand the size of the allocated memory region so that we can
294 adjust for alignment. */
295 newsize = LWIP_MEM_ALIGN_SIZE(newsize);
297 if(newsize < MIN_SIZE_ALIGNED) {
298 /* every data block must be at least MIN_SIZE_ALIGNED long */
299 newsize = MIN_SIZE_ALIGNED;
302 if (newsize > MEM_SIZE_ALIGNED) {
306 LWIP_ASSERT("mem_realloc: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
307 (u8_t *)rmem < (u8_t *)ram_end);
309 if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
310 LWIP_DEBUGF(MEM_DEBUG | 3, ("mem_realloc: illegal memory\n"));
313 /* Get the corresponding struct mem ... */
314 mem = (struct mem *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
315 /* ... and its offset pointer */
316 ptr = (u8_t *)mem - ram;
318 size = mem->next - ptr - SIZEOF_STRUCT_MEM;
319 LWIP_ASSERT("mem_realloc can only shrink memory", newsize <= size);
320 if (newsize > size) {
324 if (newsize == size) {
325 /* No change in size, simply return */
329 /* protect the heap from concurrent access */
330 sys_arch_sem_wait(mem_sem, 0);
333 lwip_stats.mem.used -= (size - newsize);
334 #endif /* MEM_STATS */
336 mem2 = (struct mem *)&ram[mem->next];
337 if(mem2->used == 0) {
338 /* The next struct is unused, we can simply move it at little */
340 /* remember the old next pointer */
342 /* create new struct mem which is moved directly after the shrinked mem */
343 ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
345 lfree = (struct mem *)&ram[ptr2];
347 mem2 = (struct mem *)&ram[ptr2];
349 /* restore the next pointer */
351 /* link it back to mem */
355 /* last thing to restore linked list: as we have moved mem2,
356 * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
357 * the end of the heap */
358 if (mem2->next != MEM_SIZE_ALIGNED) {
359 ((struct mem *)&ram[mem2->next])->prev = ptr2;
361 /* no need to plug holes, we've already done that */
362 } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
363 /* Next struct is used but there's room for another struct mem with
364 * at least MIN_SIZE_ALIGNED of data.
365 * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
366 * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
367 * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
368 * region that couldn't hold data, but when mem->next gets freed,
369 * the 2 regions would be combined, resulting in more free memory */
370 ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
371 mem2 = (struct mem *)&ram[ptr2];
376 mem2->next = mem->next;
379 if (mem2->next != MEM_SIZE_ALIGNED) {
380 ((struct mem *)&ram[mem2->next])->prev = ptr2;
382 /* the original mem->next is used, so no need to plug holes! */
385 next struct mem is used but size between mem and mem2 is not big enough
386 to create another struct mem
387 -> don't do anyhting.
388 -> the remaining space stays unused since it is too small
390 sys_sem_signal(mem_sem);
395 * Adam's mem_malloc() plus solution for bug #17922
397 * Allocate a block of memory with a minimum of 'size' bytes.
398 * @param size is the minimum size of the requested block in bytes.
400 * Note that the returned value will always be aligned.
403 mem_malloc(mem_size_t size)
405 mem_size_t ptr, ptr2;
406 struct mem *mem, *mem2;
412 /* Expand the size of the allocated memory region so that we can
413 adjust for alignment. */
414 size = LWIP_MEM_ALIGN_SIZE(size);
416 if(size < MIN_SIZE_ALIGNED) {
417 /* every data block must be at least MIN_SIZE_ALIGNED long */
418 size = MIN_SIZE_ALIGNED;
421 if (size > MEM_SIZE_ALIGNED) {
425 /* protect the heap from concurrent access */
426 sys_arch_sem_wait(mem_sem, 0);
428 /* Scan through the heap searching for a free block that is big enough,
429 * beginning with the lowest free block.
431 for (ptr = (u8_t *)lfree - ram; ptr < MEM_SIZE_ALIGNED - size;
432 ptr = ((struct mem *)&ram[ptr])->next) {
433 mem = (struct mem *)&ram[ptr];
436 (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
437 /* mem is not used and at least perfect fit is possible:
438 * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */
440 if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
441 /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
442 * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
443 * -> split large block, create empty remainder,
444 * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
445 * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
446 * struct mem would fit in but no data between mem2 and mem2->next
447 * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
448 * region that couldn't hold data, but when mem->next gets freed,
449 * the 2 regions would be combined, resulting in more free memory
451 ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
452 /* create mem2 struct */
453 mem2 = (struct mem *)&ram[ptr2];
455 mem2->next = mem->next;
457 /* and insert it between mem and mem->next */
461 if (mem2->next != MEM_SIZE_ALIGNED) {
462 ((struct mem *)&ram[mem2->next])->prev = ptr2;
465 lwip_stats.mem.used += (size + SIZEOF_STRUCT_MEM);
466 if (lwip_stats.mem.max < lwip_stats.mem.used) {
467 lwip_stats.mem.max = lwip_stats.mem.used;
469 #endif /* MEM_STATS */
471 /* (a mem2 struct does no fit into the user data space of mem and mem->next will always
472 * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
473 * take care of this).
474 * -> near fit or excact fit: do not split, no mem2 creation
475 * also can't move mem->next directly behind mem, since mem->next
476 * will always be used at this point!
480 lwip_stats.mem.used += mem->next - ((u8_t *)mem - ram);
481 if (lwip_stats.mem.max < lwip_stats.mem.used) {
482 lwip_stats.mem.max = lwip_stats.mem.used;
484 #endif /* MEM_STATS */
488 /* Find next free block after mem and update lowest free pointer */
489 while (lfree->used && lfree != ram_end) {
490 lfree = (struct mem *)&ram[lfree->next];
492 LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
494 sys_sem_signal(mem_sem);
495 LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
496 (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
497 LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
498 (unsigned long)((u8_t *)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
499 LWIP_ASSERT("mem_malloc: sanity check alignment",
500 (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);
502 return (u8_t *)mem + SIZEOF_STRUCT_MEM;
505 LWIP_DEBUGF(MEM_DEBUG | 2, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
507 ++lwip_stats.mem.err;
508 #endif /* MEM_STATS */
509 sys_sem_signal(mem_sem);
513 #endif /* MEM_USE_POOLS */
/**
 * Contiguously allocate a zero-initialized array of 'count' objects of
 * 'size' bytes each (lwIP replacement for libc calloc()).
 *
 * @param count number of objects to allocate
 * @param size size in bytes of one object
 * @return pointer to the zeroed memory, or NULL on failure (out of memory
 *         or count * size overflows size_t)
 */
void *mem_calloc(size_t count, size_t size)
{
  void *p;

  /* guard against count * size overflowing size_t, which would silently
     allocate a block smaller than the caller expects (CERT MEM07-C) */
  if (size != 0 && count > ((size_t)-1) / size) {
    return NULL;
  }

  /* allocate 'count' objects of size 'size' */
  p = mem_malloc(count * size);
  if (p != NULL) {
    /* zero the memory */
    memset(p, 0, count * size);
  }
  return p;
}
526 #endif /* !MEM_LIBC_MALLOC */