1 // base_init() puts a Mem_region_map on the stack, which makes its frame
2 // slightly larger than our warning limit; this is init-only code, so it's ok.
3 #pragma GCC diagnostic ignored "-Wframe-larger-than="
5 IMPLEMENTATION [ia32,ux,amd64]:
12 #include "mem_region.h"
15 #include "helping_lock.h"
17 PUBLIC static FIASCO_INIT
// Size and reserve the kernel's physical memory pool at boot:
//  1. build a map of free physical regions from the KIP,
//  2. compute the requested pool size (explicit "kmemsize" option or a
//     configured percentage of available RAM, clamped to kernel_mem_max
//     and to the virtually mappable window),
//  3. scan free blocks from the top of physical memory downward until the
//     request is satisfied, superpage-aligning the base,
//  4. publish the chosen range(s) in the KIP as Mem_desc::Kernel_tmp and
//     record the mapping base/size in Mem_layout.
// NOTE(review): this excerpt omits intermediate source lines (embedded
// line numbers jump); comments below describe only the visible code.
19 Kmem_alloc::base_init()
21 //printf("Kmem_alloc::base_init(): kip=%p\n", Kip::k());
22 unsigned long available_size = 0;
23 unsigned long requested_size;
25 Mem_region_map<64> map;
26 //Kip::k()->add_mem_region(Mem_desc(0xc7ed000, 0xfeecfff, Mem_desc::Reserved));
27 //Kip::k()->add_mem_region(Mem_desc(0xea00000, 0xfeecfff, Mem_desc::Reserved));
// Collect all free physical regions; returns total free bytes.
29 available_size = create_free_map(Kip::k(), &map);
// Command-line "kmemsize" option is given in KiB, hence the << 10.
31 requested_size = Koptions::o()->kmemsize << 10;
// Fallback (presumably when no explicit size was given — the guarding
// condition is on a line not visible here): take a configured percentage
// of available memory, capped at the configured maximum.
34 requested_size = available_size / 100 * Config::kernel_mem_per_cent;
35 if (requested_size > Config::kernel_mem_max)
36 requested_size = Config::kernel_mem_max;
// 0-Mem_layout::Physmem is the size of the kernel's physical-memory
// mapping window (unsigned wrap-around of the window's start address).
39 if (requested_size > (0-Mem_layout::Physmem))
40 requested_size = 0-Mem_layout::Physmem; // maximum mappable memory
// Round the request up to a whole number of pages.
42 requested_size = (requested_size + Config::PAGE_SIZE - 1)
43 & ~(Config::PAGE_SIZE - 1);
45 //printf("Kmem_alloc: available_memory=%lu KB requested_size=%lu\n",
46 // available_size / 1024, requested_size / 1024);
48 //printf("Kmem_alloc:: available blocks:\n");
49 //for (unsigned i = 0; i < map.length(); ++i)
50 // printf(" %2u [%014lx; %014lx)\n", i, map[i].start, map[i].end+1);
52 unsigned long base = 0;
53 unsigned long sp_base = 0;
// Start from the highest free block and work downward.
54 unsigned long end = map[map.length()-1].end;
55 unsigned last = map.length();
// 'size' is the amount still to be satisfied; 'i' is declared on a line
// not visible in this excerpt.
57 unsigned long size = requested_size;
58 for (i = last; i > 0 && size > 0; --i)
60 if (map[i-1].size() >= size)
61 { // next block is sufficient
// Take 'size' bytes from the top of this block and align the base
// down to a superpage boundary.
62 base = map[i-1].end - size + 1;
63 sp_base = base & ~(Config::SUPERPAGE_SIZE-1);
// Would the span [sp_base, end] exceed the mappable window?
64 if ((end - sp_base + 1) > (0-Mem_layout::Physmem))
67 { // already a single block, try to align
68 if (sp_base >= map[i-1].start)
71 end = sp_base + size -1;
77 { // too much virtual memory, try other blocks
// Drop the topmost block from the selection and re-add its size
// to the outstanding request (control flow partially hidden).
79 size += map[last-1].size();
80 end = map[last-2].end;
82 ++i; // try same block again
// Block fully consumed; subtract it from the outstanding request.
90 size -= map[i-1].size();
96 //printf("Kmem_alloc: kernel memory from %014lx to %014lx\n", base, end+1);
97 //printf("Kmem_alloc: blocks %u-%u\n", i, last-1);
// Register the (possibly partial) first block, then all following whole
// blocks, as temporary kernel memory in the KIP.
99 Kip::k()->add_mem_region(Mem_desc(base,
100 end <= map[i].end ? end : map[i].end, Mem_desc::Kernel_tmp));
102 for (; i < last; ++i)
103 Kip::k()->add_mem_region(Mem_desc(map[i].start, map[i].end, Mem_desc::Kernel_tmp));
// Record the physical base of the kernel mapping window and the
// superpage-rounded size of the mapped range.
105 Mem_layout::kphys_base(sp_base);
106 Mem_layout::pmem_size = (end + 1 - sp_base + Config::SUPERPAGE_SIZE - 1) & ~(Config::SUPERPAGE_SIZE-1);
// Constructor: hand every Kernel_tmp region recorded in the KIP (by
// base_init()) to the buddy allocator 'a', retype it to Reserved, and
// accumulate the total in _orig_free.
// NOTE(review): intermediate source lines are missing from this excerpt;
// e.g. the branch using 'initialized' to call a->init() only once is not
// fully visible.
111 Kmem_alloc::Kmem_alloc()
113 //printf("Kmem_alloc::Kmem_alloc()\n");
// Iterate over all memory descriptors published in the KIP.
114 Mem_desc *md = Kip::k()->mem_descs();
115 Mem_desc const *const md_end = md + Kip::k()->num_mem_descs();
116 bool initialized = false;
118 for (; md < md_end; ++md)
// Only physical descriptors are of interest here.
120 if (md->is_virtual())
123 unsigned long s = md->start(), e = md->end();
125 // Sweep out bogus descriptors (that have the end before the start)
128 md->type(Mem_desc::Undefined);
132 if (md->type() == Mem_desc::Kernel_tmp)
// Translate the physical region into the kernel's pmem window.
134 unsigned long s_v = Mem_layout::phys_to_pmem(s);
// Allocator base must be aligned to the allocator's maximum block size.
138 a->init(s_v & ~(Kmem_alloc::Alloc::Max_size - 1));
139 //printf("Kmem_alloc: allocator base = %014lx\n", s_v & ~(Kmem_alloc::Alloc::Max_size - 1));
141 //printf(" Kmem_alloc: block %014lx(%014lx) size=%lx\n", s_v, s, e - s + 1);
// Donate the region to the allocator and mark it consumed in the KIP.
142 a->add_mem((void*)s_v, e - s + 1);
143 md->type(Mem_desc::Reserved);
// Track the total pool size for the statistics in debug_dump().
144 _orig_free += e - s + 1;
147 //printf("Kmem_alloc: construction done\n");
150 //-----------------------------------------------------------------------------
151 IMPLEMENTATION [{ia32,ux,amd64}-debug]:
// Print kernel-memory usage statistics: percentage used and used/total
// in KiB (rounded up).  Debug builds only ({ia32,ux,amd64}-debug).
// NOTE(review): the declaration lines preceding the name are not visible
// in this excerpt.
157 Kmem_alloc::debug_dump()
161 unsigned long free = a->avail();
// div32() performs the 64-by-32-bit division of the percentage scaled
// by 100; orig_free() is the pool size recorded at construction.
162 printf("Used %ld%%, %ldKB out of %ldKB of Kmem\n",
163 (unsigned long) div32(100ULL * (orig_free() - free), orig_free()),
164 (orig_free() - free + 1023)/1024,
165 (orig_free() + 1023)/1024);
170 Kmem_alloc::orig_free()