// our own implementation of C++ memory management: disallow dynamic
// allocation (except where class-specific new/delete functions exist)

// more specialized memory allocation/deallocation functions follow
// below in the "Kmem" namespace

INTERFACE [ia32,amd64,ux]:

#include "globalconfig.h"
#include "initcalls.h"
#include "mem_layout.h"

    Virt_base = 0x20000000,

/**
 * The system's base facilities for kernel-memory management.
 * The kernel memory is a singleton object. We access it through a
 * static class interface.
 */
class Kmem : public Mem_layout

  friend class Device_map;
  friend class Jdb_dbinfo;
  friend class Jdb_kern_info_misc;
  friend class Vmem_alloc;

  Kmem();			// default constructors are undefined

  static unsigned long pmem_cpu_page, cpu_page_vm;

  static Device_map dev_map;

    mem_user_max = Mem_layout::User_max,
  static void init_paging(Cpu const &boot_cpu);
  static void init_boot_cpu(Cpu const &boot_cpu);
  static void init_app_cpu(Cpu const &cpu);
  static Mword is_kmem_page_fault(Address pfa, Mword error);
  static Mword is_ipc_page_fault(Address pfa, Mword error);
  static Mword is_io_bitmap_page_fault(Address pfa);
  static Address kcode_start();
  static Address kcode_end();
  static Address virt_to_phys(const void *addr);

typedef Kmem Kmem_space;
//----------------------------------------------------------------------------
INTERFACE [ia32, amd64]:

  friend class Kernel_task;

  static Address user_max();

  static Unsigned8 *io_bitmap_delimiter;
  static Address kphys_start, kphys_end;
  static Pdir *kdir;		///< Kernel page directory

//--------------------------------------------------------------------------
IMPLEMENTATION [ia32, amd64]:

#include "mapped_alloc.h"
#include "mem_unit.h"
#include "std_macros.h"
  for (unsigned i = 0; i < Max; ++i)

Device_map::lookup_idx(Address phys)

  Address p = phys & (~0UL << Config::SUPERPAGE_SHIFT);
  for (unsigned i = 0; i < Max; ++i)

template< typename T >
Device_map::lookup(T *phys)

  unsigned idx = lookup_idx((Address)phys);

  return (T*)((Virt_base + idx * Config::SUPERPAGE_SIZE)
              | ((Address)phys & ~(~0UL << Config::SUPERPAGE_SHIFT)));

Device_map::map(Address phys, bool /*cache*/)

  unsigned idx = lookup_idx(phys);

    return (Virt_base + idx * Config::SUPERPAGE_SIZE)
           | (phys & ~(~0UL << Config::SUPERPAGE_SHIFT));

  Address p = phys & (~0UL << Config::SUPERPAGE_SHIFT);
  Mapped_allocator *const alloc = Mapped_allocator::allocator();
  for (unsigned i = 0; i < Max; ++i)

          Virt_addr(Virt_base + (i*Config::SUPERPAGE_SIZE)),
          Virt_size(Config::SUPERPAGE_SIZE),
          Pt_entry::Dirty | Pt_entry::Writable | Pt_entry::Referenced,
          Pdir::super_level(), alloc);

      return (Virt_base + (i*Config::SUPERPAGE_SIZE))
             | (phys & ~(~0UL << Config::SUPERPAGE_SHIFT));

template< typename T >
Device_map::map(T *phys, bool cache = true)
{ return (T*)map((Address)phys, cache); }
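// Usage sketch (illustrative only, not part of the kernel): how a friend
// class such as Kernel_task could use the device map to reach a
// memory-mapped device. `dev_phys` and `Dev_regs` are hypothetical names;
// map() ignores the cache flag on this architecture, and error handling
// is elided.
//
//   struct Dev_regs { Unsigned32 status; };
//   Dev_regs *regs = Kmem::dev_map.map((Dev_regs *)dev_phys, false);
//   Unsigned32 s = regs->status;                  // device register access
//   Kmem::dev_map.unmap((void const *)dev_phys);  // unmap by physical address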
Device_map::unmap(void const *phys)

  unsigned idx = lookup_idx((Address)phys);

  Address v = Virt_base + (idx * Config::SUPERPAGE_SIZE);

  Kmem::kdir->unmap(Virt_addr(v), Virt_size(Config::SUPERPAGE_SIZE), -1);

Unsigned8 *Kmem::io_bitmap_delimiter;
Address Kmem::kphys_start, Kmem::kphys_end;
Device_map Kmem::dev_map;

Kmem::io_bitmap_delimiter_page()

  return reinterpret_cast<Address>(io_bitmap_delimiter);
/**
 * Compute the physical address from a kernel-virtual address.
 * @param addr a virtual address
 * @return the corresponding physical address if a mapping exists.
 */
IMPLEMENT inline NEEDS["paging.h","std_macros.h","mem_layout.h"]
Address
Kmem::virt_to_phys (const void *addr)
{
  Address a = reinterpret_cast<Address>(addr);

  if (EXPECT_TRUE (Mem_layout::in_pmem(a)))
    return Mem_layout::pmem_to_phys(a);

  if (EXPECT_TRUE (Mem_layout::in_kernel_image(a)))
    return a - Mem_layout::Kernel_image_offset;

  return kdir->virt_to_phys(a);
}
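// Example (illustrative, not from the original source): translating a
// kernel-virtual buffer to its physical frame, e.g. before handing the
// address to hardware. `buf` is a hypothetical allocation.
//
//   void *buf = Mapped_allocator::allocator()->alloc(Config::PAGE_SHIFT);
//   Address frame = Kmem::virt_to_phys(buf);   // physical frame backing buf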
// Only used for initialization and kernel debugger
Kmem::map_phys_page_tmp(Address phys, Mword idx)

  unsigned long pte = phys & Pt_entry::Pfn;

      case 0: virt = Mem_layout::Kmem_tmp_page_1; break;
      case 1: virt = Mem_layout::Kmem_tmp_page_2; break;
      default: return ~0UL;

  static unsigned long tmp_phys_pte[2] = { ~0UL, ~0UL };

  if (pte != tmp_phys_pte[idx])
      // map two consecutive pages so as to be able to access
      // objects that cross the page boundary
      map_phys_page(phys, virt, false, true);
      map_phys_page(phys+0x1000, virt+0x1000, false, true);
      tmp_phys_pte[idx] = pte;

  return virt + phys - pte;
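// Example (illustrative): inspecting an arbitrary physical page from the
// kernel debugger via temporary slot 0. `phys` is a hypothetical address.
//
//   Address v = Kmem::map_phys_page_tmp(phys, 0);
//   Mword first_word = *reinterpret_cast<Mword *>(v);  // read via tmp mapping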
Address Kmem::kernel_image_start()
{ return virt_to_phys (&Mem_layout::image_start) & Config::PAGE_MASK; }

IMPLEMENT inline Address Kmem::kcode_start()
{ return virt_to_phys (&Mem_layout::start) & Config::PAGE_MASK; }

IMPLEMENT inline Address Kmem::kcode_end()
{
  return (virt_to_phys (&Mem_layout::end) + Config::PAGE_SIZE)
         & Config::PAGE_MASK;
}
/** Return number of IPC slots to copy */
PUBLIC static inline NEEDS["config.h"]

{ return (8 << 20) / Config::SUPERPAGE_SIZE; }
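// For the record: with 4 MB superpages (ia32 with PSE) this evaluates to
// (8 << 20) / (4 << 20) = 2 slots, with 2 MB superpages (amd64) to 4 slots;
// i.e. the IPC window spans 8 MB worth of superpage-sized mappings.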
IMPLEMENT inline NEEDS["mem_layout.h"]
Mword
Kmem::is_io_bitmap_page_fault(Address addr)
{
  return addr >= Mem_layout::Io_bitmap &&
         addr <= Mem_layout::Io_bitmap + Mem_layout::Io_port_max / 8;
}
IMPLEMENT inline NEEDS["mem_layout.h"]
Mword
Kmem::is_kmem_page_fault(Address addr, Mword /*error*/)
{
  return addr >= mem_user_max;
}
// Establish a 4k-mapping
Kmem::map_phys_page(Address phys, Address virt,
                    bool cached, bool global, Address *offs=0)

  Pdir::Iter i = kdir->walk(Virt_addr(virt));

  Mword pte = phys & Config::PAGE_MASK;

  assert(i.shift() == Config::PAGE_SHIFT);

  *e = pte | Pt_entry::Valid | Pt_entry::Writable
     | Pt_entry::Referenced | Pt_entry::Dirty
     | (cached ? 0 : (Pt_entry::Write_through | Pt_entry::Noncacheable))
     | (global ? Pt_entry::global() : 0);
  Mem_unit::tlb_flush(virt);
PUBLIC static FIASCO_INIT

  Mapped_allocator *const alloc = Mapped_allocator::allocator();

  kdir = (Pdir*)alloc->alloc(Config::PAGE_SHIFT);
  memset (kdir, 0, Config::PAGE_SIZE);

  unsigned long cpu_features = Cpu::get_features();
  bool superpages = cpu_features & FEAT_PSE;

  printf("Superpages: %s\n", superpages ? "yes" : "no");

  Pdir::have_superpages(superpages);

    Cpu::set_cr4(Cpu::get_cr4() | CR4_PSE);

  if (cpu_features & FEAT_PGE)

      Pt_entry::enable_global();
      Cpu::set_cr4 (Cpu::get_cr4() | CR4_PGE);
  // set up the kernel mapping for physical memory. mark all pages as
  // referenced and modified (so when touching the respective pages
  // later, we save the CPU overhead of marking the pd/pt entries like
  // this)
  // we also set up a one-to-one virt-to-phys mapping for two reasons:
  // (1) so that we switch to the new page table early and re-use the
  //     segment descriptors set up by boot_cpu.cc. (we'll set up our
  //     own descriptors later.) we only need the first 4MB for that.
  // (2) a one-to-one phys-to-virt mapping in the kernel's page directory
  //     sometimes comes in handy (mostly useful for debugging)
  kdir->map(0, Virt_addr(0), Virt_size(4 << 20),
            Pt_entry::Dirty | Pt_entry::Writable | Pt_entry::Referenced,
            Pdir::super_level(), alloc);

  kdir->map(Mem_layout::Kernel_image_phys,
            Virt_addr(Mem_layout::Kernel_image),
            Virt_size(Config::SUPERPAGE_SIZE),
            Pt_entry::Dirty | Pt_entry::Writable | Pt_entry::Referenced
            | Pt_entry::global(), Pdir::super_level(), alloc);

  if (!Mem_layout::Adap_in_kernel_image)
    kdir->map(Mem_layout::Adap_image_phys,
              Virt_addr(Mem_layout::Adap_image),
              Virt_size(Config::SUPERPAGE_SIZE),
              Pt_entry::Dirty | Pt_entry::Writable | Pt_entry::Referenced
              | Pt_entry::global(), Pdir::super_level(), alloc);

  // map the last 64MB of physical memory as kernel memory
  kdir->map(Mem_layout::pmem_to_phys(Mem_layout::Physmem),
            Virt_addr(Mem_layout::Physmem), Virt_size(Mem_layout::pmem_size),
            Pt_entry::Writable | Pt_entry::Referenced | Pt_entry::global(),
            Pdir::super_level(), alloc);
  // The service page directory entry points to a universally usable
  // page table which is currently used for the Local APIC and the
  assert((Mem_layout::Service_page & ~Config::SUPERPAGE_MASK) == 0);

  Pdir::Iter pt = kdir->walk(Virt_addr(Mem_layout::Service_page), 100, alloc);
  // kernel mode must honor write-protected page table entries (CR0.WP)
  Cpu::set_cr0(Cpu::get_cr0() | CR0_WP);

  // now switch to our new page table
  Cpu::set_pdbr(Mem_layout::pmem_to_phys(kdir));

  assert((Mem_layout::Io_bitmap & ~Config::SUPERPAGE_MASK) == 0);

      = 0x10 + Config::Max_num_cpus * (sizeof(Tss) + 256);

  if (cpu_page_size < Config::PAGE_SIZE)
    cpu_page_size = Config::PAGE_SIZE;

  pmem_cpu_page = Mem_layout::pmem_to_phys(alloc->unaligned_alloc(cpu_page_size));
401 printf("Kmem:: cpu page at %lx (%ldBytes)\n", pmem_cpu_page, cpu_page_size);
404 && Config::SUPERPAGE_SIZE - (pmem_cpu_page & ~Config::SUPERPAGE_MASK) < 0x10000)
406 // can map as 4MB page because the cpu_page will land within a
407 // 16-bit range from io_bitmap
408 *(kdir->walk(Virt_addr(Mem_layout::Io_bitmap - Config::SUPERPAGE_SIZE),
409 Pdir::Super_level, alloc).e)
410 = (pmem_cpu_page & Config::SUPERPAGE_MASK)
412 | Pt_entry::Writable | Pt_entry::Referenced
413 | Pt_entry::Dirty | Pt_entry::global() | Pt_entry::Valid;
415 cpu_page_vm = (pmem_cpu_page & ~Config::SUPERPAGE_MASK)
416 + (Mem_layout::Io_bitmap - Config::SUPERPAGE_SIZE);
421 for (i = 0; cpu_page_size > 0; ++i, cpu_page_size -= Config::PAGE_SIZE)
423 pt = kdir->walk(Virt_addr(Mem_layout::Io_bitmap - Config::PAGE_SIZE * (i+1)),
426 *pt.e = (pmem_cpu_page + i*Config::PAGE_SIZE)
427 | Pt_entry::Valid | Pt_entry::Writable
428 | Pt_entry::Referenced | Pt_entry::Dirty
429 | Pt_entry::global();
432 cpu_page_vm = Mem_layout::Io_bitmap - Config::PAGE_SIZE * i;
  if (Config::enable_io_protection)

      // the IO bitmap must be followed by one byte containing 0xff.
      // If this byte is not present, one gets page faults (or general
      // protection faults) when accessing the last port, at least on
      // a Pentium 133.
      //
      // Therefore we write 0xff into the first byte of the cpu_page
      // and map this page behind every IO bitmap.
      io_bitmap_delimiter =
        reinterpret_cast<Unsigned8 *>(cpu_page_vm);

      // did we really get the first byte of the page?
      assert((reinterpret_cast<Address>(io_bitmap_delimiter)
              & ~Config::PAGE_MASK) == 0);
      *io_bitmap_delimiter = 0xff;
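// Resulting virtual layout around the IO bitmap (sketch; the bitmap size
// follows from the 2-page bitmap noted below, i.e. Io_port_max/8 = 0x2000):
//
//   cpu_page_vm .. Io_bitmap          per-CPU data, TSS, delimiter byte
//   Io_bitmap .. Io_bitmap + 0x2000   IO permission bitmap (2 pages)
//   Io_bitmap + Io_port_max/8         0xff delimiter (cpu page mapped again
//                                     behind the bitmap)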
PUBLIC static FIASCO_INIT_CPU
Kmem::init_cpu(Cpu &cpu)

  void *cpu_mem = Mapped_allocator::allocator()->unaligned_alloc(1024);
  printf("Allocate cpu_mem @ %p\n", cpu_mem);

  // now initialize the global descriptor table
  cpu.init_gdt (__alloc(&cpu_mem, Gdt::gdt_max), user_max());
  // Allocate the task segment as the last thing from cpu_page_vm
  // because with IO protection enabled the task segment includes the
  // rest of the page and the following IO bitmap (2 pages).
  //
  // Allocate an additional 256 bytes for an emergency stack right beneath
  // the TSS. It is needed if we get an NMI or debug exception at
  // entry_sys_fast_ipc/entry_sys_fast_ipc_c/entry_sys_fast_ipc_log.
  Address tss_mem = alloc_tss(sizeof(Tss) + 256);
  assert(tss_mem + sizeof(Tss) + 256 < Mem_layout::Io_bitmap);
  if (Config::enable_io_protection)
    // this is actually tss_size + 1, including the io_bitmap_delimiter byte
    tss_size = Mem_layout::Io_bitmap + (Mem_layout::Io_port_max / 8) - tss_mem;
  else
    tss_size = sizeof(Tss) - 1;

  assert(tss_size < 0x100000); // must fit into 20 bits
  cpu.init_tss (tss_mem, tss_size);

  // force GDT... to memory before loading the registers
  asm volatile ( "" : : : "memory" );

  // set up the x86 CPU's memory model

  cpu.set_ds (Gdt::data_segment());
  cpu.set_es (Gdt::data_segment());
  cpu.set_ss (Gdt::gdt_data_kernel | Gdt::Selector_kernel);
  cpu.set_fs (Gdt::gdt_data_user | Gdt::Selector_user);
  cpu.set_gs (Gdt::gdt_data_user | Gdt::Selector_user);

  // and finally initialize the TSS

  init_cpu_arch(cpu, &cpu_mem);

//---------------------------------------------------------------------------
IMPLEMENTATION [ia32 || amd64]:

IMPLEMENT inline Address Kmem::user_max() { return ~0UL; }
//--------------------------------------------------------------------------
IMPLEMENTATION [ia32,ux,amd64]:

#include <cstddef>		// size_t
#include <cstring>		// memset

#include "boot_info.h"

#include "std_macros.h"

// static class variables
unsigned long Kmem::pmem_cpu_page, Kmem::cpu_page_vm;
static inline Address FIASCO_INIT_CPU
Kmem::__alloc(void **p, unsigned long size)
{
  Address r = ((unsigned long)*p + 0xf) & ~0xf;
  *p = (void*)(r + size);
  return r;
}
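// Worked example (illustrative): with *p == (void *)0x1001 and size == 10,
// r = (0x1001 + 0xf) & ~0xf = 0x1010 and *p advances to (void *)0x101a;
// i.e. each allocation starts 16-byte aligned, but sizes are not rounded up.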
/**
 * Compute a kernel-virtual address for a physical address.
 * This function always returns virtual addresses within the
 * physical-memory region.
 * @pre addr <= highest kernel-accessible RAM address
 * @param addr a physical address
 * @return kernel-virtual address.
 */
Kmem::phys_to_virt(Address addr)
{
  return reinterpret_cast<void *>(Mem_layout::phys_to_pmem(addr));
}
/** Allocate some bytes from a memory page */
Kmem::alloc_tss(Address size)
{
  Address ret = cpu_page_vm;
  cpu_page_vm += (size + 0xf) & ~0xf;
  return ret;
}
/**
 * Return the global page directory.
 * This is the master copy of the kernel's page directory. Kernel-memory
 * allocations are kept here and copied to task page directories lazily.
 * @return kernel's global page directory
 */
PUBLIC static inline const Pdir* Kmem::dir() { return kdir; }