// Extension of Mem_space for the usermode (Linux-hosted) port: the address
// space is backed by a host process identified by _pid.
EXTENSION class Mem_space
{
protected:
// NOTE(review): return type changes void -> int, always returning 0 --
// confirm that new callers actually consume this status value; otherwise
// the signature change is gratuitous.
- void sync_kernel() const {}
+ int sync_kernel() const { return 0; }
// Host-process PID this address space maps onto (used for remote syscalls).
pid_t _pid;
};
IMPLEMENT inline NEEDS["logdefs.h"]
void
// NOTE(review): the patch adds an extra, unnamed 'unsigned' parameter --
// presumably to match a changed generic Mem_space interface (e.g. a mode or
// CPU argument). It is unused in the visible portion of the body; confirm
// against the interface declaration.
-Mem_space::switchin_context(Mem_space *from)
+Mem_space::switchin_context(Mem_space *from, unsigned)
{
// Switching into the space we are already running in is a no-op.
if (this == from)
return;
// (continuation of an IMPLEMENT ... NEEDS[...] dependency list whose opening
// line is elided above)
"cpu_lock.h", "lock_guard.h", "mem_layout.h",
"trampoline.h"]
void
// Map [virt, virt+size) into the host process. The patch migrates the page
// attribute argument from a raw 'unsigned' bitfield to the typed Attr API.
-Mem_space::page_map(Address phys, Address virt, Address size, unsigned attr)
+Mem_space::page_map(Address phys, Address virt, Address size, Attr attr)
{
// Hold the CPU lock while the shared trampoline page is in use; the patch
// replaces the explicit Lock_guard template with the lock_guard() factory.
- Lock_guard<Cpu_lock> guard(&cpu_lock);
+ auto guard = lock_guard(cpu_lock);
// Marshal mmap-style arguments into the kernel trampoline page:
// slot 1 = address, slot 2 = length, slot 3 = prot, slot 4 = flags,
// slot 5 = fd. Presumably consumed by a remote mmap in the host process
// (the actual Trampoline::syscall is below the visible excerpt) -- confirm.
Mword *trampoline = (Mword *)Mem_layout::kernel_trampoline_page;
*(trampoline + 1) = virt;
*(trampoline + 2) = size;
// Writable pages get PROT_WRITE; the writability test moves from the legacy
// Page_writable bit to the typed Page::Rights::W() check.
- *(trampoline + 3) = PROT_READ | (attr & Page_writable ? PROT_WRITE : 0);
+ *(trampoline + 3) = PROT_READ | (attr.rights & Page::Rights::W() ? PROT_WRITE : 0);
*(trampoline + 4) = MAP_SHARED | MAP_FIXED;
*(trampoline + 5) = Boot_info::fd();
void
// Remove the mapping [virt, virt+size) from the host process by issuing
// munmap(2) in its context via the syscall trampoline.
Mem_space::page_unmap(Address virt, Address size)
{
// CPU lock serializes use of the trampoline mechanism.
- Lock_guard<Cpu_lock> guard(&cpu_lock);
+ auto guard = lock_guard(cpu_lock);
Trampoline::syscall(pid(), __NR_munmap, virt, size);
}
void
// Change protection of [virt, virt+size) in the host process via a remote
// mprotect(2) through the syscall trampoline.
// NOTE(review): this change migrates page_map() and user_to_kernel() to the
// new Attr / Page::Rights::W() attribute API, but page_protect still takes
// the legacy 'unsigned attr' and tests the old Page_writable flag. Confirm
// this asymmetry is intentional and that all callers still pass the legacy
// encoding here.
Mem_space::page_protect(Address virt, Address size, unsigned attr)
{
- Lock_guard<Cpu_lock> guard(&cpu_lock);
+ auto guard = lock_guard(cpu_lock);
Trampoline::syscall(pid(), __NR_mprotect, virt, size,
PROT_READ | (attr & Page_writable ? PROT_WRITE : 0));
// Translate a user-virtual address into a kernel-accessible pointer,
// faulting the page in (via the page-fault path) until the lookup succeeds.
// 'write' requests write access; lacking it retriggers the fault with the
// present-bit error set. (Template header / return type are elided above
// this excerpt.)
Mem_space::user_to_kernel(T const *addr, bool write)
{
Phys_addr phys;
// Patch migrates local types: Addr/Size/raw-unsigned attributes become the
// typed Virt_addr / Page_order / Attr equivalents.
- Addr virt = Addr::create((Address) addr);
- unsigned attr, error = 0;
- Size size;
+ Virt_addr virt = Virt_addr((Address) addr);
+ Attr attr;
+ unsigned error = 0;
+ Page_order size;
// Loop until the page is present with the required rights.
for (;;)
{
if (v_lookup(virt, &phys, &size, &attr))
{
// Add offset to frame
- phys = phys | virt.offset(size);
+ phys = phys | cxx::get_lsb(virt, size);
// See if we want to write and are not allowed to
// Generic check because INTEL_PTE_WRITE == INTEL_PDE_WRITE
- if (!write || (attr & Pt_entry::Writable))
- return (T*)Mem_layout::phys_to_pmem(phys.value());
+ if (!write || (attr.rights & Page::Rights::W()))
+ return (T*)Mem_layout::phys_to_pmem(Phys_addr::val(phys))
error |= PF_ERR_PRESENT;
}
// Pretend open interrupts, we restore the current state afterwards.
Cpu_lock::Status was_locked = cpu_lock.test();
- thread_page_fault(virt.value(), error, 0, Proc::processor_state() | EFLAGS_IF, 0);
+ thread_page_fault(Virt_addr::val(virt), error, 0, Proc::processor_state() | EFLAGS_IF, 0);
cpu_lock.set (was_locked);
}
// Fast path of a user-memory read (function header elided above): if the
// whole T lies within one page, translate once and dereference directly.
if (((Address)addr & Config::PAGE_MASK) ==
(((Address)addr + sizeof (T) - 1) & Config::PAGE_MASK))
{
- Lock_guard<Cpu_lock> guard(&cpu_lock);
+ auto guard = lock_guard(cpu_lock);
value = *user_to_kernel(addr, false);
}
// Page-crossing slow path continues below the visible excerpt.
else
// Fast path of a user-memory write (function header elided above): if the
// whole T lies within one page, translate with write access and store.
if (((Address)addr & Config::PAGE_MASK) ==
(((Address)addr + sizeof (T) - 1) & Config::PAGE_MASK))
{
- Lock_guard<Cpu_lock> guard(&cpu_lock);
+ auto guard = lock_guard(cpu_lock);
*user_to_kernel(addr, true) = value;
}
// Page-crossing slow path continues below the visible excerpt.
else
void
// Copy n objects of type T from user space (usrc) into kernel space (kdst).
// Body is truncated below; the visible part sets up byte-wise cursors,
// presumably for a page-by-page copy via user_to_kernel() -- confirm.
Mem_space::copy_from_user(T *kdst, T const *usrc, size_t n)
{
- Lock_guard<Cpu_lock> guard(&cpu_lock);
+ auto guard = lock_guard(cpu_lock);
char *ptr = (char *)usrc;
char *dst = (char *)kdst;
void
// Copy n objects of type T from kernel space (ksrc) out to user space (udst).
// Body is truncated below; the visible part sets up byte-wise cursors,
// presumably for a page-by-page copy via user_to_kernel() -- confirm.
Mem_space::copy_to_user(T *udst, T const *ksrc, size_t n)
{
- Lock_guard<Cpu_lock> guard(&cpu_lock);
+ auto guard = lock_guard(cpu_lock);
char *ptr = (char *)udst;
char *src = (char *)ksrc;