10 #include "processor.h"
12 #include "std_macros.h"
14 #include "vmem_alloc.h"
19 * The global page fault handler switch.
20 * Handles page-fault monitoring, classification of page faults based on
21 * virtual-memory area they occurred in, page-directory updates for kernel
22 * faults, IPC-window updates, and invocation of paging function for
23 * user-space page faults (handle_page_fault_pager).
24 * @param pfa page-fault virtual address
25 * @param error_code CPU error code
26 * @return true if page fault could be resolved, false otherwise
27 * @exception longjmp longjumps to recovery location if page-fault
28 * handling fails (i.e., return value would be false),
29 * but recovery location has been installed
31 IMPLEMENT inline NEEDS[<cstdio>,"regdefs.h", "kdb_ke.h","processor.h",
32 "config.h","std_macros.h","vmem_alloc.h","logdefs.h",
33 "warn.h",Thread::page_fault_log]
34 int Thread::handle_page_fault (Address pfa, Mword error_code, Mword pc,
37 //if (Config::Log_kernel_page_faults && !PF::is_usermode_error(error_code))
38 if (0 && current_cpu() != 0)
40 Lock_guard<Cpu_lock> guard(&cpu_lock);
41 printf("*KP[cpu=%u, sp=%lx, pfa=%lx, pc=%lx, error=(%lx)", current_cpu(), Proc::stack_pointer(), pfa, pc, error_code);
42 print_page_fault_error(error_code);
47 printf("Translation error ? %x\n"
48 " current space has mapping : %08x\n"
49 " Kernel space has mapping : %08x\n",
50 PF::is_translation_error(error_code),
51 current_mem_space()->lookup((void*)pfa),
52 Space::kernel_space()->lookup((void*)pfa));
58 // TODO: put this into a debug_page_fault_handler
59 if (EXPECT_FALSE(log_page_fault()))
60 page_fault_log(pfa, error_code, pc);
62 L4_msg_tag ipc_code = L4_msg_tag(0, 0, 0, 0);
64 // Check for page fault in user memory area
65 if (EXPECT_TRUE (!Kmem::is_kmem_page_fault(pfa, error_code)))
67 // Make sure that we do not handle page faults that do not
68 // belong to this thread.
69 //assert_kdb (mem_space() == current_mem_space());
71 if (EXPECT_FALSE (mem_space()->is_sigma0()))
73 // special case: sigma0 can map in anything from the kernel
74 if(handle_sigma0_page_fault(pfa))
81 // user mode page fault -- send pager request
82 if (handle_page_fault_pager(_pager, pfa, error_code,
83 L4_msg_tag::Label_page_fault))
89 // Check for page fault in kernel memory region caused by user mode
90 else if (EXPECT_FALSE(PF::is_usermode_error(error_code)))
91 return 0; // disallow access after mem_user_max
93 // Check for page fault in IO bit map or in delimiter byte behind IO bitmap
94 // assume it is caused by an input/output instruction and fall through to
96 else if (EXPECT_FALSE(Kmem::is_io_bitmap_page_fault(pfa)))
99 // We're in kernel code faulting on a kernel memory region
101 // A page is not present but a mapping exists in the global page dir.
102 // Update our page directory by copying from the master pdir
103 // This is the only path that should be executed with interrupts
104 // disabled if the page faulter also had interrupts disabled.
105 // thread_page_fault() takes care of that.
106 else if (Mem_layout::is_caps_area(pfa))
108 // Test for special case -- see function documentation
109 if (pagein_tcb_request(regs))
112 printf("Fiasco BUG: Invalid CAP access (pc=%lx, pfa=%lx)\n", pc, pfa);
113 kdb_ke("Fiasco BUG: Invalid access to Caps area");
116 else if (PF::is_translation_error(error_code) &&
117 #if defined CONFIG_ARM || defined CONFIG_PPC32
118 Mem_space::kernel_space()->virt_to_phys(pfa) != ~0UL
120 Kmem::virt_to_phys (reinterpret_cast<void*>(pfa)) != ~0UL
124 if (pfa < Mem_layout::Slabs_start || pfa >= Mem_layout::Slabs_end)
126 Mem_space::current_mem_space(cpu())->kmem_update((void*)pfa);
130 WARN("No page-fault handler for 0x%lx, error 0x%lx, pc "L4_PTR_FMT"\n",
131 pfa, error_code, pc);
133 // An error occurred. Our last chance to recover is an exception
134 // handler a kernel function may have set.
138 longjmp (*_recover_jmpbuf, 1);