1 INTERFACE [ia32 || amd64 || ux]:
// Cache-attribute encoding for x86 page-table entries. The two bits
// covered by Cache_mask (0x18) select the caching mode; CACHEABLE is
// the all-zero default.
12 MAX_ATTRIBS = 0x00000006,
13 Cache_mask = 0x00000018, ///< Cache attribute mask
14 CACHEABLE = 0x00000000, ///< normal cached memory (default, no bits set)
15 BUFFERED = 0x00000010, ///< weaker caching mode -- presumably write-combining/through; verify against hardware PWT/PCD semantics
16 NONCACHEABLE = 0x00000018, ///< caching fully disabled (both mask bits set)
7 20 EXTENSION class Pt_entry
8 23 static Unsigned32 _cpu_global;   ///< PTE flags used for global (task-shared) kernel mappings
9 24 static unsigned _super_level;    ///< deepest level at which leaf (super)pages may be mapped
10 25 static bool _have_superpages;   ///< true if the CPU supports superpage mappings
// Pointer-to-entry wrapper; inherits the raw PTE bit definitions
// privately and re-exports Super_level.
11 28 EXTENSION class Pte_ptr : private Pt_entry
12 31 using Pt_entry::Super_level;
13 35 //---------------------------------------------------------------------------
14 36 IMPLEMENTATION [ia32 || amd64 || ux]:
// Static storage for the Pt_entry class members declared above.
15 40 bool Pt_entry::_have_superpages;
16 41 unsigned Pt_entry::_super_level;
// Record whether the CPU supports superpages. Without superpage
// support, leaf mappings must go one level deeper than Super_level
// (i.e. only smallest-size pages can be mapped).
17 45 Pt_entry::have_superpages(bool yes)
18 47 _have_superpages = yes;
19 48 _super_level = yes ? Super_level : (Super_level + 1);
// Accessor: deepest page-table level usable for superpage mappings,
// as computed by have_superpages().
20 53 Pt_entry::super_level()
21 54 { return _super_level; }
// True if the entry's Valid (present) bit is set.
22 59 Pte_ptr::is_valid() const
23 60 { return *pte & Valid; }
// An entry is a leaf (maps memory rather than pointing to another
// table) when we are at the deepest level, or when the superpage
// (PSE) bit is set at an upper level.
24 64 Pte_ptr::is_leaf() const
25 65 { return level == Pdir::Depth || (*pte & Pse_bit); }
// Physical address of the next-level page table: strip the low
// attribute bits (everything below PAGE_SHIFT) from the raw entry.
26 68 * \pre is_leaf() == false
27 72 Pte_ptr::next_level() const
28 73 { return cxx::mask_lsb(*pte, (unsigned)Config::PAGE_SHIFT); }
// Point this entry at a next-level page table. The table is made
// valid and accessible (User | Writable) at this level; the leaf
// entries control the effective rights.
29 76 * \pre cxx::get_lsb(phys_addr, Config::PAGE_SHIFT) == 0
30 80 Pte_ptr::set_next_level(Mword phys_addr)
31 81 { *pte = phys_addr | Valid | User | Writable; }
// Install a leaf mapping with the given raw attribute bits.
// NOTE(review): the branch body for non-deepest levels is not visible
// here -- presumably it ORs in Pse_bit to mark a superpage; confirm.
32 85 Pte_ptr::set_page(Mword phys, Mword attr)
33 87 Mword v = phys | Valid | attr;
34 88 if (level < Pdir::Depth)
// Advance to the next entry in the current table (body elided from
// this view).
35 95 Pte_ptr::operator ++ ()
// Physical address mapped by this leaf entry: mask off the
// level-dependent low (offset/attribute) bits and clear the XD
// (execute-disable) bit, which sits above the address field.
36 103 Pte_ptr::page_addr() const
37 104 { return cxx::mask_lsb(*pte, Pdir::page_order_for_level(level)) & ~Mword(XD); }
// Replace the access-rights and cache-attribute bits of the entry
// while keeping the address (and any other) bits intact.
38 109 Pte_ptr::set_attribs(Page::Attr attr)
39 111 typedef L4_fpage::Rights R;
40 112 typedef Page::Type T;
41 113 typedef Page::Kern K;
// Translate the abstract Page::Attr into raw x86 PTE bits:
// write -> Writable, user -> User, no-execute -> XD (inverted sense).
42 115 if (attr.rights & R::W()) r |= Writable;
43 116 if (attr.rights & R::U()) r |= User;
44 117 if (!(attr.rights & R::X())) r |= XD;
45 118 if (attr.type == T::Normal()) r |= Page::CACHEABLE;
46 119 if (attr.type == T::Buffered()) r |= Page::BUFFERED;
47 120 if (attr.type == T::Uncached()) r |= Page::NONCACHEABLE;
48 121 if (attr.kern & K::Global()) r |= global();
// Read-modify-write: clear old rights + cache bits, then OR in new.
49 122 *pte = (*pte & ~(ATTRIBS_MASK | Page::Cache_mask)) | r;
// Build a complete leaf entry from scratch: address + translated
// attribute bits + Valid. Mirrors the bit translation in
// set_attribs(), but additionally sets Pse_bit when the mapping is a
// superpage (i.e. not at the deepest level).
50 127 Pte_ptr::create_page(Phys_mem_addr addr, Page::Attr attr)
51 129 Mword r = (level < Pdir::Depth) ? (Mword)Pse_bit : 0;
52 130 typedef L4_fpage::Rights R;
53 131 typedef Page::Type T;
54 132 typedef Page::Kern K;
55 133 if (attr.rights & R::W()) r |= Writable;
56 134 if (attr.rights & R::U()) r |= User;
57 135 if (!(attr.rights & R::X())) r |= XD;
58 136 if (attr.type == T::Normal()) r |= Page::CACHEABLE;
59 137 if (attr.type == T::Buffered()) r |= Page::BUFFERED;
60 138 if (attr.type == T::Uncached()) r |= Page::NONCACHEABLE;
61 139 if (attr.kern & K::Global()) r |= global();
62 140 *pte = cxx::int_value<Phys_mem_addr>(addr) | r | Valid;
// Inverse of set_attribs()/create_page(): decode the raw PTE bits
// back into an abstract Page::Attr (rights + cache type).
63 145 Pte_ptr::attribs() const
64 147 typedef L4_fpage::Rights R;
65 148 typedef Page::Type T;
// XD has inverted sense: executable unless the XD bit is set.
66 152 if (_raw & Writable) r |= R::W();
67 153 if (_raw & User) r |= R::U();
68 154 if (!(_raw & XD)) r |= R::X();
69 157 switch (_raw & Page::Cache_mask)
70 160 case Page::CACHEABLE: t = T::Normal(); break;
71 161 case Page::BUFFERED: t = T::Buffered(); break;
72 162 case Page::NONCACHEABLE: t = T::Uncached(); break;
73 164 // do not care for kernel special flags, as this is used for user
74 166 return Page::Attr(r, t);
// Add access rights to an existing entry (rights can only be widened
// here). The update loop is elided from this view; access_once()
// suggests a racy-read + retry scheme -- verify in the full source.
75 171 Pte_ptr::add_attribs(Page::Attr attr)
76 173 typedef L4_fpage::Rights R;
77 176 if (attr.rights & R::W())
78 179 if (attr.rights & R::X())
79 185 auto p = access_once(pte);
// Raw-bit overload: OR arbitrary attribute bits into the entry
// (body elided from this view).
80 201 Pte_ptr::add_attribs(Mword attr)
// Log2 size of the page mapped at this entry's level.
81 206 Pte_ptr::page_order() const
82 207 { return Pdir::page_order_for_level(level); }
// Atomically read-and-clear the hardware-maintained access bits:
// Dirty implies the page was written (report RW), Referenced alone
// implies it was only read (report R). The mp_cas() retry structure
// around these lines is elided from this view.
83 209 PUBLIC inline NEEDS["atomic.h"]
84 211 Pte_ptr::access_flags() const
85 215 return L4_fpage::Rights(0);
86 223 r = L4_fpage::Rights::RW();
87 224 else if (raw & Referenced)
88 225 r = L4_fpage::Rights::R();
89 227 return L4_fpage::Rights(0);
// CAS clears Dirty and Referenced only if the entry was unchanged.
90 229 if (mp_cas(pte, raw, raw & ~(Dirty | Referenced)))
// Cache write-back hook for the given range -- presumably a no-op on
// x86 (coherent page-table walks); body elided from this view.
91 241 Pte_ptr::write_back(void *, void *)
// Conditional cache write-back hook -- presumably a no-op on x86;
// body elided from this view.
92 246 Pte_ptr::write_back_if(bool)
// Remove raw attribute bits from the entry (body elided from this
// view).
93 251 Pte_ptr::del_attribs(Mword attr)
// Revoke access rights: removing W presumably clears Writable,
// removing X presumably sets XD (bodies of the branches are elided
// from this view -- confirm in the full source).
94 256 Pte_ptr::del_rights(L4_fpage::Rights r)
95 258 if (r & L4_fpage::Rights::W())
96 261 if (r & L4_fpage::Rights::X())
// Flags ORed into global kernel mappings. Starts with the software
// L4_global flag; the hardware Cpu_global bit is added later by
// enable_global() once the CPU feature is known.
97 265 Unsigned32 Pt_entry::_cpu_global = Pt_entry::L4_global;
// Enable use of the hardware global bit in page-table entries
// (entries that survive a page-table base register reload).
98 269 Pt_entry::enable_global()
99 270 { _cpu_global |= Cpu_global; }
100 273 * Global entries are entries that are not automatically flushed when the
101 274 * page-table base register is reloaded. They are intended for kernel data
102 275 * that is shared between all tasks.
103 276 * @return global page-table entry flags
104 281 { return _cpu_global; }
284 //--------------------------------------------------------------------------
286 #include "mem_layout.h"
// A translation (not-present) fault: the present bit of the x86
// page-fault error code is clear.
107 289 IMPLEMENT inline NEEDS["regdefs.h"]
108 290 Mword PF::is_translation_error(Mword error)
109 292 return !(error & PF_ERR_PRESENT);
// True if the fault occurred while the CPU was in user mode,
// according to the error-code user/supervisor bit.
110 295 IMPLEMENT inline NEEDS["regdefs.h"]
111 296 Mword PF::is_usermode_error(Mword error)
112 298 return (error & PF_ERR_USERMODE);
// True if the faulting access was a read (the write bit of the error
// code is clear).
113 301 IMPLEMENT inline NEEDS["regdefs.h"]
114 302 Mword PF::is_read_error(Mword error)
115 304 return !(error & PF_ERR_WRITE);
// Encode the page-fault address and cause into the first message word
// of the L4 page-fault IPC: address with the low three bits cleared,
// plus the present/write bits of the error code. Bit 4 of the error
// code (presumably the x86 instruction-fetch bit) is mapped to 0x4 --
// verify against the L4 page-fault protocol encoding.
116 307 IMPLEMENT inline NEEDS["regdefs.h"]
117 308 Mword PF::addr_to_msgword0(Address pfa, Mword error)
118 310 Mword v = (pfa & ~0x7) | (error & (PF_ERR_PRESENT | PF_ERR_WRITE));
119 311 if (error & (1 << 4)) v |= 0x4;