INTERFACE:
+#include "auto_quota.h"
+#include "cpu_mask.h"
#include "paging.h" // for page attributes
#include "mem_layout.h"
#include "member_offs.h"
-#include "pages.h"
#include "per_cpu_data.h"
#include "ram_quota.h"
+#include "types.h"
+#include "mapdb_types.h"
class Space;

class Mem_space
{
MEMBER_OFFSET();
+ // Space reverse lookup
+ friend inline Mem_space* current_mem_space();
+
public:
typedef int Status;
+ static char const *const name;
- void *operator new (size_t, void *p)
- { return p; }
-
- void operator delete (void *)
- {}
-
- static char const * const name;
-
+ typedef Page::Attr Attr;
typedef Pdir::Va Vaddr;
- typedef Pdir::Va Vsize;
+ typedef Pdir::Vs Vsize;
+
+ typedef Addr::Addr<Config::PAGE_SHIFT> Phys_addr;
+ typedef Addr::Diff<Config::PAGE_SHIFT> Phys_diff;
+ typedef Addr::Order<Config::PAGE_SHIFT> Page_order;
- typedef Virt_addr Addr;
- typedef Virt_size Size;
- typedef Page_addr<Config::PAGE_SHIFT> Phys_addr;
typedef void Reap_list;
+ // for map_util
+ typedef Page_number V_pfn;
+ typedef Page_count V_pfc;
+ typedef Addr::Order<0> V_order;
+
// Each architecture must provide these members:
- void switchin_context(Mem_space *from);
+ void switchin_context(Mem_space *from, unsigned mode = 0);
/** Insert a page-table entry, or upgrade an existing entry with new
* attributes.
*
* @param phys Physical address of the page frame (page-aligned).
* @param virt Virtual address for which the entry is created.
* @param size Page order (log2 of the frame size in bytes).
* @param page_attribs Rights and memory type for the mapping.
* @pre phys and virt need to be size-aligned according to the size argument.
*/
- Status v_insert(Phys_addr phys, Vaddr virt, Vsize size,
- unsigned page_attribs, bool upgrade_ignore_size = false);
+ FIASCO_SPACE_VIRTUAL
+ Status v_insert(Phys_addr phys, Vaddr virt, Page_order size,
+ Attr page_attribs);
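+
+ // Hedged caller sketch (`pa', `va' are assumed to be suitably
+ // aligned Phys_addr/Vaddr values, `attr' an Attr value carrying the
+ // desired rights; the Insert_* status codes are not shown in this
+ // excerpt):
+ //
+ //   Status s = space->v_insert(pa, va,
+ //                              Page_order(Config::PAGE_SHIFT), attr);
+ //   if (s != Insert_ok)
+ //     ...; // no memory for page tables, or a conflicting mapping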
/** Look up a page-table entry.
*
* @param virt Virtual address to look up.
* @param[out] phys If non-null, receives the physical address of the page frame.
* @param[out] order If non-null, receives the size of the page frame if an
*             entry was found, or of the free slot if not; in
*             either case, it is either 4KB or 4MB.
* @return True if an entry was found, false otherwise.
*/
- bool v_lookup(Vaddr virt, Phys_addr *phys = 0, Size *size = 0,
- unsigned *page_attribs = 0);
+ FIASCO_SPACE_VIRTUAL
+ bool v_lookup(Vaddr virt, Phys_addr *phys = 0, Page_order *order = 0,
+ Attr *page_attribs = 0);
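+
+ // Hedged lookup sketch:
+ //
+ //   Phys_addr pa;
+ //   Page_order o;
+ //   Attr a;
+ //   if (space->v_lookup(va, &pa, &o, &a))
+ //     ...; // hit: pa is the frame address, o its order, a its attributes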
/** Delete page-table entries, or some of the entries' attributes.
*
* @param page_attribs Rights to revoke from the entries (bit-ORed).
* @return Combined (bit-ORed) rights that were removed.
*/
- unsigned long v_delete(Vaddr virt, Vsize size,
- unsigned long page_attribs = Page_all_attribs);
+ FIASCO_SPACE_VIRTUAL
+ L4_fpage::Rights v_delete(Vaddr virt, Page_order size,
+ L4_fpage::Rights page_attribs);
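+
+ // Hedged revocation sketch, removing only the write right from one
+ // base page:
+ //
+ //   space->v_delete(va, Page_order(Config::PAGE_SHIFT),
+ //                   L4_fpage::Rights::W());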
+
+ FIASCO_SPACE_VIRTUAL
+ void v_set_access_flags(Vaddr virt, L4_fpage::Rights access_flags);
- /** Set this memory space as the current on on this CPU. */
+ /** Set this memory space as the current one on this CPU. */
void make_current();
- /**
- * Update this address space with an entry from the kernel's shared
- * address space. The kernel maintains a 'master' page directory in
- * class Kmem. Use this function when an entry from the master directory
- * needs to be copied into this address space.
- * @param addr virtual address for which an entry should be copied from the
- * shared page directory.
- */
- void kmem_update (void *addr);
+ static Mem_space *kernel_space()
+ { return _kernel_space; }
- static Mem_space *kernel_space() { return _kernel_space; }
- static inline Mem_space *current_mem_space(unsigned cpu);
+ static inline Mem_space *current_mem_space(Cpu_number cpu);
- virtual Page_number map_max_address() const
- { return Addr::create(Map_max_address); }
- static Address superpage_size() { return Map_superpage_size; }
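+ // The + 1 below turns the inclusive Mem_layout::User_max into an
+ // exclusive upper bound (one past the last user-mappable page).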
+ virtual
+ Page_number mem_space_map_max_address() const
+ { return Page_number(Virt_addr(Mem_layout::User_max)) + Page_count(1); }
- static Phys_addr page_address(Phys_addr o, Size s)
- { return o.trunc(s); }
+ Page_number map_max_address() const
+ { return mem_space_map_max_address(); }
- static Phys_addr subpage_address(Phys_addr addr, Size offset)
- { return addr | offset; }
+ static Phys_addr page_address(Phys_addr o, Page_order s)
+ { return cxx::mask_lsb(o, s); }
- static Mword phys_to_word(Phys_addr a)
- { return a.value(); }
+ static V_pfn page_address(V_pfn a, Page_order o)
+ { return cxx::mask_lsb(a, o); }
-private:
- Ram_quota *_quota;
+ static Phys_addr subpage_address(Phys_addr addr, V_pfc offset)
+ { return addr | Phys_diff(offset); }
- // Each architecture must provide these members
+ struct Fit_size
+ {
+ typedef cxx::array<Page_order, Page_order, 65> Size_array;
+ Size_array const &o;
+ Page_order operator () (Page_order i) const { return o[i]; }
- // Page-table ops
- // We'd like to declare current_pdir here, but Dir_type isn't defined yet.
- // static inline Dir_type *current_pdir();
+ explicit Fit_size(Size_array const &o) :o(o) {}
+ };
- // Space reverse lookup
- friend inline Mem_space* current_mem_space(); // Mem_space::current_space
+ FIASCO_SPACE_VIRTUAL
+ Fit_size mem_space_fitting_sizes() const __attribute__((pure));
- // Mem_space();
- Mem_space(const Mem_space &); // undefined copy constructor
+ Fit_size fitting_sizes() const
+ { return mem_space_fitting_sizes(); }
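+
+ // Hedged sketch of what Fit_size does: it maps any requested order
+ // to the largest order this space supports that is <= the request.
+ // With 4K (order 12) and 4M (order 22) pages registered:
+ //
+ //   Fit_size fit = space->fitting_sizes();
+ //   fit(Page_order(21));  // -> Page_order(12)
+ //   fit(Page_order(22));  // -> Page_order(22)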
- static Per_cpu<Mem_space *> _current asm ("CURRENT_MEM_SPACE");
- static Mem_space *_kernel_space;
-};
+ static Mdb_types::Pfn to_pfn(Phys_addr p)
+ { return Mdb_types::Pfn(cxx::int_value<Page_number>(p)); }
+
+ static Mdb_types::Pfn to_pfn(V_pfn p)
+ { return Mdb_types::Pfn(cxx::int_value<Page_number>(p)); }
+
+ static Mdb_types::Pcnt to_pcnt(Page_order s)
+ { return Mdb_types::Pcnt(1) << Mdb_types::Order(cxx::int_value<Page_order>(s) - Config::PAGE_SHIFT); }
+
+ static V_pfn to_virt(Mdb_types::Pfn p)
+ { return Page_number(cxx::int_value<Mdb_types::Pfn>(p)); }
+
+ static Page_order to_order(Mdb_types::Order p)
+ { return Page_order(cxx::int_value<Mdb_types::Order>(p) + Config::PAGE_SHIFT); }
+
+ static V_pfc to_size(Page_order p)
+ { return V_pfc(1) << p; }
+
+ static V_pfc subpage_offset(V_pfn a, Page_order o)
+ { return cxx::get_lsb(a, o); }
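+
+ // Worked example for the conversion helpers, assuming
+ // Config::PAGE_SHIFT == 12 (4K base pages):
+ //
+ //   to_pcnt(Page_order(22))            // 4M frame -> 1024 base pages
+ //   to_order(Mdb_types::Order(10))     // -> Page_order(22)
+ //   subpage_offset(va, Page_order(22)) // va's offset within its 4M frame
+ //
+ // to_pfn() / to_virt() merely reinterpret the same integer value in
+ // the mapdb's and map_util's respective numeric domains.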
+
+ Page_order largest_page_size() const
+ { return mem_space_fitting_sizes()(Page_order(64)); }
+
+ enum
+ {
+ Max_num_global_page_sizes = 5
+ };
+
+ static Page_order const *get_global_page_sizes(bool finalize = true)
+ {
+ if (finalize)
+ _glbl_page_sizes_finished = true;
+ return _glbl_page_sizes;
+ }
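+
+ // Hedged usage sketch: callers (e.g. map_util) fetch the list once;
+ // fetching it finalizes the set, so all page sizes must have been
+ // registered before. The entries run from the largest order down to
+ // the base page order:
+ //
+ //   Page_order const *ps = Mem_space::get_global_page_sizes();
+ //   // ps[0] is the largest mappable order; the list ends at
+ //   // Page_order(Config::PAGE_SHIFT)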
-class Mem_space_q_alloc
-{
private:
- Mapped_allocator *_a;
- Ram_quota *_q;
-};
+ Mem_space(const Mem_space &) = delete;
+ Ram_quota *_quota;
+
+ static Per_cpu<Mem_space *> _current;
+ static Mem_space *_kernel_space;
+ static Page_order _glbl_page_sizes[Max_num_global_page_sizes];
+ static unsigned _num_glbl_page_sizes;
+ static bool _glbl_page_sizes_finished;
+};
//---------------------------------------------------------------------------
INTERFACE [mp]:
-#include "cpu_mask.h"
-
EXTENSION class Mem_space
{
public:
- enum { Need_xcpu_tlb_flush = 1 };
+ enum { Need_xcpu_tlb_flush = true };
+
private:
- Cpu_mask _flush_tlb;
+ static Cpu_mask _tlb_active;
};
//---------------------------------------------------------------------------
INTERFACE [!mp]:

EXTENSION class Mem_space
{
public:
- enum { Need_xcpu_tlb_flush = 0 };
+ enum { Need_xcpu_tlb_flush = false };
};
#include "config.h"
#include "globals.h"
#include "l4_types.h"
-#include "mapped_alloc.h"
+#include "kmem_alloc.h"
#include "mem_unit.h"
#include "paging.h"
#include "panic.h"
-PUBLIC inline
-Mem_space_q_alloc::Mem_space_q_alloc(Ram_quota *q = 0, Mapped_allocator *a = 0)
- : _a(a), _q(q)
-{}
+DEFINE_PER_CPU Per_cpu<Mem_space *> Mem_space::_current;
-PUBLIC inline
-bool
-Mem_space_q_alloc::valid() const
-{ return _a && _q; }
+char const * const Mem_space::name = "Mem_space";
+Mem_space *Mem_space::_kernel_space;
-PUBLIC inline
-void *
-Mem_space_q_alloc::alloc(unsigned long size) const
-{
- if (EXPECT_FALSE(!_q->alloc(size)))
- return 0;
+static Mem_space::Fit_size::Size_array __mfs;
+Mem_space::Page_order Mem_space::_glbl_page_sizes[Max_num_global_page_sizes];
+unsigned Mem_space::_num_glbl_page_sizes;
+bool Mem_space::_glbl_page_sizes_finished;
- void *b;
- if (EXPECT_FALSE(!(b=_a->unaligned_alloc(size))))
+PROTECTED static
+void
+Mem_space::add_global_page_size(Page_order o)
+{
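+  // Insertion sort into the descending-ordered global size list;
+  // duplicates are ignored. The trailing assertion below expects the
+  // smallest registered order at the tail to be the base page order,
+  // so the base size must be added before any larger ones.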
+ assert (!_glbl_page_sizes_finished);
+ unsigned i;
+ for (i = 0; i < _num_glbl_page_sizes; ++i)
{
- _q->free(size);
- return 0;
+ if (_glbl_page_sizes[i] == o)
+ return;
+
+ if (_glbl_page_sizes[i] < o)
+ break;
}
- return b;
-}
+ assert (_num_glbl_page_sizes + 1 < Max_num_global_page_sizes);
+ for (unsigned x = _num_glbl_page_sizes; x > i; --x)
+ _glbl_page_sizes[x] = _glbl_page_sizes[x - 1];
-PUBLIC inline
+ _glbl_page_sizes[i] = o;
+ assert (_glbl_page_sizes[_num_glbl_page_sizes] <= Page_order(Config::PAGE_SHIFT));
+
+ ++_num_glbl_page_sizes;
+}
+
+PUBLIC static
void
-Mem_space_q_alloc::free(void *block, unsigned long size) const
+Mem_space::add_page_size(Page_order o)
{
- _a->unaligned_free(size, block);
- _q->free(size);
+ add_global_page_size(o);
+ for (Page_order c = o; c < __mfs.size(); ++c)
+ __mfs[c] = o;
}
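+
+// Hedged setup sketch: the architecture code is expected to register
+// the base page size first, then any larger hardware sizes, e.g.
+//
+//   Mem_space::add_page_size(Page_order(Config::PAGE_SHIFT));      // 4K
+//   Mem_space::add_page_size(Page_order(Config::SUPERPAGE_SHIFT)); // 4M/2M
+//
+// Registering smallest-first matters: each call fills __mfs[o..64]
+// with its own order, so a later, larger size overrides only the
+// upper range and every lookup falls back to the largest supported
+// order <= the request.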
-
-char const * const Mem_space::name = "Mem_space";
-Mem_space *Mem_space::_kernel_space;
-
-
-
-PUBLIC inline
-bool
-Mem_space::valid() const
-{ return _dir; }
+IMPLEMENT
+Mem_space::Fit_size
+Mem_space::mem_space_fitting_sizes() const
+{
+ return Fit_size(__mfs);
+}
PUBLIC inline
Ram_quota *
Mem_space::ram_quota() const
{ return _quota; }
PUBLIC
void
Mem_space::reset_dirty ()
-{
- _dir = 0;
-}
+{ _dir = 0; }
PUBLIC inline
Mem_space::Dir_type*
Mem_space::dir ()
-{
- return _dir;
-}
+{ return _dir; }
PUBLIC inline
const Mem_space::Dir_type*
Mem_space::dir() const
-{
- return _dir;
-}
-
-inline NEEDS[Mem_space::current_mem_space]
-Mem_space *
-current_mem_space()
-{
- return Mem_space::current_mem_space(0);
-}
-
-// routines
-
-/**
- * Tests if a task is the sigma0 task.
- * @return true if the task is sigma0, false otherwise.
- */
-PUBLIC inline NEEDS ["globals.h","config.h"]
-bool Mem_space::is_sigma0 () const
-{
- return this == sigma0_space;
-}
-
-// Mapping utilities
+{ return _dir; }
-PUBLIC inline
+PUBLIC
virtual bool
-Mem_space::v_fabricate(Vaddr address,
- Phys_addr* phys, Size* size, unsigned* attribs = 0)
+Mem_space::v_fabricate(Vaddr address, Phys_addr *phys, Page_order *order,
+ Attr *attribs = 0)
{
- return Mem_space::v_lookup(address.trunc(Size(Map_page_size)),
- phys, size, attribs);
+ return v_lookup(cxx::mask_lsb(address, Page_order(Config::PAGE_SHIFT)),
+ phys, order, attribs);
}
+PUBLIC virtual
+bool
+Mem_space::is_sigma0() const
+{ return false; }
+
//---------------------------------------------------------------------------
IMPLEMENTATION [!io]:

PUBLIC inline
bool
Mem_space::io_lookup (Address)
{ return false; }
-//---------------------------------------------------------------------------
-IMPLEMENTATION [mp]:
+//----------------------------------------------------------------------------
+IMPLEMENTATION [!mp]:
-PUBLIC inline
-bool
-Mem_space::need_tlb_flush()
+PUBLIC static inline
+void
+Mem_space::enable_tlb(Cpu_number)
+{}
+
+PUBLIC static inline
+void
+Mem_space::disable_tlb(Cpu_number)
+{}
+
+PUBLIC static inline
+Cpu_mask
+Mem_space::active_tlb()
{
- unsigned c = current_cpu();
- bool x = _flush_tlb.get(c);
- if (x)
- _flush_tlb.atomic_get_and_clear(c);
- return x;
+ Cpu_mask c;
+ c.set(Cpu_number::boot_cpu());
+ return c;
}
-//---------------------------------------------------------------------------
-IMPLEMENTATION [!mp]:
-PRIVATE inline
-bool
-Mem_space::need_tlb_flush()
-{ return false; }
+//---------------------------------------------------------------------------
+IMPLEMENTATION [mp]:
+
+Cpu_mask Mem_space::_tlb_active;
+
+PUBLIC static inline
+Cpu_mask const &
+Mem_space::active_tlb()
+{ return _tlb_active; }
+
+PUBLIC static inline
+void
+Mem_space::enable_tlb(Cpu_number cpu)
+{ _tlb_active.atomic_set(cpu); }
+
+PUBLIC static inline
+void
+Mem_space::disable_tlb(Cpu_number cpu)
+{ _tlb_active.atomic_clear(cpu); }
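+
+// Hedged usage note: a CPU sets its bit in _tlb_active via enable_tlb()
+// once its TLB may hold user-space entries, and clears it with
+// disable_tlb() when idling or going offline. Remote TLB shoot-down can
+// then skip CPUs outside active_tlb(), e.g.:
+//
+//   if (Mem_space::active_tlb().get(cpu))
+//     ...; // only then send the flush IPI to `cpu'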