include $(srcdir)/Makeconf
include $(MODULES_FILE)
+MODULES_FILES += $(srcdir)/Modules.generic
ifdef SUBSYSTEMS
_modules_read_ = true
VPATH += jdb/ia32 jdb/ia32/32 jdb/ux jdb
PRIVATE_INCDIR += kern/$(CONFIG_XARCH) kern/ia32/32 kern/ia32 kern
-KERNEL_EXTRA-$(CONFIG_UX_CON)) += ux_con
+KERNEL_EXTRA-$(CONFIG_UX_CON) += ux_con
KERNEL_EXTRA-$(CONFIG_UX_NET) += ux_net
KERNEL_EXTRA += $(KERNEL_EXTRA-y)
{
Irq *r;
putchar('\n');
- for (unsigned i = 0; i < Config::Max_num_irqs; ++i)
+ for (unsigned i = 0; i < Config::Max_num_dirqs; ++i)
{
r = static_cast<Irq*>(Irq_chip::hw_chip->irq(i));
if (!r)
Kobject_common *
Jdb_kobject_irq::follow_link(Kobject_common *o)
{
- Irq_sender *t = dcast<Irq_sender*>(Kobject::from_dbg(o->dbg_info()));
- if (!t || !t->owner() || (Smword)t->owner() == -1)
- return o;
-
- return Kobject::from_dbg(static_cast<Thread*>(t->owner())->dbg_info());
+ Irq_sender *t = Kobject::dcast<Irq_sender*>(o);
+ return t ? Kobject::pointer_to_obj(t->owner()) : 0;
}
PUBLIC
bool
Jdb_kobject_irq::show_kobject(Kobject_common *, int)
{ return true; }
-#if 0
- Thread *t = Kobject::dcast<Thread*>(o);
- return show(t, level);
-#endif
PUBLIC
int
Jdb_kobject_irq::show_kobject_short(char *buf, int max, Kobject_common *o)
{
- Irq *t = Kobject::dcast<Irq*>(o);
- Kobject_common *d = follow_link(o);
- int cnt = 0;
- return cnt + snprintf(buf, max, " I=%3lx %s L=%lx T=%lx F=%x",
- t->irq(), t->pin()->pin_type(), t->obj_id(),
- d ? d->dbg_info()->dbg_id() : 0, (unsigned)t->pin()->flags());
+ Irq_sender *t = Kobject::dcast<Irq_sender*>(o);
+ Kobject_common *w = follow_link(o);
+ return snprintf(buf, max, " I=%3lx %s L=%lx T=%lx F=%x",
+ t->irq(), t->pin()->pin_type(), t->obj_id(),
+ w ? w->dbg_info()->dbg_id() : 0,
+ (unsigned)t->pin()->flags());
}
static
return NOTHING;
}
printf("\nState of IO APIC\n");
- Io_apic::dump();
+ for (unsigned i = 0; i < Io_apic::Max_ioapics; ++i)
+ {
+ Io_apic *a = Io_apic::apic(i);
+ if (!a->valid())
+ break;
+
+ a->dump();
+ }
// print global LAPIC state
unsigned khz;
char time_str[12];
- putstr("thread: ");
+ putstr("thread : ");
Jdb_kobject::print_uid(t, 3);
print_thread_uid_raw(t);
- printf("CPU %3u ", t->cpu());
+ printf("\tCPU: %u ", t->cpu());
printf("\tprio: %02x mode: %s\n",
t->sched()->prio(),
t->mode() & Context::Periodic ?
t->mode() & Context::Nonstrict ? "Per (IRT)" : "Per (SP)" : "Con");
- printf("state: %03lx ", t->state(false));
+ printf("state : %03lx ", t->state(false));
Jdb_thread::print_state_long(t);
- putstr("\n\nwait for: ");
+ putstr("\nwait for: ");
if (!t->partner())
- putstr("--- ");
+ putstr("--- ");
else
Jdb_thread::print_partner(t, 4);
- putstr(" polling: ");
+ putstr(" polling: ");
Jdb_thread::print_snd_partner(t, 3);
- putstr("\trcv descr: ");
+ putstr("rcv descr: ");
if (t->state(false) & Thread_ipc_receiving_mask)
printf("%08lx", t->rcv_regs()->from_spec());
t->sched()->left(), t->sched()->quantum(), Config::char_micro);
print_kobject(t, t->_pager.raw());
- putstr("\ttask: ");
+ putstr("\ttask : ");
if (t->space() == Kernel_task::kernel_task())
putstr(" kernel ");
else
print_kobject(static_cast<Task*>(t->space()));
- putstr("\tutcb: ");
- printf("%08lx", (Mword)t->utcb().kern());
-
putstr("\nexc-hndl: ");
print_kobject(t, t->_exc_handler.raw());
+ printf("\tUTCB : %08lx/%08lx",
+ (Mword)t->utcb().kern(), (Mword)t->utcb().usr().get());
+
#if 0
putstr("\tready lnk: ");
if (t->state(false) & Thread_ready)
putchar('\n');
- putstr("vCPU st: ");
+ putstr("vCPU : ");
if (t->state(false) & Thread_vcpu_enabled)
{
char st1[7];
char st2[7];
Vcpu_state *v = t->vcpu_state().kern();
- printf("c=%s s=%s sf=%c e-ip=%08lx e-sp=%08lx S=",
+ printf("%08lx/%08lx S=", (Mword)v, (Mword)t->vcpu_state().usr().get());
+ print_kobject(static_cast<Task*>(t->vcpu_user_space()));
+ putchar('\n');
+ printf("vCPU : c=%s s=%s sf=%c e-ip=%08lx e-sp=%08lx\n",
vcpu_state_str(v->state, st1, sizeof(st1)),
vcpu_state_str(v->_saved_state, st2, sizeof(st2)),
(v->sticky_flags & Vcpu_state::Sf_irq_pending) ? 'P' : '-',
v->_entry_ip, v->_entry_sp);
- print_kobject(static_cast<Task*>(t->vcpu_user_space()));
}
else
- putstr("---");
- putchar('\n');
+ putstr("---\nvCPU : ---\n");
Address ksp = is_current_thread ? ef->ksp()
: (Address)t->get_kernel_sp();
Thread_busy |
Thread_rcvlong_in_progress))
&& (!t->partner()->id().is_irq() ||
- t->partner()->id().irq() > Config::Max_num_irqs))
+ t->partner()->id().irq() > Config::Max_num_dirqs))
{
t_current = static_cast<Thread*>(t->partner());
redraw = true;
enum
{
Scheduling_irq = 26,
- scheduler_irq_vector = Scheduling_irq,
- Max_num_irqs = 64,
Max_num_dirqs = 32,
-
- Vkey_irq = 27,
- Tbuf_irq = 28,
};
};
enum
{
Scheduling_irq = 26,
- scheduler_irq_vector = Scheduling_irq,
- Max_num_irqs = 66,
Max_num_dirqs = 64,
- Vkey_irq = 64,
- Tbuf_irq = 65,
};
};
enum
{
Scheduling_irq = 40,
- scheduler_irq_vector = Scheduling_irq,
- Max_num_irqs = 130,
Max_num_dirqs = 128,
- Vkey_irq = 128,
- Tbuf_irq = 129,
};
};
enum
{
Scheduling_irq = 6,
- scheduler_irq_vector = Scheduling_irq,
- Max_num_irqs = 50,
Max_num_dirqs = 48,
- Vkey_irq = 48,
- Tbuf_irq = 49,
};
};
public:
enum
{
- Max_num_irqs = 66,
Max_num_dirqs = 64,
-
- Vkey_irq = 64,
- Tbuf_irq = 65,
-
Scheduling_irq = 1,
- scheduler_irq_vector = Scheduling_irq,
};
};
enum
{
Scheduling_irq = 37,
- scheduler_irq_vector = Scheduling_irq,
- Max_num_irqs = 98,
Max_num_dirqs = 96,
-
- Vkey_irq = 96,
- Tbuf_irq = 97,
};
};
public:
enum
{
- Max_num_irqs = 258,
Max_num_dirqs = 256,
-
- Vkey_irq = 256,
- Tbuf_irq = 257,
};
};
enum
{
Scheduling_irq = 36,
- scheduler_irq_vector = Scheduling_irq,
};
};
enum
{
Scheduling_irq = 29,
- scheduler_irq_vector = Scheduling_irq,
};
};
enum
{
Scheduling_irq = 14,
- scheduler_irq_vector = Scheduling_irq,
- Max_num_irqs = 67,
Max_num_dirqs = 65,
- Vkey_irq = 65,
- Tbuf_irq = 66,
};
};
public:
enum
{
- Max_num_irqs = 258,
- Max_num_dirqs = 256,
-
- Vkey_irq = 256,
- Tbuf_irq = 257,
-
+ Max_num_dirqs = 160,
Scheduling_irq = 29,
- scheduler_irq_vector = Scheduling_irq,
};
};
(e & 0x00020000)?'r':'w');
}
-PUBLIC template<typename T> inline
+PUBLIC inline
void FIASCO_NORETURN
-Thread::fast_return_to_user(Mword ip, Mword sp, T arg)
+Thread::fast_return_to_user(Mword ip, Mword sp, Vcpu_state *arg)
{
extern char __iret[];
+ assert_kdb((regs()->psr & Proc::Status_mode_mask) == Proc::Status_mode_user);
+
regs()->ip(ip);
regs()->sp(sp); // user-sp is in lazy user state and thus handled by
// fill_user_state()
regs()->psr &= ~Proc::Status_thumb;
{
- register Mword r0 asm("r0") = (Mword)arg;
+ register Vcpu_state *r0 asm("r0") = arg;
asm volatile
("mov sp, %0 \t\n"
{
#if 0 // Double PF detect
static unsigned long last_pfa = ~0UL;
- LOG_MSG_3VAL(current(),"PF", pfa, last_pfa, pc);
- if (last_pfa == pfa)
+ LOG_MSG_3VAL(current(),"PF", pfa, error_code, pc);
+ if (last_pfa == pfa || pfa == 0)
kdb_ke("DBF");
last_pfa = pfa;
#endif
}
PUBLIC inline NEEDS["fpu.h"]
-void
+bool
Context::vcpu_enter_kernel_mode(Vcpu_state *vcpu)
{
if (EXPECT_FALSE(state() & Thread_vcpu_enabled))
Fpu::enable();
space()->switchin_context(vcpu_user_space());
+ return true;
}
}
}
+ return false;
}
#include "receiver.h"
#include "idt.h"
#include "irq.h"
+#include "vkey.h"
enum
{
IMPLEMENT
unsigned
Dirq_io_apic::Chip::nr_irqs() const
-{ return Io_apic::nr_irqs(); }
+{ return Io_apic::total_irqs(); }
IMPLEMENT
void
Dirq_io_apic::Chip::setup(Irq_base *irq, unsigned irqnum)
{
+ unsigned apic_idx = Io_apic::find_apic(irqnum);
+ irqnum -= Io_apic::apic(apic_idx)->gsi_offset();
+
//irq->pin()->set_mode(Default_mode);
if (irq->pin()->get_mode() & Irq::Trigger_level)
- irq->pin()->replace<Pin_io_apic_level>(irqnum);
+ irq->pin()->replace<Pin_io_apic_level>(apic_idx, irqnum);
else
- irq->pin()->replace<Pin_io_apic_edge>(irqnum);
+ irq->pin()->replace<Pin_io_apic_edge>(apic_idx, irqnum);
}
IMPLEMENT
if (!Dirq_pic_pin::Chip::alloc(irq, irqnum))
return false;
- Io_apic_entry e = Io_apic::apic()->read_entry(irqnum);
+
+ unsigned apic_idx = Io_apic::find_apic(irqnum);
+ Io_apic *a = Io_apic::apic(apic_idx);
+ unsigned lirqn = irqnum - a->gsi_offset();
+
+
+ Io_apic_entry e = a->read_entry(lirqn);
e.vector(vector(irqnum));
- Io_apic::apic()->write_entry(irqnum, e);
+ a->write_entry(lirqn, e);
return true;
}
: Io_apic_entry::High_active;
}
-class Pin_io_apic_level : public Dirq_pic_pin
+class Pin_io_apic_level : public Irq_pin
{
public:
- explicit Pin_io_apic_level(unsigned irq) : Dirq_pic_pin(irq) {}
+ explicit Pin_io_apic_level(unsigned apic, unsigned irq)
+ { payload()[0] = apic + (irq << 16); }
+
+ unsigned irq() const { return payload()[0] >> 16; }
+ unsigned apic_idx() const { return payload()[0] & 0xffff; }
+ Io_apic *apic() const { return Io_apic::apic(apic_idx()); }
+ unsigned gsi() const { return apic()->gsi_offset() + irq(); }
};
class Pin_io_apic_edge : public Pin_io_apic_level
{
public:
- explicit Pin_io_apic_edge(unsigned irq) : Pin_io_apic_level(irq) {}
+ explicit Pin_io_apic_edge(unsigned apic, unsigned irq)
+ : Pin_io_apic_level(apic, irq) {}
};
+PUBLIC
+bool
+Pin_io_apic_level::check_debug_irq()
+{
+ return !Vkey::check_(gsi());
+}
+PUBLIC
+void
+Pin_io_apic_level::unbind_irq()
+{
+ do_mask();
+ disable();
+ Irq_chip::hw_chip->free(Irq::self(this), gsi());
+ replace<Sw_irq_pin>();
+}
PUBLIC
void
Pin_io_apic_level::disable()
{
extern char entry_int_apic_ignore[];
- unsigned vector = this->vector();
+ unsigned vector = Dirq_pic_pin::Chip::vector(gsi());
+ Dirq_pic_pin::Chip::vfree(Irq_base::self(this), vector);
Idt::set_entry(vector, Address(&entry_int_apic_ignore), false);
- disable_vector();
}
PUBLIC void
PUBLIC void
Pin_io_apic_edge::do_set_mode(unsigned mode)
{
- Io_apic_entry e = Io_apic::apic()->read_entry(irq());
+ Io_apic_entry e = apic()->read_entry(irq());
e.polarity(to_io_apic_polarity(mode));
e.trigger(to_io_apic_trigger(mode));
- Io_apic::apic()->write_entry(irq(), e);
+ apic()->write_entry(irq(), e);
if (mode & Irq::Trigger_level)
- new (this) Pin_io_apic_level(irq());
+ new (this) Pin_io_apic_level(apic_idx(), irq());
}
Pin_io_apic_level::do_mask()
{
assert (cpu_lock.test());
- Io_apic::mask(irq());
- Io_apic::sync();
+ apic()->mask(irq());
+ apic()->sync();
}
{
assert (cpu_lock.test());
__mask();
- Io_apic::mask(irq());
- Io_apic::sync();
+ apic()->mask(irq());
+ apic()->sync();
Apic::irq_ack();
}
Pin_io_apic_level::do_unmask()
{
assert (cpu_lock.test());
- Io_apic::unmask(irq());
+ apic()->unmask(irq());
}
PUBLIC void
Pin_io_apic_level::set_cpu(unsigned cpu)
{
- Io_apic::set_dest(irq(), Cpu::cpus.cpu(cpu).phys_id());
+ apic()->set_dest(irq(), Cpu::cpus.cpu(cpu).phys_id());
}
PUBLIC void
Pin_io_apic_level::do_set_mode(unsigned mode)
{
- Io_apic_entry e = Io_apic::apic()->read_entry(irq());
+ Io_apic_entry e = apic()->read_entry(irq());
e.polarity(to_io_apic_polarity(mode));
e.trigger(to_io_apic_trigger(mode));
- Io_apic::apic()->write_entry(irq(), e);
+ apic()->write_entry(irq(), e);
if (!(mode & Irq::Trigger_level))
- new (this) Pin_io_apic_edge(irq());
+ new (this) Pin_io_apic_edge(apic_idx(), irq());
}
IMPLEMENT inline
unsigned Fpu::state_align()
-{ return 0; }
+{ return 1; }
IMPLEMENT
void Fpu::init(unsigned)
Address offs;
Address a = _acpi_hpet->base_address.addr;
- Kmem::map_phys_page(a, Mem_layout::Hpet_page, false, true, &offs);
+ Address va = Mem_layout::alloc_io_vmem(Config::PAGE_SIZE);
+ assert (va);
+
+ Kmem::map_phys_page(a, va, false, true, &offs);
Kip::k()->add_mem_region(Mem_desc(a, a + 1023, Mem_desc::Reserved));
- _hpet = (Hpet_device *)(Mem_layout::Hpet_page + offs);
+ _hpet = (Hpet_device *)(va + offs);
_hpet->dump();
slowtraps:
#ifdef CONFIG_PF_UX
# define REG_GS CPU_GS
+# define REG_FS CPU_FS
#else
# define REG_GS %gs
+# define REG_FS %fs
#endif
- pushl %fs /* we save the segment regs in the trap */
+ pushl REG_FS /* we save the segment regs in the trap */
pushl REG_GS /* state, but we do not restore them. We */
pushl %ds /* rather reload them using */
pushl %es /* RESET_{KERNEL,USER}_SEGMENTS */
addl $4*2,%esp /* Pop ds, es segment registers */
popl REG_GS
- popl %fs /* Restore segment registers */
+ popl REG_FS /* Restore segment registers */
popa
addl $4*2,%esp /* Pop trap number and error code */
iret
call thread_restore_exc_state
ESP_TO_TCB_AT %ecx
- mov OFS__THREAD__USER_VCPU(%ecx), %eax /* vcpu state poiner from TCB */
- mov OFS__THREAD__VCPU_STATE(%ecx), %ecx /* vcpu state poiner from TCB */
+ mov OFS__THREAD__USER_VCPU(%ecx), %eax /* vcpu state pointer from TCB */
+ mov OFS__THREAD__VCPU_STATE(%ecx), %ecx /* vcpu state pointer from TCB */
addl $(VAL__SIZEOF_TRAP_STATE - 20), %ecx /* skip return frame */
mov SCRATCH_REGISTER_SIZE(%esp), %edx
mov %edx, (%ecx) /* EIP */
/*add SCRATCH_REGISTER_SIZE, %esp*/
-#ifdef CONFIG_PF_UX
-# define REG_GS CPU_GS
-#else
-# define REG_GS %gs
-#endif
-
- pushl %fs /* we save the segment regs in the trap */
+ pushl REG_FS /* we save the segment regs in the trap */
pushl REG_GS /* state, but we do not restore them. We */
pushl %ds /* rather reload them using */
pushl %es /* RESET_{KERNEL,USER}_SEGMENTS */
//popl %es
//popl %ds
popl REG_GS
- popl %fs
+ popl REG_FS
popa
addl $(2*4), %esp
iret
Space_index = 0xea000000, ///< % 4MB
Service_page = 0xeac00000, ///< % 4MB global mappings
Local_apic_page = Service_page + 0x0000, ///< % 4KB
- Io_apic_page = Service_page + 0x1000,
Kmem_tmp_page_1 = Service_page + 0x2000, ///< % 4KB size 8KB
Kmem_tmp_page_2 = Service_page + 0x4000, ///< % 4KB size 8KB
Tbuf_status_page = Service_page + 0x6000, ///< % 4KB
Tbuf_ustatus_page = Tbuf_status_page,
- Hpet_page = Service_page + 0x7000, ///< % 4KB
Jdb_bench_page = Service_page + 0x8000, ///< % 4KB
Jdb_bts_area = Service_page + 0xf000, ///< % 4KB size 0x81000
Utcb_ptr_page = Service_page + 0xfd000, ///< % 4KB
Tbuf_buffer_area = Service_page + 0x200000, ///< % 2MB
Tbuf_ubuffer_area = Tbuf_buffer_area,
// 0xeb800000-0xec000000 (8MB) free
- __free_1 = 0xec000000, ///< % 4MB
- __free_2 = 0xec400000, ///< % 4MB
- __free_3 = 0xec800000, ///< % 4MB
+ Io_map_area_start = 0xec000000,
+ Io_map_area_end = 0xec800000,
__free_4 = 0xec880000, ///< % 4MB
Jdb_debug_start = 0xecc00000, ///< % 4MB JDB symbols/lines
Jdb_debug_end = 0xee000000, ///< % 4MB
{
assert_kdb(cpu_lock.test());
assert_kdb(current() == this);
+ // Returning to user mode: requestor privilege level in CS must be 3
+ // (Fiasco-UX emulates user mode via ptrace, so the check is skipped there).
+ // Note: '==' binds tighter than '&', so the CPL mask needs explicit parens;
+ // without them the expression degenerates to 'cs() & 1'.
+ assert_kdb(Config::Is_ux || ((regs()->cs() & 3) == 3));
regs()->ip(ip);
regs()->sp(sp);
Space_index = 200,
Service_page = 0xffffffffeac00000UL, ///< % 4MB global mappings
Local_apic_page = Service_page + 0x0000, ///< % 4KB
- Io_apic_page = Service_page + 0x1000, ///< % 4KB
Kmem_tmp_page_1 = Service_page + 0x2000, ///< % 4KB size 8KB
Kmem_tmp_page_2 = Service_page + 0x4000, ///< % 4KB size 8KB
Tbuf_status_page = Service_page + 0x6000, ///< % 4KB
Tbuf_ustatus_page = Tbuf_status_page,
- Hpet_page = Service_page + 0x7000, ///< % 4KB
Jdb_bench_page = Service_page + 0x8000, ///< % 4KB
Utcb_ptr_page = Service_page + 0xfd000, ///< % 4KB
Utcb_ptr_offset = Utcb_ptr_page,
Tbuf_buffer_area = Service_page + 0x200000, ///< % 2MB
Tbuf_ubuffer_area = Tbuf_buffer_area,
// 0xffffffffeb800000-0xfffffffffec000000 (8MB) free
- ___free_1 = 0xffffffffec000000UL, ///< % 4MB
- ___free_2 = 0xffffffffec400000UL, ///< % 4MB
+ Io_map_area_start = 0xffffffffec000000UL,
+ Io_map_area_end = 0xffffffffec800000UL,
___free_3 = 0xffffffffec800000UL, ///< % 4MB
___free_4 = 0xffffffffec880000UL, ///< % 4MB
Jdb_debug_start = 0xffffffffecc00000UL, ///< % 4MB JDB symbols/lines
void FIASCO_NORETURN
Thread::fast_return_to_user(Mword ip, Mword sp, T arg)
{
+ assert_kdb(cpu_lock.test());
+ assert_kdb(current() == this);
+ // Returning to user mode: requestor privilege level in CS must be 3.
+ // Note: '==' binds tighter than '&', so the CPL mask needs explicit parens;
+ // without them the expression degenerates to 'cs() & 1'.
+ assert_kdb((regs()->cs() & 3) == 3);
+
regs()->ip(ip);
regs()->sp(sp);
asm volatile
}
IMPLEMENT inline
-Mword
+Mword
Thread::user_sp() const
{ return exception_triggered()?_exc_cont.sp(regs()):regs()->sp(); }
// can access user memory directly
Access_user_mem = Access_user_mem_direct,
- Max_num_dirqs = 32,
-
- Tbuf_irq = Max_num_dirqs + 0,
-
- Max_num_irqs = Max_num_dirqs + 4,
+ Max_num_dirqs = 128,
/// Timer vector used with APIC timer or IOAPIC
Apic_timer_vector = APIC_IRQ_BASE + 0,
load_segments();
// update the global UTCB pointer to make the thread find its UTCB
- // using gs:[0]
+ // using fs:[0]
Mem_layout::user_utcb_ptr(current_cpu()) = utcb().usr();
}
return val;
}
-PUBLIC static inline
-Unsigned32
-Cpu::get_fs()
-{
- Unsigned32 val;
- asm volatile ("mov %%fs, %0" : "=rm" (val));
- return val;
-}
-
PUBLIC static inline
void
Cpu::set_ds(Unsigned32 val)
Cpu::set_es(Unsigned32 val)
{ asm volatile ("mov %0, %%es" : : "rm" (val)); }
-PUBLIC static inline
-void
-Cpu::set_fs(Unsigned32 val)
-{ asm volatile ("mov %0, %%fs" : : "rm" (val)); }
-
-
//----------------------------------------------------------------------------
IMPLEMENTATION[ia32, amd64]:
}
+PUBLIC static inline
+Unsigned32
+Cpu::get_fs()
+{ Unsigned32 val; asm volatile ("mov %%fs, %0" : "=rm" (val)); return val; }
+
PUBLIC static inline
Unsigned32
Cpu::get_gs()
{ Unsigned32 val; asm volatile ("mov %%gs, %0" : "=rm" (val)); return val; }
+PUBLIC static inline
+void
+Cpu::set_fs(Unsigned32 val)
+{ asm volatile ("mov %0, %%fs" : : "rm" (val)); }
+
PUBLIC static inline
void
Cpu::set_gs(Unsigned32 val)
unsigned nr_irqs() const { return 16; }
bool valloc(Irq_base *irq, unsigned vector);
- bool vfree(Irq_base *irq, unsigned vector);
+ static bool vfree(Irq_base *irq, unsigned vector);
virtual void disable_irq(unsigned vector);
protected:
//
// initialize interrupts
//
- if (!Io_apic::apic())
+ if (!Io_apic::active())
{
Irq_chip::hw_chip->reserve(2); // reserve cascade irq
Pic::enable_locked(2); // allow cascaded irqs
}
else
{
- if (!Io_apic::apic() && !user_irq0
+ if (!Io_apic::active() && !user_irq0
&& !Config::scheduler_mode == Config::SCHED_PIT)
Irq_chip::hw_chip->reserve(0); // reserve irq0 even though
}
Kmem::map_phys_page(Address phys, Address virt,
bool cached, bool global, Address *offs=0)
{
- Pdir::Iter i = kdir->walk(Virt_addr(virt));
+ Pdir::Iter i = kdir->walk(Virt_addr(virt), 100, Mapped_allocator::allocator()
+);
Pte_base *e = i.e;
Mword pte = phys & Config::PAGE_MASK;
{
public:
enum { Io_port_max = (1UL << 16) };
+
+ static Address _io_map_ptr;
};
IMPLEMENTATION [ia32 || amd64 || ux]:
#include "static_assert.h"
+Address Mem_layout::_io_map_ptr = Mem_layout::Io_map_area_end;
+
+PUBLIC static inline
+Address
+Mem_layout::alloc_io_vmem(unsigned long bytes)
+{
+ bytes = (bytes + Config::PAGE_SIZE - 1) & ~(Config::PAGE_SIZE - 1);
+ if (_io_map_ptr - bytes < Io_map_area_start)
+ return 0;
+
+ _io_map_ptr -= bytes;
+ return _io_map_ptr;
+}
+
PUBLIC static inline NEEDS["static_assert.h"]
template< typename V >
bool
{
unsigned const pic_pin
= Io_apic::legacy_override(Config::scheduler_irq_vector - Pic_base);
- Io_apic_entry e = Io_apic::apic()->read_entry(pic_pin);
+ // assume the legacy irqs are routed to IO-APIC 0
+ Io_apic_entry e = Io_apic::apic(0)->read_entry(pic_pin);
e.vector(Config::Apic_timer_vector);
- Io_apic::apic()->write_entry(pic_pin, e);
+ Io_apic::apic(0)->write_entry(pic_pin, e);
Config::scheduler_irq_vector = Config::Apic_timer_vector;
}
}
int r = do_resume_vcpu(ctxt, vcpu, vmcb_s);
- // test for error or non-IRQ exit resason
+ // test for error or non-IRQ exit reason
if (r <= 0)
return r;
// leave the kernel to not overwrite the vcpu-regs
// with bogus state.
Thread *t = nonull_static_cast<Thread*>(ctxt);
- if (t->exception_triggered())
- t->fast_return_to_user(vcpu->_entry_ip, vcpu->_entry_sp, t->vcpu_state().usr().get());
+
+ if (t->continuation_test_and_restore())
+ t->fast_return_to_user(vcpu->_entry_ip, vcpu->_entry_sp,
+ t->vcpu_state().usr().get());
}
}
int r = do_resume_vcpu(ctxt, vcpu, vmcs_s);
- // test for error or non-IRQ exit resason
+ // test for error or non-IRQ exit reason
if (r <= 0)
return r;
// leave the kernel to not overwrite the vcpu-regs
// with bogus state.
Thread *t = nonull_static_cast<Thread*>(ctxt);
- if (t->exception_triggered())
- t->fast_return_to_user(vcpu->_entry_ip, vcpu->_entry_sp, t->vcpu_state().usr().get());
+ if (t->continuation_test_and_restore())
+ t->fast_return_to_user(vcpu->_entry_ip, vcpu->_entry_sp,
+ t->vcpu_state().usr().get());
}
}
-
class Io_apic
{
-private:
- Unsigned32 volatile adr;
- Unsigned32 dummy[3];
- Unsigned32 volatile data;
+public:
+ enum { Max_ioapics = 6 };
- static Io_apic *_apic;
+private:
+ struct Apic
+ {
+ Unsigned32 volatile adr;
+ Unsigned32 dummy[3];
+ Unsigned32 volatile data;
+ } __attribute__((packed));
+
+ Apic *_apic;
+ Spin_lock<> _l;
+ unsigned _offset;
+
+ static Io_apic _apics[Max_ioapics];
+ static unsigned _nr_irqs;
static Acpi_madt const *_madt;
- static Spin_lock<> _l;
-} __attribute__((packed));
+};
IMPLEMENTATION:
#include "kip.h"
#include "lock_guard.h"
-Io_apic *Io_apic::_apic;
+Io_apic Io_apic::_apics[Io_apic::Max_ioapics];
Acpi_madt const *Io_apic::_madt;
-Spin_lock<> Io_apic::_l;
-
+unsigned Io_apic::_nr_irqs;
PRIVATE inline NEEDS["lock_guard.h"]
Io_apic::read(int reg)
{
Lock_guard<typeof(_l)> g(&_l);
- adr = reg;
+ _apic->adr = reg;
asm volatile ("": : :"memory");
- return data;
+ return _apic->data;
}
PRIVATE inline NEEDS["lock_guard.h"]
{
register Mword tmp;
Lock_guard<typeof(_l)> g(&_l);
- adr = reg;
+ _apic->adr = reg;
asm volatile ("": : :"memory");
- tmp = data;
+ tmp = _apic->data;
tmp &= ~del_bits;
tmp |= set_bits;
- data = tmp;
+ _apic->data = tmp;
}
PRIVATE inline NEEDS["lock_guard.h"]
Io_apic::write(int reg, Mword value)
{
Lock_guard<typeof(_l)> g(&_l);
- adr = reg;
+ _apic->adr = reg;
asm volatile ("": : :"memory");
- data = value;
+ _apic->data = value;
}
PRIVATE inline
}
printf("MADT = %p\n", _madt);
- Acpi_madt::Io_apic const *ioapic
- = static_cast<Acpi_madt::Io_apic const *>(_madt->find(Acpi_madt::IOAPIC, 0));
+ int n_apics = 0;
- if (!ioapic)
+ for (n_apics = 0;
+ Acpi_madt::Io_apic const *ioapic = static_cast<Acpi_madt::Io_apic const *>(_madt->find(Acpi_madt::IOAPIC, n_apics));
+ ++n_apics)
+ {
+
+ if (n_apics >= Max_ioapics)
+ {
+ printf("Maximum number of IO-APICs exceeded ignore further IO-APICs\n");
+ break;
+ }
+
+ printf("IO-APIC[%2d]: struct: %p adr=%x\n", n_apics, ioapic, ioapic->adr);
+
+ Address offs;
+ Address va = Mem_layout::alloc_io_vmem(Config::PAGE_SIZE);
+ assert (va);
+
+ Kmem::map_phys_page(ioapic->adr, va, false, true, &offs);
+
+ Kip::k()->add_mem_region(Mem_desc(ioapic->adr, ioapic->adr + Config::PAGE_SIZE -1, Mem_desc::Reserved));
+
+
+ Io_apic *apic = Io_apic::apic(n_apics);
+ apic->_apic = (Io_apic::Apic*)(va + offs);
+ apic->write(0, 0);
+ unsigned ne = apic->num_entries();
+ apic->_offset = _nr_irqs;
+ _nr_irqs += ne + 1;
+
+ for (unsigned i = 0; i <= ne; ++i)
+ {
+ int v = 0x20+i;
+ Io_apic_entry e(v, Io_apic_entry::Fixed, Io_apic_entry::Physical,
+ Io_apic_entry::High_active, Io_apic_entry::Edge, 0);
+ apic->write_entry(i, e);
+ }
+
+ printf("IO-APIC[%2d]: pins %u\n", n_apics, ne);
+ apic->dump();
+ }
+
+ if (!n_apics)
{
printf("IO-APIC: Could not find IO-APIC in MADT, skip init\n");
return false;
}
- printf("IO-APIC: struct: %p adr=%x\n", ioapic, ioapic->adr);
+
printf("IO-APIC: dual 8259: %s\n", _madt->apic_flags & 1 ? "yes" : "no");
- unsigned tmp = 0;
- for (;;++tmp)
+ for (unsigned tmp = 0;;++tmp)
{
Acpi_madt::Irq_source const *irq
= static_cast<Acpi_madt::Irq_source const *>(_madt->find(Acpi_madt::Irq_src_ovr, tmp));
printf("IO-APIC: ovr[%2u] %02x -> %x\n", tmp, irq->src, irq->irq);
}
- if (tmp)
- printf("IO-APIC: NOTE IRQ overrides are ignored!\n");
-
- Address offs;
- Kmem::map_phys_page(ioapic->adr, Mem_layout::Io_apic_page,
- false, true, &offs);
-
- Kip::k()->add_mem_region(Mem_desc(ioapic->adr, ioapic->adr + Config::PAGE_SIZE -1, Mem_desc::Reserved));
-
-
- Io_apic *apic = (Io_apic*)(Mem_layout::Io_apic_page + offs);
- _apic = apic;
- apic->write(0, 0);
- unsigned ne = apic->num_entries();
-
- for (unsigned i = 0; i <= ne; ++i)
- {
- int v = 0x20+i;
- Io_apic_entry e(v, Io_apic_entry::Fixed, Io_apic_entry::Physical,
- Io_apic_entry::High_active, Io_apic_entry::Edge, 0);
- apic->write_entry(i, e);
- }
-
- printf("IO-APIC: pins %u\n", ne);
- dump();
-
return true;
};
+PUBLIC static
+unsigned
+Io_apic::total_irqs()
+{ return _nr_irqs; }
+
PUBLIC static
unsigned
Io_apic::legacy_override(unsigned i)
return i;
}
-PUBLIC static
+PUBLIC
void
Io_apic::dump()
{
- unsigned ne = _apic->num_entries();
+ unsigned ne = num_entries();
for (unsigned i = 0; i <= ne; ++i)
{
- Io_apic_entry e = _apic->read_entry(i);
+ Io_apic_entry e = read_entry(i);
printf(" PIN[%2u%c]: vector=%2x, del=%u, dm=%s, dest=%u (%s, %s)\n",
i, e.mask() ? 'm' : '.',
e.vector(), e.delivery(), e.dest_mode() ? "logical" : "physical",
PUBLIC static inline
bool
Io_apic::active()
-{ return _apic; }
+{ return _apics[0]._apic; }
-PUBLIC static inline NEEDS["kdb_ke.h", Io_apic::modify]
+PUBLIC inline
+bool
+Io_apic::valid() const { return _apic; }
+
+PUBLIC inline NEEDS["kdb_ke.h", Io_apic::modify]
void
Io_apic::mask(unsigned irq)
{
//assert_kdb(irq <= _apic->num_entries());
- _apic->modify(0x10 + irq * 2, 1UL << 16, 0);
+ modify(0x10 + irq * 2, 1UL << 16, 0);
}
-PUBLIC static inline NEEDS["kdb_ke.h", Io_apic::modify]
+PUBLIC inline NEEDS["kdb_ke.h", Io_apic::modify]
void
Io_apic::unmask(unsigned irq)
{
//assert_kdb(irq <= _apic->num_entries());
- _apic->modify(0x10 + irq * 2, 0, 1UL << 16);
+ modify(0x10 + irq * 2, 0, 1UL << 16);
}
-PUBLIC static inline NEEDS["kdb_ke.h", Io_apic::read]
+PUBLIC inline NEEDS["kdb_ke.h", Io_apic::read]
bool
Io_apic::masked(unsigned irq)
{
//assert_kdb(irq <= _apic->num_entries());
- return _apic->read(0x10 + irq * 2) & (1UL << 16);
+ return read(0x10 + irq * 2) & (1UL << 16);
}
-PUBLIC static inline NEEDS[Io_apic::read]
+PUBLIC inline NEEDS[Io_apic::read]
void
Io_apic::sync()
{
(void)_apic->data;
}
-PUBLIC static inline NEEDS["kdb_ke.h", Io_apic::modify]
+PUBLIC inline NEEDS["kdb_ke.h", Io_apic::modify]
void
Io_apic::set_dest(unsigned irq, Mword dst)
{
//assert_kdb(irq <= _apic->num_entries());
- _apic->modify(0x11 + irq * 2, dst & (~0UL << 24), ~0UL << 24);
+ modify(0x11 + irq * 2, dst & (~0UL << 24), ~0UL << 24);
}
-PUBLIC static inline NEEDS[Io_apic::num_entries]
+PUBLIC inline NEEDS[Io_apic::num_entries]
unsigned
Io_apic::nr_irqs()
-{ return _apic->num_entries() + 1; }
+{ return num_entries() + 1; }
+
+PUBLIC inline
+unsigned
+Io_apic::gsi_offset() const { return _offset; }
PUBLIC static inline
Io_apic *
-Io_apic::apic()
-{ return _apic; }
+Io_apic::apic(unsigned idx)
+{ return &_apics[idx]; }
+
+/**
+ * Find the IO-APIC responsible for a global system interrupt number.
+ * \param irqnum  global IRQ (GSI) number
+ * \return index of the IO-APIC whose GSI range contains irqnum,
+ *         or 0 if no initialized IO-APIC matches.
+ */
+PUBLIC static
+unsigned
+Io_apic::find_apic(unsigned irqnum)
+{
+  // Scan from the highest index down so the APIC with the largest GSI
+  // offset that is still <= irqnum wins.
+  for (unsigned i = Max_ioapics; i > 0; --i)
+    {
+      // '<=': an irqnum equal to the offset is this APIC's first pin
+      // (local pin 0); '<' would misroute it to the previous APIC.
+      if (_apics[i-1]._apic && _apics[i-1]._offset <= irqnum)
+        return i - 1;
+    }
+  return 0;
+}
return commit_result(-L4_err::EInval);
irq->pin()->unbind_irq();
-
+
if (!c->alloc(irq, irqnum & ~Msi_bit))
return commit_result(-L4_err::EPerm);
public:
enum
{
-
Max_num_dirqs = Pic::IRQ_MAX,
- Vkey_irq = Max_num_dirqs,
- Tbuf_irq = Max_num_dirqs + 1,
- Max_num_irqs = Max_num_dirqs + 2,
};
};
enum
{
Max_num_dirqs = 3,
- Vkey_irq = Max_num_dirqs,
- Tbuf_irq = Max_num_dirqs + 1,
- Max_num_irqs = Max_num_dirqs + 2,
};
};
Thread::fast_return_to_user(Mword ip, Mword sp, T arg)
{
(void)ip; (void)sp; (void)arg;
+ //assert_kdb(check that exiting privs are user privs);
// XXX: UNIMPLEMENTED
panic("__builtin_trap()");
}
void
sys_ipc_wrapper()
{
- // FIXME: use UTCB from user, not the kernel var (remove access utcb
assert_kdb (!(current()->state() & Thread_drq_ready));
Thread *curr = current_thread();
Obj_cap obj = f->ref();
Utcb *utcb = curr->utcb().access(true);
- // printf("sys_invoke_object(f=%p, obj=%x)\n", f, f->obj_ref());
unsigned char rights;
Kobject_iface *o = obj.deref(&rights);
L4_msg_tag e;
#if defined(CONFIG_ARM)
DUMP_OFFSET (THREAD, UTCB_SIZE, sizeof(Utcb))
#else
- DUMP_MEMBER1 (THREAD, Context, _vcpu_state, VCPU_STATE)
+ DUMP_MEMBER1 (THREAD, Context, _vcpu_state._k, VCPU_STATE)
#endif
#if 0
DUMP_MEMBER1 (SCHED_CONTEXT, Sched_context,_owner, OWNER)
if (_exc_cont.valid())
return 1;
- vcpu_enter_kernel_mode(vcpu);
+ if (vcpu_enter_kernel_mode(vcpu))
+ {
+ // enter_kernel_mode has switched the address space from user to
+ // kernel space, so reevaluate the address of the VCPU state area
+ vcpu = vcpu_state().access();
+ }
+
spill_user_state();
LOG_TRACE("VCPU events", "vcpu", this, __context_vcpu_log_fmt,
Vcpu_log *l = tbe->payload<Vcpu_log>();
Thread::exception_triggered() const
{ return _exc_cont.valid(); }
+PUBLIC inline
+bool
+Thread::continuation_test_and_restore()
+{
+ bool v = _exc_cont.valid();
+ if (v)
+ _exc_cont.restore(regs());
+ return v;
+}
+
//
// state requests/manipulation
//
enum {
Scheduling_irq = 0,
Max_num_dirqs = 16,
- Max_num_irqs = Max_num_dirqs + 4,
- Tbuf_irq = Max_num_dirqs + 1,
Is_ux = 1,
};
};
static Tss *tss asm ("CPU_TSS");
static int msr_dev;
static unsigned long _gs asm ("CPU_GS");
+ static unsigned long _fs asm ("CPU_FS");
static Mword _kern_ds asm ("KERN_DS");
static Mword _kern_es asm ("KERN_ES");
Proc::Status volatile Proc::virtual_processor_state = 0;
Tss *Cpu::tss;
int Cpu::msr_dev = -1;
-unsigned long Cpu::_gs; // XXX percpu
+unsigned long Cpu::_gs; // for mp: percpu
+unsigned long Cpu::_fs; // for mp: percpu
unsigned long Cpu::_kern_ds;
unsigned long Cpu::_kern_es;
Cpu::debugctl_disable()
{}
+PUBLIC static inline
+void
+Cpu::set_fs(Unsigned32 val)
+{ _fs = val; }
+
PUBLIC static inline
void
Cpu::set_gs(Unsigned32 val)
{ _gs = val; }
+PUBLIC static inline
+Unsigned32
+Cpu::get_fs()
+{ return _fs; }
+
PUBLIC static inline
Unsigned32
Cpu::get_gs()
Caps_end = 0x5f000000,
Idt = 0x5f001000,
Tbuf_status_page = 0x5f002000, ///< % 4KB for jdb_tbuf
+ Io_map_area_start = 0x5f003000,
+ Io_map_area_end = 0x5f006000,
Tbuf_buffer_area = 0x5f200000, ///< % 2MB tracebuffer
Io_bitmap = 0x5f800000, ///< % 4MB dummy
_Free_1 = 0x5fc00000, ///< % 4MB dummy
Caps_end = 0x2f000000,
Idt = 0x2f001000,
Tbuf_status_page = 0x2f002000, ///< % 4KB for jdb_tbuf
+ Io_map_area_start = 0x2f003000,
+ Io_map_area_end = 0x2f006000,
Tbuf_buffer_area = 0x2f200000, ///< % 2MB tracebuffer
Io_bitmap = 0x2f800000, ///< % 4MB dummy
Glibc_mmap_start = 0x40000000, ///< fixed, Linux kernel spec.
" Fixing up!\n");
regs.xds = Cpu::kern_ds();
regs.xes = Cpu::kern_es();
- regs.xfs = 0;
+ regs.xgs = 0;
check(ptrace (PTRACE_SETREGS, pid, NULL, ®s));
}
else
# files or any other data file you might want to load. Note that the bin and
# lib directories of the build-tree are automatically added to the search
# path.
-# MODULES_SEARCH_PATH = /path/to/cfgs:/path/to/foo:..
+# MODULE_SEARCH_PATH = /path/to/cfgs:/path/to/foo:..
#
# 'image' just builds what's configured:
# - default: just ELF
static void show_progress(int done, int len)
{
- int r = printf("%d%%", (done * 100) / len);
+ /* Widen before multiplying so 'done * 100' cannot overflow int for large
+  * files; %llu matches the unsigned long long operand (%lld would pair a
+  * signed conversion with an unsigned argument -- undefined behavior). */
+ int r = printf("%llu%%", ((unsigned long long)done * 100) / len);
while (r-- > 0)
putchar('\b');
fflush(NULL);
if (!compressed_file)
return start;
- printf("  Uncompressing %s from %p to %p (%d to %d bytes, %+d%%).\n",
+ printf("  Uncompressing %s from %p to %p (%d to %d bytes, %+lld%%).\n",
  name, start, destbuf, size, size_uncompressed,
- 100*size_uncompressed/size - 100);
+ /* widen via signed long long: keeps the intermediate product from
+  * overflowing int, matches the signed %+lld conversion, and lets a
+  * negative delta (data that grew when "compressed") print correctly
+  * instead of wrapping as an unsigned value */
+ 100*(long long)size_uncompressed/size - 100);
// Add 10 to detect too short given size
if ((read_size = grub_read(destbuf, size_uncompressed + 10))
#if defined(ARCH_x86) || defined(ARCH_amd64)
-static unsigned long gs;
+static unsigned long fs;
static unsigned long ds;
asm
static void setup_user_state_arch(L4vcpu::Vcpu *v)
{
- asm volatile ("mov %%gs, %0" : "=r"(gs));
+ asm volatile ("mov %%fs, %0" : "=r"(fs));
asm volatile ("mov %%ds, %0" : "=r"(ds));
#ifndef __amd64__
v->r()->gs = ds;
{
asm volatile ("mov %0, %%es \t\n"
"mov %0, %%ds \t\n"
- "mov %1, %%gs \t\n"
- : : "r"(ds), "r"(gs));
+ "mov %1, %%fs \t\n"
+ : : "r"(ds), "r"(fs));
}
#elif defined(ARCH_arm)
L4Re::Env::env()->factory());
enum {
- Max_search_paths = 10,
+ Max_search_paths = 20,
};
static char *search_paths[Max_search_paths];
int
main(int argc, char *argv[])
{
- setup(argc, argv);
try
{
+ setup(argc, argv);
+
int err = mount("/", fprov_prefix, "fuxfs", 0, 0);
if (err == -1)
{
l4_umword_t msg = 0;
if (l4_error(system_icu()->icu->msi_info(msi, &msg)) < 0)
{
- printf("WARNING: could not get MSI message, use noremal IRQ\n");
+ printf("WARNING: could not get MSI message, use normal IRQ\n");
continue;
}
/* ignored */
return;
}
+ else if (ev->type == EV_SYN)
+ {
+ /* Pass through */
+ }
else
{
printf("handle_event: Unknown event type %d\n", ev->type);
typename RM::Region_handler::Ops::Map_result result;
if (int err = n->second.map(addr, n->first, writable, &result))
{
- Dbg(Dbg::Warn, "rm").printf("mapping for pf failed with %d @%lx pc=%lx\n",
+ Dbg(Dbg::Warn, "rm").printf("mapping for page fault failed with %d @%lx pc=%lx\n",
err, addr, pc);
// generate exception
ios << (l4_umword_t)~0;
* for the current thread.
* \param tag Tag to use, returned by l4_thread_vcpu_resume_start()
*
- * \return System call result message tag.
+ * \return System call result message tag. In extended vCPU mode and when
+ * the virtual interrupts are cleared, the return code 1 flags an incoming
+ * IPC message, whereas 0 indicates a VM exit.
*
* To resume into another address space the capability to the target task
* must be set in the vCPU-state (\see l4_vcpu_state_t). The task needs
endif
STDCXX_CONTRIB_DIR = $(STDCXX_PKG_DIR)/contrib/libstdc++-v3-$(STDCXX_CONTRIB_VERSION)
+DEFINES += -Dlinux
CPU_H_x86 := i386
CPU_H_ppc32 := powerpc
/**
 * \brief Find a device by the HID ACPI conforming or L4Io static name.
*
- * \param vbus capability of the system bus
- * \param parent handle to the parent to start the search
- * \retval child handle to the found device
- * \param hid HID name of the device
+ * \param vbus Capability of the system bus
+ * \param parent Handle to the parent to start the search
+ * \retval child Handle to the found device
+ * \param hid HID name of the device
* \retval devinfo Device information structure (might be NULL)
* \retval reshandle Resource handle (might be NULL)
*
/**
* \brief Find next child following \a child.
*
- * \param vbus capability of the system bus
- * \param parent handle to the parent device (use 0 for the system bus)
- * \param child handle to the child device (use 0 to get the first
+ * \param vbus Capability of the system bus
+ * \param parent Handle to the parent device (use 0 for the system bus)
+ * \param child Handle to the child device (use 0 to get the first
* child)
- * \retval next handle to the successor of child
+ * \retval next Handle to the successor of child
* \retval devinfo device information (might be NULL)
*
 * \return 0 on success, else failure
/**
* \brief Iterate over the resources of a device
*
- * \param vbus capability of the system bus
- * \param dev handle of the device
- * \retval res_idx Index of the resource, the number of resources is
- * availabnle in the devinfo from get device functions.
- * \retval res descriptor of the resource
+ * \param vbus Capability of the system bus
+ * \param dev Handle of the device
+ * \retval res_idx Index of the resource, the number of resources is
+ *                  available in the devinfo from get device functions.
+ * \retval res Descriptor of the resource
*
 * \return 0 on success, else failure
*/
/**
* \brief Request a resource of a specific type
*
- * \param vbus capability of the system bus
- * \param res descriptor of the resource
- * \param flags optional flags
+ * \param vbus Capability of the system bus
+ * \param res Descriptor of the resource
+ * \param flags Optional flags
*
 * \return 0 on success, else failure
*
/**
* \brief Release a previously requested resource
*
- * \param vbus capability of the system bus
- * \param res descriptor of the resource
+ * \param vbus Capability of the system bus.
+ * \param res Descriptor of the resource.
*
 * \return 0 on success, else failure
*/
int L4_CV
l4vbus_release_resource(l4_cap_idx_t vbus, l4vbus_resource_t *res);
+/**
+ * \brief Get capability of ICU.
+ *
+ * \param vbus Capability of the system bus.
+ * \param icu ICU device handle.
+ * \param cap Capability slot for the capability.
+ *
+ * \return 0 on success, else failure
+ */
int L4_CV
l4vbus_vicu_get_cap(l4_cap_idx_t vbus, l4vbus_device_handle_t icu,
- l4_cap_idx_t res);
+ l4_cap_idx_t cap);
__END_DECLS
typedef l4_addr_t l4vbus_paddr_t;
typedef struct {
- l4_uint16_t type; /**< resource type */
- l4_uint16_t flags;
- l4vbus_paddr_t start; /**< start of res. range */
+ l4_uint16_t type; /**< Resource type, see l4vbus_resource_type_t */
+ l4_uint16_t flags; /**< Flags */
+ l4vbus_paddr_t start; /**< Start of res. range */
l4vbus_paddr_t end; /**< (inclusive) end of res. range */
} l4vbus_resource_t;
L4VBUS_RESOURCE_MAX, /**< Maximum resource id */
};
-enum {
+enum l4vbus_consts_t {
L4VBUS_DEV_NAME_LEN = 64,
L4VBUS_MAX_DEPTH = 100,
};
typedef struct {
- int type; /**< type */
- char name[L4VBUS_DEV_NAME_LEN]; /**< name */
- unsigned num_resources; /**< resources count */
- unsigned flags;
+ int type; /**< Type */
+ char name[L4VBUS_DEV_NAME_LEN]; /**< Name */
+ unsigned num_resources; /**< Resources count */
+ unsigned flags; /**< Flags */
} l4vbus_device_t;
-enum
-{
+enum l4vbus_device_flags_t {
L4VBUS_DEVICE_F_CHILDREN = 0x10,
};
{
public:
State() {}
+
+ /**
+ * \brief Initialize state.
+ *
+ * \param v Initial state.
+ */
explicit State(l4vcpu_state_t v) : _s(v) {}
/**
class Vcpu : private l4_vcpu_state_t
{
public:
+ /**
+ * \brief IRQ status type.
+ */
typedef l4vcpu_irq_state_t Irq_state;
/**
/**
* \brief Enable the vCPU for event delivery.
*
+ * \param utcb The UTCB to use.
* \param do_event_work_cb Call-back function that is called in case an
* event (such as an interrupt) is pending.
+ * \param setup_ipc Call-back function that is called before an
+ * IPC operation is called.
*/
void irq_enable(l4_utcb_t *utcb, l4vcpu_event_hndl_t do_event_work_cb,
l4vcpu_setup_ipc_t setup_ipc) throw()
* \brief Restore a previously saved IRQ/event state.
*
* \param s IRQ state to be restored.
+ * \param utcb The UTCB to use.
* \param do_event_work_cb Call-back function that is called in case an
* event (such as an interrupt) is pending.
+ * \param setup_ipc Call-back function that is called before an
+ * IPC operation is called.
*/
void irq_restore(Irq_state s, l4_utcb_t *utcb,
l4vcpu_event_hndl_t do_event_work_cb,
l4vcpu_setup_ipc_t setup_ipc) throw()
{ l4vcpu_irq_restore(this, s, utcb, do_event_work_cb, setup_ipc); }
+ /**
+ * \brief Halt/block the vCPU.
+ *
+ * \param utcb The UTCB to use.
+ * \param do_event_work_cb Call-back function that is called in case an
+ * event (such as an interrupt) is pending.
+ * \param setup_ipc Call-back function that is called before an
+ * IPC operation is called.
+ */
void halt(l4_utcb_t *utcb, l4vcpu_event_hndl_t do_event_work_cb,
l4vcpu_setup_ipc_t setup_ipc) throw()
{ l4vcpu_halt(this, utcb, do_event_work_cb, setup_ipc); }
/**
* \brief Set vCPU entry instruction pointer.
- * \param sp Instruction pointer address to set.
+ * \param ip Instruction pointer address to set.
*/
void entry_ip(l4_umword_t ip)
{ l4_vcpu_state_t::entry_ip = ip; }
l4vcpu_print_state(l4_vcpu_state_t *vcpu,
const char *prefix) L4_NOTHROW
{
- printf("%svcpu=%p state=%lx savedstate=%lx label=%lx\n",
+ printf("%svcpu=%p state=%x savedstate=%x label=%lx\n",
prefix, vcpu, vcpu->state, vcpu->saved_state, vcpu->i.label);
- printf("%ssticky=%lx user_task=%lx\n",
+ printf("%ssticky=%x user_task=%lx\n",
prefix, vcpu->sticky_flags, vcpu->user_task << L4_CAP_SHIFT);
printf("%sentry_sp=%lx entry_ip=%lx\n",
prefix, vcpu->entry_sp, vcpu->entry_ip);
L4_CV int
l4vcpu_ext_alloc(l4_vcpu_state_t **vcpu, l4_addr_t *ext_state,
- l4_cap_idx_t task, l4_cap_idx_t regmgr) L4_NOTHROW
+ l4_cap_idx_t task, l4_cap_idx_t regmgr) L4_NOTHROW
{
L4::Cap<L4::Task> t(task);
L4::Cap<L4Re::Rm> r(regmgr);