3 * Specific code for I/O port protection
5 INTERFACE[(ia32|amd64) & io & debug]:
// Mapping database for I/O ports, used by io_map()/io_fpage_unmap();
// exported via the interface only in debug builds.
10 extern Static_object<Mapdb> mapdb_io;
12 IMPLEMENTATION[(ia32|amd64) & io]:
// Backing storage for the I/O-port mapping database; constructed in
// init_mapdb_io() below.
19 Static_object<Mapdb> mapdb_io;
// Initialize the I/O-port mapping database.
// @param sigma0  space passed as the root of the mapping database —
//                presumably the initial owner of the whole port range
//                (TODO confirm against Mapdb's constructor contract).
21 void init_mapdb_io(Space *sigma0)
// Page sizes (given as shifts) the I/O "address space" supports:
// superpages, an intermediate 2^9 level, and Io_space pages.
23 static size_t const io_page_sizes[] =
24 {Io_space::Map_superpage_shift, 9, Io_space::Page_shift};
// 0x10000 ports total; the space size is expressed in units of the
// largest page size (io_page_sizes[0]).  The trailing 3 is the number
// of entries in io_page_sizes.
26 mapdb_io.construct(sigma0, Page_number::create(0x10000 >> io_page_sizes[0]), io_page_sizes, 3);
29 /** Map the IO port region described by "fp_from" of address space "from"
30 into address space "to". IO ports can only be mapped idempotently,
31 therefore there is no offset for fp_from and only those ports are mapped
32 that lie in the intersection of fp_from and fp_to
33 @param from source address space
34 @param fp_from... IO flexpage description for IO space range
36 @param to destination address space
37 @param fp_to... IO flexpage description for IO space range
38 in destination IO space
39 @return IPC error code that describes the status of the operation
41 L4_error __attribute__((nonnull(1, 3)))
42 io_map(Space *from, L4_fpage const &fp_from,
43 Space *to, L4_fpage const &fp_to, L4_msg_item control)
// Commented-out debugging aid, kept for reference.
45 /* printf("io_map %u -> %u "
46 * "snd %08x base %x size %x rcv %08x base %x size %x\n",
47 * (unsigned)from->space(), (unsigned)to->space(),
49 * fp_from.iofp.iopage, fp_from.iofp.iosize,
51 * fp_to.iofp.iopage, fp_to.iofp.iosize);
52 * kdb_ke("io_fpage_map 1");
55 typedef Map_traits<Io_space> Mt;
// Base ports and sizes (from the power-of-two orders) of both flexpages.
56 Mt::Addr rcv_pos = Mt::get_addr(fp_to);
57 Mt::Addr snd_pos = Mt::get_addr(fp_from);
59 Mt::Size rcv_size = Mt::Size::from_shift(fp_to.order());
60 Mt::Size snd_size = Mt::Size::from_shift(fp_from.order());
// Align both base ports down to their respective flexpage size ...
62 snd_pos = snd_pos.trunc(snd_size);
63 rcv_pos = rcv_pos.trunc(rcv_size);
// ... and constrain the send window to the intersection with the
// receive window, at offset 0 (I/O ports map idempotently, see the
// function comment above).
64 Mt::constraint(snd_pos, snd_size, rcv_pos, rcv_size, Mt::Addr(0));
// Nothing left to map after constraining: report success.
// NOTE(review): the guard condition controlling this early return sits
// on a line elided from this excerpt — confirm against the full file.
67 return L4_error::None;
69 //assert(snd_pos < L4_fpage::Io_port_max);
// Translate the map-control item into attribute bits to add/remove.
71 unsigned long del_attribs, add_attribs;
72 Mt::attribs(control, fp_from, &del_attribs, &add_attribs);
// Hand off to the generic mapping machinery; no reap list is
// collected here ((Io_space::Reap_list**)0).
74 return map<Io_space>(mapdb_io.get(), from, from, snd_pos,
77 control.is_grant(), add_attribs, del_attribs,
78 (Io_space::Reap_list**)0);
81 /** Unmap IO mappings.
82 Unmap the region described by "fp" from the IO
83 space "space" and/or the IO spaces the mappings have been
85 XXX not implemented yet
86 @param space address space that should be flushed
87 @param fp IO flexpage descriptor of IO-space range that should
89 @param me_too If false, only flush recursive mappings. If true,
90 additionally flush the region in the given address space.
91 @return true if successful
93 unsigned __attribute__((nonnull(1)))
94 io_fpage_unmap(Space *space, L4_fpage fp, L4_map_mask mask)
// NOTE(review): the "@param me_too" doc above is stale — the parameter
// is now an L4_map_mask "mask"; presumably me_too became a bit of the
// mask.  Confirm and update the doc comment in the full file.
96 typedef Map_traits<Io_space> Mt;
// Size of the region (from the flexpage order) and its base port,
// aligned down to that size.
97 Mt::Size size = Mt::Size::from_shift(fp.order())  ;
98 Mt::Addr port = Mt::get_addr(fp);
99 port = port.trunc(size);
101 // Here we _would_ reset IOPL to 0 but this doesn't make much sense
102 // for only one thread since this thread may have forwarded the right
103 // to other threads too. Therefore we had to walk through any thread
106 // current()->regs()->eflags &= ~EFLAGS_IOPL;
// Hand off to the generic unmap machinery, restricted to the rights
// encoded in the flexpage; no reap list is collected here.
108 return unmap<Io_space>(mapdb_io.get(), space, space,
110 fp.rights(), mask, (Io_space::Reap_list**)0);
// Save back access attributes from a mapping for the I/O space case.
// All parameter names are commented out, so this appears to be a
// deliberate no-op stub for Io_space — NOTE(review): the return type
// (preceding line) and the body lie outside this excerpt; confirm it
// really is an empty stub before relying on that.
115 save_access_attribs(Mapdb* /*mapdb*/, const Mapdb::Frame& /*mapdb_frame*/,
116 Mapping* /*mapping*/, Io_space* /*space*/,
117 unsigned /*page_rights*/,
118 Io_space::Addr /*virt*/, Io_space::Phys_addr /*phys*/,
119 Io_space::Size /*size*/,