/*
 * Jailhouse, a Linux-based partitioning hypervisor
 *
 * Copyright (c) Valentine Sinitsyn, 2014, 2015
 * Copyright (c) Siemens AG, 2016
 *
 * Authors:
 *  Valentine Sinitsyn <valentine.sinitsyn@gmail.com>
 *  Jan Kiszka <jan.kiszka@siemens.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
15 #include <jailhouse/cell.h>
16 #include <jailhouse/cell-config.h>
17 #include <jailhouse/control.h>
18 #include <jailhouse/mmio.h>
19 #include <jailhouse/pci.h>
20 #include <jailhouse/printk.h>
21 #include <jailhouse/string.h>
22 #include <asm/amd_iommu.h>
24 #include <asm/iommu.h>
/* IOMMU capability block in PCI config space (offsets relative to cap) */
#define CAPS_IOMMU_HEADER_REG		0x00
#define  CAPS_IOMMU_EFR_SUP		(1 << 27)
#define CAPS_IOMMU_BASE_LOW_REG		0x04
#define  CAPS_IOMMU_ENABLE		(1 << 0)
#define CAPS_IOMMU_BASE_HI_REG		0x08

/* Feature flag reported via ACPI (IVRS) instead of the EFR register */
#define ACPI_REPORTING_HE_SUP		(1 << 7)

/* IOMMU MMIO registers (offsets into the unit's MMIO window) */
#define AMD_DEV_TABLE_BASE_REG		0x0000
#define AMD_CMD_BUF_BASE_REG		0x0008
#define AMD_EVT_LOG_BASE_REG		0x0010
#define AMD_CONTROL_REG			0x0018
#define  AMD_CONTROL_IOMMU_EN		(1UL << 0)
#define  AMD_CONTROL_EVT_LOG_EN		(1UL << 2)
#define  AMD_CONTROL_EVT_INT_EN		(1UL << 3)
#define  AMD_CONTROL_COMM_WAIT_INT_EN	(1UL << 4)
#define  AMD_CONTROL_CMD_BUF_EN		(1UL << 12)
#define  AMD_CONTROL_SMIF_EN		(1UL << 22)
#define  AMD_CONTROL_SMIFLOG_EN		(1UL << 24)
#define  AMD_CONTROL_SEG_EN_MASK	BIT_MASK(36, 34)
#define  AMD_CONTROL_SEG_EN_SHIFT	34
#define AMD_EXT_FEATURES_REG		0x0030
#define  AMD_EXT_FEAT_HE_SUP		(1UL << 7)
#define  AMD_EXT_FEAT_SMI_FSUP_MASK	BIT_MASK(17, 16)
#define  AMD_EXT_FEAT_SMI_FSUP_SHIFT	16
#define  AMD_EXT_FEAT_SMI_FRC_MASK	BIT_MASK(20, 18)
#define  AMD_EXT_FEAT_SMI_FRC_SHIFT	18
#define  AMD_EXT_FEAT_SEG_SUP_MASK	BIT_MASK(39, 38)
#define  AMD_EXT_FEAT_SEG_SUP_SHIFT	38
#define AMD_HEV_UPPER_REG		0x0040
#define AMD_HEV_LOWER_REG		0x0048
#define AMD_HEV_STATUS_REG		0x0050
#define  AMD_HEV_VALID			(1UL << 1)
#define  AMD_HEV_OVERFLOW		(1UL << 2)
#define AMD_SMI_FILTER0_REG		0x0060
#define  AMD_SMI_FILTER_VALID		(1UL << 16)
#define  AMD_SMI_FILTER_LOCKED		(1UL << 17)
#define AMD_DEV_TABLE_SEG1_REG		0x0100
#define AMD_CMD_BUF_HEAD_REG		0x2000
#define AMD_CMD_BUF_TAIL_REG		0x2008
#define AMD_EVT_LOG_HEAD_REG		0x2010
#define AMD_EVT_LOG_TAIL_REG		0x2018
#define AMD_STATUS_REG			0x2020
#define  AMD_STATUS_EVT_OVERFLOW	(1UL << 0)
#define  AMD_STATUS_EVT_LOG_INT		(1UL << 1)
#define  AMD_STATUS_EVT_LOG_RUN		(1UL << 3)
73 struct dev_table_entry {
75 } __attribute__((packed));
77 #define DTE_VALID (1UL << 0)
78 #define DTE_TRANSLATION_VALID (1UL << 1)
79 #define DTE_PAGING_MODE_4_LEVEL (4UL << 9)
80 #define DTE_IR (1UL << 61)
81 #define DTE_IW (1UL << 62)
83 #define DEV_TABLE_SEG_MAX 8
84 #define DEV_TABLE_SIZE 0x200000
94 } __attribute__((packed));
/* Command opcodes and their flag bits */
#define CMD_COMPL_WAIT			0x01
# define CMD_COMPL_WAIT_STORE		(1 << 0)
# define CMD_COMPL_WAIT_INT		(1 << 1)

#define CMD_INV_DEVTAB_ENTRY		0x02

#define CMD_INV_IOMMU_PAGES		0x03
# define CMD_INV_IOMMU_PAGES_SIZE	(1 << 0)
# define CMD_INV_IOMMU_PAGES_PDE	(1 << 1)

/* Event log entry type codes */
#define EVENT_TYPE_ILL_DEV_TAB_ENTRY	0x01
#define EVENT_TYPE_PAGE_TAB_HW_ERR	0x04
#define EVENT_TYPE_ILL_CMD_ERR		0x05
#define EVENT_TYPE_CMD_HW_ERR		0x06
#define EVENT_TYPE_IOTLB_INV_TIMEOUT	0x07
#define EVENT_TYPE_INV_PPR_REQ		0x09

/* Buffer length is encoded as a power-of-two exponent in bits 59:56 */
#define BUF_LEN_EXPONENT_SHIFT		56

/* Allocate minimum space possible (4K or 256 entries) */
#define BUF_SIZE(name, entry)	((1UL << name##_LEN_EXPONENT) * \
				 sizeof(entry))

#define CMD_BUF_LEN_EXPONENT	8
#define EVT_LOG_LEN_EXPONENT	8

#define CMD_BUF_SIZE		BUF_SIZE(CMD_BUF, union buf_entry)
#define EVT_LOG_SIZE		BUF_SIZE(EVT_LOG, union buf_entry)

#define BITS_PER_SHORT		16

#define AMD_IOMMU_MAX_PAGE_TABLE_LEVELS	4
129 static struct amd_iommu {
132 /* Command Buffer, Event Log */
133 unsigned char *cmd_buf_base;
134 unsigned char *evt_log_base;
136 void *devtable_segments[DEV_TABLE_SEG_MAX];
140 } iommu_units[JAILHOUSE_MAX_IOMMU_UNITS];
142 #define for_each_iommu(iommu) for (iommu = iommu_units; \
143 iommu < iommu_units + iommu_units_count; \
146 static unsigned int iommu_units_count;
147 static struct paging amd_iommu_paging[AMD_IOMMU_MAX_PAGE_TABLE_LEVELS];
150 * Interrupt remapping is not emulated on AMD,
151 * thus we have no MMIO to intercept.
153 unsigned int iommu_mmio_count_regions(struct cell *cell)
158 bool iommu_cell_emulates_ir(struct cell *cell)
163 static int amd_iommu_init_pci(struct amd_iommu *entry,
164 struct jailhouse_iommu *iommu)
166 u64 caps_header, hi, lo;
168 /* Check alignment */
169 if (iommu->size & (iommu->size - 1))
170 return trace_error(-EINVAL);
172 /* Check that EFR is supported */
173 caps_header = pci_read_config(iommu->amd_bdf, iommu->amd_base_cap, 4);
174 if (!(caps_header & CAPS_IOMMU_EFR_SUP))
175 return trace_error(-EIO);
177 lo = pci_read_config(iommu->amd_bdf,
178 iommu->amd_base_cap + CAPS_IOMMU_BASE_LOW_REG, 4);
179 hi = pci_read_config(iommu->amd_bdf,
180 iommu->amd_base_cap + CAPS_IOMMU_BASE_HI_REG, 4);
182 if (lo & CAPS_IOMMU_ENABLE &&
183 ((hi << 32) | lo) != (iommu->base | CAPS_IOMMU_ENABLE)) {
184 printk("FATAL: IOMMU %d config is locked in invalid state.\n",
186 return trace_error(-EPERM);
189 /* Should be configured by BIOS, but we want to be sure */
190 pci_write_config(iommu->amd_bdf,
191 iommu->amd_base_cap + CAPS_IOMMU_BASE_HI_REG,
192 (u32)(iommu->base >> 32), 4);
193 pci_write_config(iommu->amd_bdf,
194 iommu->amd_base_cap + CAPS_IOMMU_BASE_LOW_REG,
195 (u32)(iommu->base & 0xffffffff) | CAPS_IOMMU_ENABLE,
198 /* Allocate and map MMIO space */
199 entry->mmio_base = page_alloc(&remap_pool, PAGES(iommu->size));
200 if (!entry->mmio_base)
203 return paging_create(&hv_paging_structs, iommu->base, iommu->size,
204 (unsigned long)entry->mmio_base,
205 PAGE_DEFAULT_FLAGS | PAGE_FLAG_DEVICE,
206 PAGING_NON_COHERENT);
209 static int amd_iommu_init_features(struct amd_iommu *entry,
210 struct jailhouse_iommu *iommu)
212 u64 efr = mmio_read64(entry->mmio_base + AMD_EXT_FEATURES_REG);
213 unsigned char smi_filter_regcnt;
214 u64 val, ctrl_reg = 0, smi_freg = 0;
219 * Require SMI Filter support. Enable and lock filter but
220 * mark all entries as invalid to disable SMI delivery.
222 if (!(efr & AMD_EXT_FEAT_SMI_FSUP_MASK))
223 return trace_error(-EINVAL);
225 /* Figure out if hardware events are supported. */
226 if (iommu->amd_features)
227 entry->he_supported =
228 iommu->amd_features & ACPI_REPORTING_HE_SUP;
230 entry->he_supported = efr & AMD_EXT_FEAT_HE_SUP;
232 smi_filter_regcnt = (1 << (efr & AMD_EXT_FEAT_SMI_FRC_MASK) >>
233 AMD_EXT_FEAT_SMI_FRC_SHIFT);
234 for (n = 0; n < smi_filter_regcnt; n++) {
235 reg_base = entry->mmio_base + AMD_SMI_FILTER0_REG + (n << 3);
236 smi_freg = mmio_read64(reg_base);
238 if (!(smi_freg & AMD_SMI_FILTER_LOCKED)) {
240 * Program unlocked register the way we need:
241 * invalid and locked.
243 mmio_write64(reg_base, AMD_SMI_FILTER_LOCKED);
244 } else if (smi_freg & AMD_SMI_FILTER_VALID) {
246 * The register is locked and programed
247 * the way we don't want - error.
249 printk("ERROR: SMI Filter register %d is locked "
250 "and can't be reprogrammed.\n"
251 "Reboot and check no other component uses the "
252 "IOMMU %d.\n", n, entry->idx);
253 return trace_error(-EPERM);
256 * The register is locked, but programmed
257 * the way we need - OK to go.
261 ctrl_reg |= (AMD_CONTROL_SMIF_EN | AMD_CONTROL_SMIFLOG_EN);
263 /* Enable maximum Device Table segmentation possible */
264 entry->dev_tbl_seg_sup = (efr & AMD_EXT_FEAT_SEG_SUP_MASK) >>
265 AMD_EXT_FEAT_SEG_SUP_SHIFT;
266 if (entry->dev_tbl_seg_sup) {
267 val = (u64)entry->dev_tbl_seg_sup << AMD_CONTROL_SEG_EN_SHIFT;
268 ctrl_reg |= val & AMD_CONTROL_SEG_EN_MASK;
271 mmio_write64(entry->mmio_base + AMD_CONTROL_REG, ctrl_reg);
276 static int amd_iommu_init_buffers(struct amd_iommu *entry,
277 struct jailhouse_iommu *iommu)
279 /* Allocate and configure command buffer */
280 entry->cmd_buf_base = page_alloc(&mem_pool, PAGES(CMD_BUF_SIZE));
281 if (!entry->cmd_buf_base)
284 mmio_write64(entry->mmio_base + AMD_CMD_BUF_BASE_REG,
285 paging_hvirt2phys(entry->cmd_buf_base) |
286 ((u64)CMD_BUF_LEN_EXPONENT << BUF_LEN_EXPONENT_SHIFT));
288 entry->cmd_tail_ptr = 0;
290 /* Allocate and configure event log */
291 entry->evt_log_base = page_alloc(&mem_pool, PAGES(EVT_LOG_SIZE));
292 if (!entry->evt_log_base)
295 mmio_write64(entry->mmio_base + AMD_EVT_LOG_BASE_REG,
296 paging_hvirt2phys(entry->evt_log_base) |
297 ((u64)EVT_LOG_LEN_EXPONENT << BUF_LEN_EXPONENT_SHIFT));
302 static void amd_iommu_enable_command_processing(struct amd_iommu *iommu)
306 ctrl_reg = mmio_read64(iommu->mmio_base + AMD_CONTROL_REG);
307 ctrl_reg |= AMD_CONTROL_IOMMU_EN | AMD_CONTROL_CMD_BUF_EN |
308 AMD_CONTROL_EVT_LOG_EN | AMD_CONTROL_EVT_INT_EN;
309 mmio_write64(iommu->mmio_base + AMD_CONTROL_REG, ctrl_reg);
312 static void amd_iommu_set_next_pt_l4(pt_entry_t pte, unsigned long next_pt)
314 *pte = (next_pt & BIT_MASK(51, 12)) | AMD_IOMMU_PTE_PG_MODE(3) |
315 AMD_IOMMU_PTE_IR | AMD_IOMMU_PTE_IW | AMD_IOMMU_PTE_P;
318 static void amd_iommu_set_next_pt_l3(pt_entry_t pte, unsigned long next_pt)
320 *pte = (next_pt & BIT_MASK(51, 12)) | AMD_IOMMU_PTE_PG_MODE(2) |
321 AMD_IOMMU_PTE_IR | AMD_IOMMU_PTE_IW | AMD_IOMMU_PTE_P;
324 static void amd_iommu_set_next_pt_l2(pt_entry_t pte, unsigned long next_pt)
326 *pte = (next_pt & BIT_MASK(51, 12)) | AMD_IOMMU_PTE_PG_MODE(1) |
327 AMD_IOMMU_PTE_IR | AMD_IOMMU_PTE_IW | AMD_IOMMU_PTE_P;
330 static unsigned long amd_iommu_get_phys_l3(pt_entry_t pte, unsigned long virt)
332 if (*pte & AMD_IOMMU_PTE_PG_MODE_MASK)
333 return INVALID_PHYS_ADDR;
334 return (*pte & BIT_MASK(51, 30)) | (virt & BIT_MASK(29, 0));
337 static unsigned long amd_iommu_get_phys_l2(pt_entry_t pte, unsigned long virt)
339 if (*pte & AMD_IOMMU_PTE_PG_MODE_MASK)
340 return INVALID_PHYS_ADDR;
341 return (*pte & BIT_MASK(51, 21)) | (virt & BIT_MASK(20, 0));
346 struct jailhouse_iommu *iommu;
347 struct amd_iommu *entry;
351 iommu = &system_config->platform_info.x86.iommu_units[0];
352 for (n = 0; iommu->base && n < iommu_count_units(); iommu++, n++) {
353 entry = &iommu_units[iommu_units_count];
357 /* Protect against accidental VT-d configs. */
359 return trace_error(-EINVAL);
361 printk("AMD IOMMU @0x%lx/0x%x\n", iommu->base, iommu->size);
363 /* Initialize PCI registers and MMIO space */
364 err = amd_iommu_init_pci(entry, iommu);
368 /* Setup IOMMU features */
369 err = amd_iommu_init_features(entry, iommu);
373 /* Initialize command buffer and event log */
374 err = amd_iommu_init_buffers(entry, iommu);
378 /* Enable the IOMMU */
379 amd_iommu_enable_command_processing(entry);
385 * Derive amd_iommu_paging from very similar x86_64_paging,
386 * replicating all 4 levels.
388 memcpy(amd_iommu_paging, x86_64_paging, sizeof(amd_iommu_paging));
389 amd_iommu_paging[0].set_next_pt = amd_iommu_set_next_pt_l4;
390 amd_iommu_paging[1].set_next_pt = amd_iommu_set_next_pt_l3;
391 amd_iommu_paging[2].set_next_pt = amd_iommu_set_next_pt_l2;
392 amd_iommu_paging[1].get_phys = amd_iommu_get_phys_l3;
393 amd_iommu_paging[2].get_phys = amd_iommu_get_phys_l2;
395 return iommu_cell_init(&root_cell);
398 int iommu_cell_init(struct cell *cell)
401 if (iommu_units_count == 0)
404 if (cell->id > 0xffff)
405 return trace_error(-ERANGE);
407 cell->arch.amd_iommu.pg_structs.root_paging = amd_iommu_paging;
408 cell->arch.amd_iommu.pg_structs.root_table = page_alloc(&mem_pool, 1);
409 if (!cell->arch.amd_iommu.pg_structs.root_table)
410 return trace_error(-ENOMEM);
/* DMA mapping is not implemented yet; report success so setup proceeds. */
int iommu_map_memory_region(struct cell *cell,
			    const struct jailhouse_memory *mem)
{
	/* TODO: Implement */
	return 0;
}

int iommu_unmap_memory_region(struct cell *cell,
			      const struct jailhouse_memory *mem)
{
	/* TODO: Implement */
	return 0;
}

int iommu_add_pci_device(struct cell *cell, struct pci_device *device)
{
	/* TODO: Implement */
	return 0;
}

void iommu_remove_pci_device(struct pci_device *device)
{
	/* TODO: Implement */
}
439 void iommu_cell_exit(struct cell *cell)
441 /* TODO: Again, this a copy of vtd.c:iommu_cell_exit */
443 if (iommu_units_count == 0)
446 page_free(&mem_pool, cell->arch.amd_iommu.pg_structs.root_table, 1);
449 void iommu_config_commit(struct cell *cell_added_removed)
451 /* TODO: Implement */
454 struct apic_irq_message iommu_get_remapped_root_int(unsigned int iommu,
457 unsigned int remap_index)
459 struct apic_irq_message dummy = { .valid = 0 };
461 /* TODO: Implement */
465 int iommu_map_interrupt(struct cell *cell, u16 device_id, unsigned int vector,
466 struct apic_irq_message irq_msg)
468 /* TODO: Implement */
472 void iommu_shutdown(void)
474 struct amd_iommu *iommu;
477 for_each_iommu(iommu) {
478 /* Disable the IOMMU */
479 ctrl_reg = mmio_read64(iommu->mmio_base + AMD_CONTROL_REG);
480 ctrl_reg &= ~(AMD_CONTROL_IOMMU_EN | AMD_CONTROL_CMD_BUF_EN |
481 AMD_CONTROL_EVT_LOG_EN | AMD_CONTROL_EVT_INT_EN);
482 mmio_write64(iommu->mmio_base + AMD_CONTROL_REG, ctrl_reg);
/* Event-log polling is not implemented yet. */
void iommu_check_pending_faults(void)
{
	/* TODO: Implement */
}