2 * Copyright © 2006-2014 Intel Corporation.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * Authors: David Woodhouse <dwmw2@infradead.org>,
14 * Ashok Raj <ashok.raj@intel.com>,
15 * Shaohua Li <shaohua.li@intel.com>,
16 * Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17 * Fenghua Yu <fenghua.yu@intel.com>
18 * Joerg Roedel <jroedel@suse.de>
21 #define pr_fmt(fmt) "DMAR: " fmt
23 #include <linux/init.h>
24 #include <linux/bitmap.h>
25 #include <linux/debugfs.h>
26 #include <linux/export.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/memory.h>
36 #include <linux/cpu.h>
37 #include <linux/timer.h>
39 #include <linux/iova.h>
40 #include <linux/iommu.h>
41 #include <linux/intel-iommu.h>
42 #include <linux/syscore_ops.h>
43 #include <linux/tboot.h>
44 #include <linux/dmi.h>
45 #include <linux/pci-ats.h>
46 #include <linux/memblock.h>
47 #include <linux/dma-contiguous.h>
48 #include <linux/crash_dump.h>
49 #include <asm/irq_remapping.h>
50 #include <asm/cacheflush.h>
51 #include <asm/iommu.h>
53 #include "irq_remapping.h"
55 #define ROOT_SIZE VTD_PAGE_SIZE
56 #define CONTEXT_SIZE VTD_PAGE_SIZE
58 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
59 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
60 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
61 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
63 #define IOAPIC_RANGE_START (0xfee00000)
64 #define IOAPIC_RANGE_END (0xfeefffff)
65 #define IOVA_START_ADDR (0x1000)
67 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
69 #define MAX_AGAW_WIDTH 64
70 #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
72 #define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
73 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
75 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
76 to match. That way, we can use 'unsigned long' for PFNs with impunity. */
77 #define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
78 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
79 #define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
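/*
 * Worked example (assuming the usual 4KiB VT-d page size): for the default
 * 48-bit guest address width, __DOMAIN_MAX_PFN(48) = 2^36 - 1, the last page
 * frame below 256TiB, and DOMAIN_MAX_ADDR(48) ends just below 2^48.
 * On a 32-bit kernel DOMAIN_MAX_PFN() clamps the result to ULONG_MAX.
 */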
81 /* IO virtual address start page frame number */
82 #define IOVA_START_PFN (1)
84 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
85 #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32))
86 #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
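/* With 4KiB pages, DMA_32BIT_PFN is 0xfffff: the last page frame a device
 * with a 32-bit DMA mask can address. */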
88 /* page table handling */
89 #define LEVEL_STRIDE (9)
90 #define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
93 * This bitmap is used to advertise the page sizes our hardware supports
94 * to the IOMMU core, which will then use this information to split
95 * physically contiguous memory regions it is mapping into page sizes
98 * Traditionally the IOMMU core just handed us the mappings directly,
99 * after making sure the size is an order of a 4KiB page and that the
100 * mapping has natural alignment.
102 * To retain this behavior, we currently advertise that we support
103 * all page sizes that are an order of 4KiB.
105 * If at some point we'd like to utilize the IOMMU core's new behavior,
106 * we could change this to advertise the real page sizes we support.
108 #define INTEL_IOMMU_PGSIZES (~0xFFFUL)
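/* ~0xFFFUL has every bit from 12 upwards set, i.e. it advertises every
 * power-of-two size of 4KiB or larger, per the comment above. */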
110 static inline int agaw_to_level(int agaw)
115 static inline int agaw_to_width(int agaw)
117 return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
120 static inline int width_to_agaw(int width)
122 return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
125 static inline unsigned int level_to_offset_bits(int level)
127 return (level - 1) * LEVEL_STRIDE;
130 static inline int pfn_level_offset(unsigned long pfn, int level)
132 return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
135 static inline unsigned long level_mask(int level)
137 return -1UL << level_to_offset_bits(level);
140 static inline unsigned long level_size(int level)
142 return 1UL << level_to_offset_bits(level);
145 static inline unsigned long align_to_level(unsigned long pfn, int level)
147 return (pfn + level_size(level) - 1) & level_mask(level);
150 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
152 return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
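/*
 * Worked example of the level helpers: a level-1 PTE maps a single 4KiB page
 * (offset bits 0), a level-2 PTE covers 512 pages (2MiB) and a level-3 PTE
 * covers 512^2 pages (1GiB). pfn_level_offset(pfn, 2) is simply
 * (pfn >> 9) & 0x1ff, the 9-bit index into the level-2 table.
 */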
155 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
156 are never going to work. */
157 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
159 return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
162 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
164 return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
166 static inline unsigned long page_to_dma_pfn(struct page *pg)
168 return mm_to_dma_pfn(page_to_pfn(pg));
170 static inline unsigned long virt_to_dma_pfn(void *p)
172 return page_to_dma_pfn(virt_to_page(p));
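/* On x86, PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so the mm<->dma pfn conversions
 * above are identity; they only matter on configurations where MM pages are
 * larger than 4KiB and one MM page spans several VT-d pages. */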
175 /* global iommu list, set NULL for ignored DMAR units */
176 static struct intel_iommu **g_iommus;
178 static void __init check_tylersburg_isoch(void);
179 static int rwbf_quirk;
182 * set to 1 to panic the kernel if VT-d can't successfully be enabled
183 * (used when the kernel is launched w/ TXT)
185 static int force_on = 0;
190 * 12-63: Context Ptr (12 - (haw-1))
197 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
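/* A root_entry is 16 bytes (lo/hi), so one 4KiB root table holds
 * ROOT_ENTRY_NR == 256 entries, one per PCI bus number. */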
200 * Take a root_entry and return the Lower Context Table Pointer (LCTP)
203 static phys_addr_t root_entry_lctp(struct root_entry *re)
208 return re->lo & VTD_PAGE_MASK;
212 * Take a root_entry and return the Upper Context Table Pointer (UCTP)
215 static phys_addr_t root_entry_uctp(struct root_entry *re)
220 return re->hi & VTD_PAGE_MASK;
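/* In extended (ECS) mode a root entry carries two context-table pointers:
 * the lower table covers devfn 0-127 and the upper table covers devfn
 * 128-255, which is why free_context_table() also looks up devfn 0x80. */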
225 * 1: fault processing disable
226 * 2-3: translation type
227 * 12-63: address space root
233 struct context_entry {
238 static inline void context_clear_pasid_enable(struct context_entry *context)
240 context->lo &= ~(1ULL << 11);
243 static inline bool context_pasid_enabled(struct context_entry *context)
245 return !!(context->lo & (1ULL << 11));
248 static inline void context_set_copied(struct context_entry *context)
250 context->hi |= (1ull << 3);
253 static inline bool context_copied(struct context_entry *context)
255 return !!(context->hi & (1ULL << 3));
258 static inline bool __context_present(struct context_entry *context)
260 return (context->lo & 1);
263 static inline bool context_present(struct context_entry *context)
265 return context_pasid_enabled(context) ?
266 __context_present(context) :
267 __context_present(context) && !context_copied(context);
270 static inline void context_set_present(struct context_entry *context)
275 static inline void context_set_fault_enable(struct context_entry *context)
277 context->lo &= (((u64)-1) << 2) | 1;
280 static inline void context_set_translation_type(struct context_entry *context,
283 context->lo &= (((u64)-1) << 4) | 3;
284 context->lo |= (value & 3) << 2;
287 static inline void context_set_address_root(struct context_entry *context,
290 context->lo &= ~VTD_PAGE_MASK;
291 context->lo |= value & VTD_PAGE_MASK;
294 static inline void context_set_address_width(struct context_entry *context,
297 context->hi |= value & 7;
300 static inline void context_set_domain_id(struct context_entry *context,
303 context->hi |= (value & ((1 << 16) - 1)) << 8;
306 static inline int context_domain_id(struct context_entry *c)
308 return((c->hi >> 8) & 0xffff);
311 static inline void context_clear_entry(struct context_entry *context)
324 * 12-63: Host physical address
330 static inline void dma_clear_pte(struct dma_pte *pte)
335 static inline u64 dma_pte_addr(struct dma_pte *pte)
338 return pte->val & VTD_PAGE_MASK;
340 /* Must have a full atomic 64-bit read */
341 return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
345 static inline bool dma_pte_present(struct dma_pte *pte)
347 return (pte->val & 3) != 0;
350 static inline bool dma_pte_superpage(struct dma_pte *pte)
352 return (pte->val & DMA_PTE_LARGE_PAGE);
355 static inline int first_pte_in_page(struct dma_pte *pte)
357 return !((unsigned long)pte & ~VTD_PAGE_MASK);
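/* A page-table page holds 512 eight-byte PTEs; this returns true when pte is
 * the first entry of its page, i.e. its address is 4KiB-aligned. */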
361 * This domain is a statically identity mapping domain.
362 * 1. This domain creates a static 1:1 mapping to all usable memory.
363 * 2. It maps to each iommu if successful.
364 * 3. Each iommu maps to this domain if successful.
366 static struct dmar_domain *si_domain;
367 static int hw_pass_through = 1;
370 * Domain represents a virtual machine; more than one device
371 * across iommus may be owned by one domain, e.g. a kvm guest.
373 #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 0)
375 /* si_domain contains multiple devices */
376 #define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
378 #define for_each_domain_iommu(idx, domain) \
379 for (idx = 0; idx < g_num_of_iommus; idx++) \
380 if (domain->iommu_refcnt[idx])
383 int nid; /* node id */
385 unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
386 /* Refcount of devices per iommu */
389 u16 iommu_did[DMAR_UNITS_SUPPORTED];
390 /* Domain ids per IOMMU. Use u16 since
391 * domain ids are 16 bit wide according
392 * to VT-d spec, section 9.3 */
394 bool has_iotlb_device;
395 struct list_head devices; /* all devices' list */
396 struct iova_domain iovad; /* iova's that belong to this domain */
398 struct dma_pte *pgd; /* virtual address */
399 int gaw; /* max guest address width */
401 /* adjusted guest address width, 0 is level 2 30-bit */
404 int flags; /* flags to find out type of domain */
406 int iommu_coherency;/* indicate coherency of iommu access */
407 int iommu_snooping; /* indicate snooping control feature*/
408 int iommu_count; /* reference count of iommu */
409 int iommu_superpage;/* Level of superpages supported:
410 0 == 4KiB (no superpages), 1 == 2MiB,
411 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
412 u64 max_addr; /* maximum mapped address */
414 struct iommu_domain domain; /* generic domain data structure for
418 /* PCI domain-device relationship */
419 struct device_domain_info {
420 struct list_head link; /* link to domain siblings */
421 struct list_head global; /* link to global list */
422 u8 bus; /* PCI bus number */
423 u8 devfn; /* PCI devfn number */
424 u8 pasid_supported:3;
431 struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
432 struct intel_iommu *iommu; /* IOMMU used by this device */
433 struct dmar_domain *domain; /* pointer to domain */
436 struct dmar_rmrr_unit {
437 struct list_head list; /* list of rmrr units */
438 struct acpi_dmar_header *hdr; /* ACPI header */
439 u64 base_address; /* reserved base address*/
440 u64 end_address; /* reserved end address */
441 struct dmar_dev_scope *devices; /* target devices */
442 int devices_cnt; /* target device count */
445 struct dmar_atsr_unit {
446 struct list_head list; /* list of ATSR units */
447 struct acpi_dmar_header *hdr; /* ACPI header */
448 struct dmar_dev_scope *devices; /* target devices */
449 int devices_cnt; /* target device count */
450 u8 include_all:1; /* include all ports */
453 static LIST_HEAD(dmar_atsr_units);
454 static LIST_HEAD(dmar_rmrr_units);
456 #define for_each_rmrr_units(rmrr) \
457 list_for_each_entry(rmrr, &dmar_rmrr_units, list)
459 static void flush_unmaps_timeout(unsigned long data);
461 struct deferred_flush_entry {
462 unsigned long iova_pfn;
463 unsigned long nrpages;
464 struct dmar_domain *domain;
465 struct page *freelist;
468 #define HIGH_WATER_MARK 250
469 struct deferred_flush_table {
471 struct deferred_flush_entry entries[HIGH_WATER_MARK];
474 struct deferred_flush_data {
477 struct timer_list timer;
479 struct deferred_flush_table *tables;
482 static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
484 /* bitmap for indexing intel_iommus */
485 static int g_num_of_iommus;
487 static void domain_exit(struct dmar_domain *domain);
488 static void domain_remove_dev_info(struct dmar_domain *domain);
489 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
491 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
492 static void domain_context_clear(struct intel_iommu *iommu,
494 static int domain_detach_iommu(struct dmar_domain *domain,
495 struct intel_iommu *iommu);
497 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
498 int dmar_disabled = 0;
500 int dmar_disabled = 1;
501 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
503 int intel_iommu_enabled = 0;
504 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
506 static int dmar_map_gfx = 1;
507 static int dmar_forcedac;
508 static int intel_iommu_strict;
509 static int intel_iommu_superpage = 1;
510 static int intel_iommu_ecs = 1;
511 static int intel_iommu_pasid28;
512 static int iommu_identity_mapping;
514 #define IDENTMAP_ALL 1
515 #define IDENTMAP_GFX 2
516 #define IDENTMAP_AZALIA 4
518 /* Broadwell and Skylake have broken ECS support — normal so-called "second
519 * level" translation of DMA requests-without-PASID doesn't actually happen
520 * unless you also set the NESTE bit in an extended context-entry. Which of
521 * course means that SVM doesn't work because it's trying to do nested
522 * translation of the physical addresses it finds in the process page tables,
523 * through the IOVA->phys mapping found in the "second level" page tables.
525 * The VT-d specification was retroactively changed to change the definition
526 * of the capability bits and pretend that Broadwell/Skylake never happened...
527 * but unfortunately the wrong bit was changed. It's ECS which is broken, but
528 * for some reason it was the PASID capability bit which was redefined (from
529 * bit 28 on BDW/SKL to bit 40 in future).
531 * So our test for ECS needs to eschew those implementations which set the old
532 * PASID capability bit 28, since those are the ones on which ECS is broken.
533 * Unless we are working around the 'pasid28' limitations, that is, by putting
534 * the device into passthrough mode for normal DMA and thus masking the bug.
536 #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
537 (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
538 /* PASID support is thus enabled if ECS is enabled and *either* of the old
539 * or new capability bits are set. */
540 #define pasid_enabled(iommu) (ecs_enabled(iommu) && \
541 (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
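/* Concretely: on the affected Broadwell/Skylake parts ecap_broken_pasid()
 * (old bit 28) is set, so ecs_enabled() evaluates false and we stay with
 * legacy context entries unless "intel_iommu=pasid28" opts back in. */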
543 int intel_iommu_gfx_mapped;
544 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
546 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
547 static DEFINE_SPINLOCK(device_domain_lock);
548 static LIST_HEAD(device_domain_list);
550 static const struct iommu_ops intel_iommu_ops;
552 static bool translation_pre_enabled(struct intel_iommu *iommu)
554 return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
557 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
559 iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
562 static void init_translation_status(struct intel_iommu *iommu)
566 gsts = readl(iommu->reg + DMAR_GSTS_REG);
567 if (gsts & DMA_GSTS_TES)
568 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
571 /* Convert generic 'struct iommu_domain' to private 'struct dmar_domain' */
572 static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
574 return container_of(dom, struct dmar_domain, domain);
577 static int __init intel_iommu_setup(char *str)
582 if (!strncmp(str, "on", 2)) {
584 pr_info("IOMMU enabled\n");
585 } else if (!strncmp(str, "off", 3)) {
587 pr_info("IOMMU disabled\n");
588 } else if (!strncmp(str, "igfx_off", 8)) {
590 pr_info("Disable GFX device mapping\n");
591 } else if (!strncmp(str, "forcedac", 8)) {
592 pr_info("Forcing DAC for PCI devices\n");
594 } else if (!strncmp(str, "strict", 6)) {
595 pr_info("Disable batched IOTLB flush\n");
596 intel_iommu_strict = 1;
597 } else if (!strncmp(str, "sp_off", 6)) {
598 pr_info("Disable supported super page\n");
599 intel_iommu_superpage = 0;
600 } else if (!strncmp(str, "ecs_off", 7)) {
602 "Intel-IOMMU: disable extended context table support\n");
604 } else if (!strncmp(str, "pasid28", 7)) {
606 "Intel-IOMMU: enable pre-production PASID support\n");
607 intel_iommu_pasid28 = 1;
608 iommu_identity_mapping |= IDENTMAP_GFX;
611 str += strcspn(str, ",");
617 __setup("intel_iommu=", intel_iommu_setup);
619 static struct kmem_cache *iommu_domain_cache;
620 static struct kmem_cache *iommu_devinfo_cache;
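/*
 * Domain-id -> dmar_domain lookup is a sparse two-level array: the low 8 bits
 * of the domain id index a 256-entry second-level page (allocated on demand
 * in set_iommu_domain()), and the remaining high bits select which
 * second-level page to use.
 */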
622 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
624 struct dmar_domain **domains;
627 domains = iommu->domains[idx];
631 return domains[did & 0xff];
634 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
635 struct dmar_domain *domain)
637 struct dmar_domain **domains;
640 if (!iommu->domains[idx]) {
641 size_t size = 256 * sizeof(struct dmar_domain *);
642 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
645 domains = iommu->domains[idx];
646 if (WARN_ON(!domains))
649 domains[did & 0xff] = domain;
652 static inline void *alloc_pgtable_page(int node)
657 page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
659 vaddr = page_address(page);
663 static inline void free_pgtable_page(void *vaddr)
665 free_page((unsigned long)vaddr);
668 static inline void *alloc_domain_mem(void)
670 return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
673 static void free_domain_mem(void *vaddr)
675 kmem_cache_free(iommu_domain_cache, vaddr);
678 static inline void * alloc_devinfo_mem(void)
680 return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
683 static inline void free_devinfo_mem(void *vaddr)
685 kmem_cache_free(iommu_devinfo_cache, vaddr);
688 static inline int domain_type_is_vm(struct dmar_domain *domain)
690 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
693 static inline int domain_type_is_si(struct dmar_domain *domain)
695 return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
698 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
700 return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
701 DOMAIN_FLAG_STATIC_IDENTITY);
704 static inline int domain_pfn_supported(struct dmar_domain *domain,
707 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
709 return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
712 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
717 sagaw = cap_sagaw(iommu->cap);
718 for (agaw = width_to_agaw(max_gaw);
720 if (test_bit(agaw, &sagaw))
728 * Calculate max SAGAW for each iommu.
730 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
732 return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
736 * Calculate agaw for each iommu.
737 * "SAGAW" may be different across iommus; use a default agaw, and
738 * fall back to a smaller supported agaw for iommus that don't support the default.
740 int iommu_calculate_agaw(struct intel_iommu *iommu)
742 return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
745 /* This function only returns a single iommu in a domain */
746 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
750 /* si_domain and vm domain should not get here. */
751 BUG_ON(domain_type_is_vm_or_si(domain));
752 for_each_domain_iommu(iommu_id, domain)
755 if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
758 return g_iommus[iommu_id];
761 static void domain_update_iommu_coherency(struct dmar_domain *domain)
763 struct dmar_drhd_unit *drhd;
764 struct intel_iommu *iommu;
768 domain->iommu_coherency = 1;
770 for_each_domain_iommu(i, domain) {
772 if (!ecap_coherent(g_iommus[i]->ecap)) {
773 domain->iommu_coherency = 0;
780 /* No hardware attached; use lowest common denominator */
782 for_each_active_iommu(iommu, drhd) {
783 if (!ecap_coherent(iommu->ecap)) {
784 domain->iommu_coherency = 0;
791 static int domain_update_iommu_snooping(struct intel_iommu *skip)
793 struct dmar_drhd_unit *drhd;
794 struct intel_iommu *iommu;
798 for_each_active_iommu(iommu, drhd) {
800 if (!ecap_sc_support(iommu->ecap)) {
811 static int domain_update_iommu_superpage(struct intel_iommu *skip)
813 struct dmar_drhd_unit *drhd;
814 struct intel_iommu *iommu;
817 if (!intel_iommu_superpage) {
821 /* set iommu_superpage to the smallest common denominator */
823 for_each_active_iommu(iommu, drhd) {
825 mask &= cap_super_page_val(iommu->cap);
835 /* Some capabilities may be different across iommus */
836 static void domain_update_iommu_cap(struct dmar_domain *domain)
838 domain_update_iommu_coherency(domain);
839 domain->iommu_snooping = domain_update_iommu_snooping(NULL);
840 domain->iommu_superpage = domain_update_iommu_superpage(NULL);
843 static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
844 u8 bus, u8 devfn, int alloc)
846 struct root_entry *root = &iommu->root_entry[bus];
847 struct context_entry *context;
851 if (ecs_enabled(iommu)) {
859 context = phys_to_virt(*entry & VTD_PAGE_MASK);
861 unsigned long phy_addr;
865 context = alloc_pgtable_page(iommu->node);
869 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
870 phy_addr = virt_to_phys((void *)context);
871 *entry = phy_addr | 1;
872 __iommu_flush_cache(iommu, entry, sizeof(*entry));
874 return &context[devfn];
877 static int iommu_dummy(struct device *dev)
879 return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
882 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
884 struct dmar_drhd_unit *drhd = NULL;
885 struct intel_iommu *iommu;
887 struct pci_dev *ptmp, *pdev = NULL;
891 if (iommu_dummy(dev))
894 if (dev_is_pci(dev)) {
895 struct pci_dev *pf_pdev;
897 pdev = to_pci_dev(dev);
898 /* VFs aren't listed in scope tables; we need to look up
899 * the PF instead to find the IOMMU. */
900 pf_pdev = pci_physfn(pdev);
902 segment = pci_domain_nr(pdev->bus);
903 } else if (has_acpi_companion(dev))
904 dev = &ACPI_COMPANION(dev)->dev;
907 for_each_active_iommu(iommu, drhd) {
908 if (pdev && segment != drhd->segment)
911 for_each_active_dev_scope(drhd->devices,
912 drhd->devices_cnt, i, tmp) {
914 /* For a VF use its original BDF# not that of the PF
915 * which we used for the IOMMU lookup. Strictly speaking
916 * we could do this for all PCI devices; we only need to
917 * get the BDF# from the scope table for ACPI matches. */
921 *bus = drhd->devices[i].bus;
922 *devfn = drhd->devices[i].devfn;
926 if (!pdev || !dev_is_pci(tmp))
929 ptmp = to_pci_dev(tmp);
930 if (ptmp->subordinate &&
931 ptmp->subordinate->number <= pdev->bus->number &&
932 ptmp->subordinate->busn_res.end >= pdev->bus->number)
936 if (pdev && drhd->include_all) {
938 *bus = pdev->bus->number;
939 *devfn = pdev->devfn;
950 static void domain_flush_cache(struct dmar_domain *domain,
951 void *addr, int size)
953 if (!domain->iommu_coherency)
954 clflush_cache_range(addr, size);
957 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
959 struct context_entry *context;
963 spin_lock_irqsave(&iommu->lock, flags);
964 context = iommu_context_addr(iommu, bus, devfn, 0);
966 ret = context_present(context);
967 spin_unlock_irqrestore(&iommu->lock, flags);
971 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
973 struct context_entry *context;
976 spin_lock_irqsave(&iommu->lock, flags);
977 context = iommu_context_addr(iommu, bus, devfn, 0);
979 context_clear_entry(context);
980 __iommu_flush_cache(iommu, context, sizeof(*context));
982 spin_unlock_irqrestore(&iommu->lock, flags);
985 static void free_context_table(struct intel_iommu *iommu)
989 struct context_entry *context;
991 spin_lock_irqsave(&iommu->lock, flags);
992 if (!iommu->root_entry) {
995 for (i = 0; i < ROOT_ENTRY_NR; i++) {
996 context = iommu_context_addr(iommu, i, 0, 0);
998 free_pgtable_page(context);
1000 if (!ecs_enabled(iommu))
1003 context = iommu_context_addr(iommu, i, 0x80, 0);
1005 free_pgtable_page(context);
1008 free_pgtable_page(iommu->root_entry);
1009 iommu->root_entry = NULL;
1011 spin_unlock_irqrestore(&iommu->lock, flags);
1014 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
1015 unsigned long pfn, int *target_level)
1017 struct dma_pte *parent, *pte = NULL;
1018 int level = agaw_to_level(domain->agaw);
1021 BUG_ON(!domain->pgd);
1023 if (!domain_pfn_supported(domain, pfn))
1024 /* Address beyond IOMMU's addressing capabilities. */
1027 parent = domain->pgd;
1032 offset = pfn_level_offset(pfn, level);
1033 pte = &parent[offset];
1034 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
1036 if (level == *target_level)
1039 if (!dma_pte_present(pte)) {
1042 tmp_page = alloc_pgtable_page(domain->nid);
1047 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
1048 pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
1049 if (cmpxchg64(&pte->val, 0ULL, pteval))
1050 /* Someone else set it while we were thinking; use theirs. */
1051 free_pgtable_page(tmp_page);
1053 domain_flush_cache(domain, pte, sizeof(*pte));
1058 parent = phys_to_virt(dma_pte_addr(pte));
1063 *target_level = level;
1069 /* return the address's pte at a specific level */
1070 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
1072 int level, int *large_page)
1074 struct dma_pte *parent, *pte = NULL;
1075 int total = agaw_to_level(domain->agaw);
1078 parent = domain->pgd;
1079 while (level <= total) {
1080 offset = pfn_level_offset(pfn, total);
1081 pte = &parent[offset];
1085 if (!dma_pte_present(pte)) {
1086 *large_page = total;
1090 if (dma_pte_superpage(pte)) {
1091 *large_page = total;
1095 parent = phys_to_virt(dma_pte_addr(pte));
1101 /* clear last level pte; a tlb flush should follow */
1102 static void dma_pte_clear_range(struct dmar_domain *domain,
1103 unsigned long start_pfn,
1104 unsigned long last_pfn)
1106 unsigned int large_page = 1;
1107 struct dma_pte *first_pte, *pte;
1109 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1110 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1111 BUG_ON(start_pfn > last_pfn);
1113 /* we don't need lock here; nobody else touches the iova range */
1116 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
1118 start_pfn = align_to_level(start_pfn + 1, large_page + 1);
1123 start_pfn += lvl_to_nr_pages(large_page);
1125 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
1127 domain_flush_cache(domain, first_pte,
1128 (void *)pte - (void *)first_pte);
1130 } while (start_pfn && start_pfn <= last_pfn);
1133 static void dma_pte_free_level(struct dmar_domain *domain, int level,
1134 struct dma_pte *pte, unsigned long pfn,
1135 unsigned long start_pfn, unsigned long last_pfn)
1137 pfn = max(start_pfn, pfn);
1138 pte = &pte[pfn_level_offset(pfn, level)];
1141 unsigned long level_pfn;
1142 struct dma_pte *level_pte;
1144 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1147 level_pfn = pfn & level_mask(level - 1);
1148 level_pte = phys_to_virt(dma_pte_addr(pte));
1151 dma_pte_free_level(domain, level - 1, level_pte,
1152 level_pfn, start_pfn, last_pfn);
1154 /* If range covers entire pagetable, free it */
1155 if (!(start_pfn > level_pfn ||
1156 last_pfn < level_pfn + level_size(level) - 1)) {
1158 domain_flush_cache(domain, pte, sizeof(*pte));
1159 free_pgtable_page(level_pte);
1162 pfn += level_size(level);
1163 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1166 /* clear last level (leaf) ptes and free page table pages. */
1167 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1168 unsigned long start_pfn,
1169 unsigned long last_pfn)
1171 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1172 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1173 BUG_ON(start_pfn > last_pfn);
1175 dma_pte_clear_range(domain, start_pfn, last_pfn);
1177 /* We don't need lock here; nobody else touches the iova range */
1178 dma_pte_free_level(domain, agaw_to_level(domain->agaw),
1179 domain->pgd, 0, start_pfn, last_pfn);
1182 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1183 free_pgtable_page(domain->pgd);
1188 /* When a page at a given level is being unlinked from its parent, we don't
1189 need to *modify* it at all. All we need to do is make a list of all the
1190 pages which can be freed just as soon as we've flushed the IOTLB and we
1191 know the hardware page-walk will no longer touch them.
1192 The 'pte' argument is the *parent* PTE, pointing to the page that is to
1194 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1195 int level, struct dma_pte *pte,
1196 struct page *freelist)
1200 pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1201 pg->freelist = freelist;
1207 pte = page_address(pg);
1209 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1210 freelist = dma_pte_list_pagetables(domain, level - 1,
1213 } while (!first_pte_in_page(pte));
1218 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1219 struct dma_pte *pte, unsigned long pfn,
1220 unsigned long start_pfn,
1221 unsigned long last_pfn,
1222 struct page *freelist)
1224 struct dma_pte *first_pte = NULL, *last_pte = NULL;
1226 pfn = max(start_pfn, pfn);
1227 pte = &pte[pfn_level_offset(pfn, level)];
1230 unsigned long level_pfn;
1232 if (!dma_pte_present(pte))
1235 level_pfn = pfn & level_mask(level);
1237 /* If range covers entire pagetable, free it */
1238 if (start_pfn <= level_pfn &&
1239 last_pfn >= level_pfn + level_size(level) - 1) {
1240 /* These subordinate page tables are going away entirely. Don't
1241 bother to clear them; we're just going to *free* them. */
1242 if (level > 1 && !dma_pte_superpage(pte))
1243 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1249 } else if (level > 1) {
1250 /* Recurse down into a level that isn't *entirely* obsolete */
1251 freelist = dma_pte_clear_level(domain, level - 1,
1252 phys_to_virt(dma_pte_addr(pte)),
1253 level_pfn, start_pfn, last_pfn,
1257 pfn += level_size(level);
1258 } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1261 domain_flush_cache(domain, first_pte,
1262 (void *)++last_pte - (void *)first_pte);
1267 /* We can't just free the pages because the IOMMU may still be walking
1268 the page tables, and may have cached the intermediate levels. The
1269 pages can only be freed after the IOTLB flush has been done. */
1270 static struct page *domain_unmap(struct dmar_domain *domain,
1271 unsigned long start_pfn,
1272 unsigned long last_pfn)
1274 struct page *freelist = NULL;
1276 BUG_ON(!domain_pfn_supported(domain, start_pfn));
1277 BUG_ON(!domain_pfn_supported(domain, last_pfn));
1278 BUG_ON(start_pfn > last_pfn);
1280 /* we don't need lock here; nobody else touches the iova range */
1281 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1282 domain->pgd, 0, start_pfn, last_pfn, NULL);
1285 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1286 struct page *pgd_page = virt_to_page(domain->pgd);
1287 pgd_page->freelist = freelist;
1288 freelist = pgd_page;
1296 static void dma_free_pagelist(struct page *freelist)
1300 while ((pg = freelist)) {
1301 freelist = pg->freelist;
1302 free_pgtable_page(page_address(pg));
1306 /* iommu handling */
1307 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1309 struct root_entry *root;
1310 unsigned long flags;
1312 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1314 pr_err("Allocating root entry for %s failed\n",
1319 __iommu_flush_cache(iommu, root, ROOT_SIZE);
1321 spin_lock_irqsave(&iommu->lock, flags);
1322 iommu->root_entry = root;
1323 spin_unlock_irqrestore(&iommu->lock, flags);
1328 static void iommu_set_root_entry(struct intel_iommu *iommu)
1334 addr = virt_to_phys(iommu->root_entry);
1335 if (ecs_enabled(iommu))
1336 addr |= DMA_RTADDR_RTT;
1338 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1339 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1341 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1343 /* Make sure hardware complete it */
1344 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1345 readl, (sts & DMA_GSTS_RTPS), sts);
1347 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1350 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1355 if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1358 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1359 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1361 /* Make sure hardware complete it */
1362 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1363 readl, (!(val & DMA_GSTS_WBFS)), val);
1365 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1368 /* return value determines if we need a write buffer flush */
1369 static void __iommu_flush_context(struct intel_iommu *iommu,
1370 u16 did, u16 source_id, u8 function_mask,
1377 case DMA_CCMD_GLOBAL_INVL:
1378 val = DMA_CCMD_GLOBAL_INVL;
1380 case DMA_CCMD_DOMAIN_INVL:
1381 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1383 case DMA_CCMD_DEVICE_INVL:
1384 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1385 | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1390 val |= DMA_CCMD_ICC;
1392 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1393 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1395 /* Make sure hardware complete it */
1396 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1397 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1399 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1402 /* return value determines if we need a write buffer flush */
1403 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1404 u64 addr, unsigned int size_order, u64 type)
1406 int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1407 u64 val = 0, val_iva = 0;
1411 case DMA_TLB_GLOBAL_FLUSH:
1412 /* a global flush doesn't need to set IVA_REG */
1413 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1415 case DMA_TLB_DSI_FLUSH:
1416 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1418 case DMA_TLB_PSI_FLUSH:
1419 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1420 /* IH bit is passed in as part of address */
1421 val_iva = size_order | addr;
1426 /* Note: set drain read/write */
1429 * This is probably done to be extra safe. Looks like we can
1430 * ignore it without any impact.
1432 if (cap_read_drain(iommu->cap))
1433 val |= DMA_TLB_READ_DRAIN;
1435 if (cap_write_drain(iommu->cap))
1436 val |= DMA_TLB_WRITE_DRAIN;
1438 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1439 /* Note: Only uses first TLB reg currently */
1441 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1442 dmar_writeq(iommu->reg + tlb_offset + 8, val);
1444 /* Make sure hardware complete it */
1445 IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1446 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1448 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1450 /* check IOTLB invalidation granularity */
1451 if (DMA_TLB_IAIG(val) == 0)
1452 pr_err("Flush IOTLB failed\n");
1453 if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1454 pr_debug("TLB flush request %Lx, actual %Lx\n",
1455 (unsigned long long)DMA_TLB_IIRG(type),
1456 (unsigned long long)DMA_TLB_IAIG(val));
1459 static struct device_domain_info *
1460 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1463 struct device_domain_info *info;
1465 assert_spin_locked(&device_domain_lock);
1470 list_for_each_entry(info, &domain->devices, link)
1471 if (info->iommu == iommu && info->bus == bus &&
1472 info->devfn == devfn) {
1473 if (info->ats_supported && info->dev)
1481 static void domain_update_iotlb(struct dmar_domain *domain)
1483 struct device_domain_info *info;
1484 bool has_iotlb_device = false;
1486 assert_spin_locked(&device_domain_lock);
1488 list_for_each_entry(info, &domain->devices, link) {
1489 struct pci_dev *pdev;
1491 if (!info->dev || !dev_is_pci(info->dev))
1494 pdev = to_pci_dev(info->dev);
1495 if (pdev->ats_enabled) {
1496 has_iotlb_device = true;
1501 domain->has_iotlb_device = has_iotlb_device;
1504 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1506 struct pci_dev *pdev;
1508 assert_spin_locked(&device_domain_lock);
1510 if (!info || !dev_is_pci(info->dev))
1513 pdev = to_pci_dev(info->dev);
1515 #ifdef CONFIG_INTEL_IOMMU_SVM
1516 /* The PCIe spec, in its wisdom, declares that the behaviour of
1517 the device if you enable PASID support after ATS support is
1518 undefined. So always enable PASID support on devices which
1519 have it, even if we can't yet know if we're ever going to
1521 if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1522 info->pasid_enabled = 1;
1524 if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1525 info->pri_enabled = 1;
1527 if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1528 info->ats_enabled = 1;
1529 domain_update_iotlb(info->domain);
1530 info->ats_qdep = pci_ats_queue_depth(pdev);
1534 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1536 struct pci_dev *pdev;
1538 assert_spin_locked(&device_domain_lock);
1540 if (!dev_is_pci(info->dev))
1543 pdev = to_pci_dev(info->dev);
1545 if (info->ats_enabled) {
1546 pci_disable_ats(pdev);
1547 info->ats_enabled = 0;
1548 domain_update_iotlb(info->domain);
1550 #ifdef CONFIG_INTEL_IOMMU_SVM
1551 if (info->pri_enabled) {
1552 pci_disable_pri(pdev);
1553 info->pri_enabled = 0;
1555 if (info->pasid_enabled) {
1556 pci_disable_pasid(pdev);
1557 info->pasid_enabled = 0;
1562 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1563 u64 addr, unsigned mask)
1566 unsigned long flags;
1567 struct device_domain_info *info;
1569 if (!domain->has_iotlb_device)
1572 spin_lock_irqsave(&device_domain_lock, flags);
1573 list_for_each_entry(info, &domain->devices, link) {
1574 if (!info->ats_enabled)
1577 sid = info->bus << 8 | info->devfn;
1578 qdep = info->ats_qdep;
1579 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1581 spin_unlock_irqrestore(&device_domain_lock, flags);
1584 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1585 struct dmar_domain *domain,
1586 unsigned long pfn, unsigned int pages,
1589 unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1590 uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1591 u16 did = domain->iommu_did[iommu->seq_id];
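	/* e.g. a 3-page flush rounds up to mask = 2, i.e. a 4-page (16KiB)
	 * invalidation; hardware requires the base to be aligned to that size. */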
1598 * Fall back to domain-selective flush if there is no PSI support or the
 * size is too big.
1600 * PSI requires the page size to be 2 ^ x, and the base address to be
1601 * naturally aligned to the size
1603 if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1604 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1607 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1611 * In caching mode, changes of pages from non-present to present require
1612 * a flush. However, the device IOTLB doesn't need to be flushed in this case.
1614 if (!cap_caching_mode(iommu->cap) || !map)
1615 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1619 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1622 unsigned long flags;
1624 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1625 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1626 pmen &= ~DMA_PMEN_EPM;
1627 writel(pmen, iommu->reg + DMAR_PMEN_REG);
1629 /* wait for the protected region status bit to clear */
1630 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1631 readl, !(pmen & DMA_PMEN_PRS), pmen);
1633 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1636 static void iommu_enable_translation(struct intel_iommu *iommu)
1639 unsigned long flags;
1641 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1642 iommu->gcmd |= DMA_GCMD_TE;
1643 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1645 /* Make sure hardware complete it */
1646 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1647 readl, (sts & DMA_GSTS_TES), sts);
1649 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1652 static void iommu_disable_translation(struct intel_iommu *iommu)
1657 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1658 iommu->gcmd &= ~DMA_GCMD_TE;
1659 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1661 /* Make sure hardware complete it */
1662 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1663 readl, (!(sts & DMA_GSTS_TES)), sts);
1665 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1669 static int iommu_init_domains(struct intel_iommu *iommu)
1671 u32 ndomains, nlongs;
1674 ndomains = cap_ndoms(iommu->cap);
1675 pr_debug("%s: Number of Domains supported <%d>\n",
1676 iommu->name, ndomains);
1677 nlongs = BITS_TO_LONGS(ndomains);
1679 spin_lock_init(&iommu->lock);
1681 iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1682 if (!iommu->domain_ids) {
1683 pr_err("%s: Allocating domain id array failed\n",
1688 size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
1689 iommu->domains = kzalloc(size, GFP_KERNEL);
1691 if (iommu->domains) {
1692 size = 256 * sizeof(struct dmar_domain *);
1693 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1696 if (!iommu->domains || !iommu->domains[0]) {
1697 pr_err("%s: Allocating domain array failed\n",
1699 kfree(iommu->domain_ids);
1700 kfree(iommu->domains);
1701 iommu->domain_ids = NULL;
1702 iommu->domains = NULL;
1709 * If Caching mode is set, then invalid translations are tagged
1710 * with domain-id 0, hence we need to pre-allocate it. We also
1711 * use domain-id 0 as a marker for non-allocated domain-id, so
1712 * make sure it is not used for a real domain.
1714 set_bit(0, iommu->domain_ids);
1719 static void disable_dmar_iommu(struct intel_iommu *iommu)
1721 struct device_domain_info *info, *tmp;
1722 unsigned long flags;
1724 if (!iommu->domains || !iommu->domain_ids)
1728 spin_lock_irqsave(&device_domain_lock, flags);
1729 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1730 struct dmar_domain *domain;
1732 if (info->iommu != iommu)
1735 if (!info->dev || !info->domain)
1738 domain = info->domain;
1740 __dmar_remove_one_dev_info(info);
1742 if (!domain_type_is_vm_or_si(domain)) {
1744 * The domain_exit() function can't be called under
1745 * device_domain_lock, as it takes this lock itself.
1746 * So release the lock here and re-run the loop
1749 spin_unlock_irqrestore(&device_domain_lock, flags);
1750 domain_exit(domain);
1754 spin_unlock_irqrestore(&device_domain_lock, flags);
1756 if (iommu->gcmd & DMA_GCMD_TE)
1757 iommu_disable_translation(iommu);
1760 static void free_dmar_iommu(struct intel_iommu *iommu)
1762 if ((iommu->domains) && (iommu->domain_ids)) {
1763 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
1766 for (i = 0; i < elems; i++)
1767 kfree(iommu->domains[i]);
1768 kfree(iommu->domains);
1769 kfree(iommu->domain_ids);
1770 iommu->domains = NULL;
1771 iommu->domain_ids = NULL;
1774 g_iommus[iommu->seq_id] = NULL;
1776 /* free context mapping */
1777 free_context_table(iommu);
1779 #ifdef CONFIG_INTEL_IOMMU_SVM
1780 if (pasid_enabled(iommu)) {
1781 if (ecap_prs(iommu->ecap))
1782 intel_svm_finish_prq(iommu);
1783 intel_svm_free_pasid_tables(iommu);
1788 static struct dmar_domain *alloc_domain(int flags)
1790 struct dmar_domain *domain;
1792 domain = alloc_domain_mem();
1796 memset(domain, 0, sizeof(*domain));
1798 domain->flags = flags;
1799 domain->has_iotlb_device = false;
1800 INIT_LIST_HEAD(&domain->devices);
1805 /* Must be called with iommu->lock */
1806 static int domain_attach_iommu(struct dmar_domain *domain,
1807 struct intel_iommu *iommu)
1809 unsigned long ndomains;
1812 assert_spin_locked(&device_domain_lock);
1813 assert_spin_locked(&iommu->lock);
1815 domain->iommu_refcnt[iommu->seq_id] += 1;
1816 domain->iommu_count += 1;
1817 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1818 ndomains = cap_ndoms(iommu->cap);
1819 num = find_first_zero_bit(iommu->domain_ids, ndomains);
1821 if (num >= ndomains) {
1822 pr_err("%s: No free domain ids\n", iommu->name);
1823 domain->iommu_refcnt[iommu->seq_id] -= 1;
1824 domain->iommu_count -= 1;
1828 set_bit(num, iommu->domain_ids);
1829 set_iommu_domain(iommu, num, domain);
1831 domain->iommu_did[iommu->seq_id] = num;
1832 domain->nid = iommu->node;
1834 domain_update_iommu_cap(domain);
1840 static int domain_detach_iommu(struct dmar_domain *domain,
1841 struct intel_iommu *iommu)
1843 int num, count = INT_MAX;
1845 assert_spin_locked(&device_domain_lock);
1846 assert_spin_locked(&iommu->lock);
1848 domain->iommu_refcnt[iommu->seq_id] -= 1;
1849 count = --domain->iommu_count;
1850 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1851 num = domain->iommu_did[iommu->seq_id];
1852 clear_bit(num, iommu->domain_ids);
1853 set_iommu_domain(iommu, num, NULL);
1855 domain_update_iommu_cap(domain);
1856 domain->iommu_did[iommu->seq_id] = 0;
1862 static struct iova_domain reserved_iova_list;
1863 static struct lock_class_key reserved_rbtree_key;
1865 static int dmar_init_reserved_ranges(void)
1867 struct pci_dev *pdev = NULL;
1871 init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1874 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1875 &reserved_rbtree_key);
1877 /* IOAPIC ranges shouldn't be accessed by DMA */
1878 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1879 IOVA_PFN(IOAPIC_RANGE_END));
1881 pr_err("Reserve IOAPIC range failed\n");
1885 /* Reserve all PCI MMIO to avoid peer-to-peer access */
1886 for_each_pci_dev(pdev) {
1889 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1890 r = &pdev->resource[i];
1891 if (!r->flags || !(r->flags & IORESOURCE_MEM))
1893 iova = reserve_iova(&reserved_iova_list,
1897 pr_err("Reserve iova failed\n");
1905 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1907 copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1910 static inline int guestwidth_to_adjustwidth(int gaw)
1913 int r = (gaw - 12) % 9;
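	/* guestwidth_to_adjustwidth() rounds the guest width up so that
	 * (gaw - 12) is a whole number of 9-bit strides; e.g. a 40-bit guest
	 * width becomes 48 bits so it maps onto complete page-table levels. */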
1924 static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1927 int adjust_width, agaw;
1928 unsigned long sagaw;
1930 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1932 domain_reserve_special_ranges(domain);
1934 /* calculate AGAW */
1935 if (guest_width > cap_mgaw(iommu->cap))
1936 guest_width = cap_mgaw(iommu->cap);
1937 domain->gaw = guest_width;
1938 adjust_width = guestwidth_to_adjustwidth(guest_width);
1939 agaw = width_to_agaw(adjust_width);
1940 sagaw = cap_sagaw(iommu->cap);
1941 if (!test_bit(agaw, &sagaw)) {
1942 /* hardware doesn't support it, choose a bigger one */
1943 pr_debug("Hardware doesn't support agaw %d\n", agaw);
1944 agaw = find_next_bit(&sagaw, 5, agaw);
1948 domain->agaw = agaw;
1950 if (ecap_coherent(iommu->ecap))
1951 domain->iommu_coherency = 1;
1953 domain->iommu_coherency = 0;
1955 if (ecap_sc_support(iommu->ecap))
1956 domain->iommu_snooping = 1;
1958 domain->iommu_snooping = 0;
1960 if (intel_iommu_superpage)
1961 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1963 domain->iommu_superpage = 0;
1965 domain->nid = iommu->node;
1967 /* always allocate the top pgd */
1968 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1971 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1975 static void domain_exit(struct dmar_domain *domain)
1977 struct page *freelist = NULL;
1979 /* Domain 0 is reserved, so don't process it */
1983 /* Flush any lazy unmaps that may reference this domain */
1984 if (!intel_iommu_strict) {
1987 for_each_possible_cpu(cpu)
1988 flush_unmaps_timeout(cpu);
1991 /* Remove associated devices and clear attached or cached domains */
1993 domain_remove_dev_info(domain);
1997 put_iova_domain(&domain->iovad);
1999 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
2001 dma_free_pagelist(freelist);
2003 free_domain_mem(domain);
2006 static int domain_context_mapping_one(struct dmar_domain *domain,
2007 struct intel_iommu *iommu,
2010 u16 did = domain->iommu_did[iommu->seq_id];
2011 int translation = CONTEXT_TT_MULTI_LEVEL;
2012 struct device_domain_info *info = NULL;
2013 struct context_entry *context;
2014 unsigned long flags;
2015 struct dma_pte *pgd;
2020 if (hw_pass_through && domain_type_is_si(domain))
2021 translation = CONTEXT_TT_PASS_THROUGH;
2023 pr_debug("Set context mapping for %02x:%02x.%d\n",
2024 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
2026 BUG_ON(!domain->pgd);
2028 spin_lock_irqsave(&device_domain_lock, flags);
2029 spin_lock(&iommu->lock);
2032 context = iommu_context_addr(iommu, bus, devfn, 1);
2037 if (context_present(context))
2042 context_clear_entry(context);
2043 context_set_domain_id(context, did);
2046 * Skip top levels of page tables for an iommu which has a smaller agaw
2047 * than the default. Unnecessary for PT mode.
2049 if (translation != CONTEXT_TT_PASS_THROUGH) {
2050 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
2052 pgd = phys_to_virt(dma_pte_addr(pgd));
2053 if (!dma_pte_present(pgd))
2057 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2058 if (info && info->ats_supported)
2059 translation = CONTEXT_TT_DEV_IOTLB;
2061 translation = CONTEXT_TT_MULTI_LEVEL;
2063 context_set_address_root(context, virt_to_phys(pgd));
2064 context_set_address_width(context, iommu->agaw);
2067 * In pass through mode, AW must be programmed to
2068 * indicate the largest AGAW value supported by
2069 * hardware. And ASR is ignored by hardware.
2071 context_set_address_width(context, iommu->msagaw);
2074 context_set_translation_type(context, translation);
2075 context_set_fault_enable(context);
2076 context_set_present(context);
2077 domain_flush_cache(domain, context, sizeof(*context));
2080 * It's a non-present to present mapping. If hardware doesn't cache
2081 * non-present entries we only need to flush the write-buffer. If it
2082 * _does_ cache non-present entries, then it does so in the special
2083 * domain #0, which we have to flush:
2085 if (cap_caching_mode(iommu->cap)) {
2086 iommu->flush.flush_context(iommu, 0,
2087 (((u16)bus) << 8) | devfn,
2088 DMA_CCMD_MASK_NOBIT,
2089 DMA_CCMD_DEVICE_INVL);
2090 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
2092 iommu_flush_write_buffer(iommu);
2094 iommu_enable_dev_iotlb(info);
2099 spin_unlock(&iommu->lock);
2100 spin_unlock_irqrestore(&device_domain_lock, flags);
2105 struct domain_context_mapping_data {
2106 struct dmar_domain *domain;
2107 struct intel_iommu *iommu;
2110 static int domain_context_mapping_cb(struct pci_dev *pdev,
2111 u16 alias, void *opaque)
2113 struct domain_context_mapping_data *data = opaque;
2115 return domain_context_mapping_one(data->domain, data->iommu,
2116 PCI_BUS_NUM(alias), alias & 0xff);
2120 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2122 struct intel_iommu *iommu;
2124 struct domain_context_mapping_data data;
2126 iommu = device_to_iommu(dev, &bus, &devfn);
2130 if (!dev_is_pci(dev))
2131 return domain_context_mapping_one(domain, iommu, bus, devfn);
2133 data.domain = domain;
2136 return pci_for_each_dma_alias(to_pci_dev(dev),
2137 &domain_context_mapping_cb, &data);
2140 static int domain_context_mapped_cb(struct pci_dev *pdev,
2141 u16 alias, void *opaque)
2143 struct intel_iommu *iommu = opaque;
2145 return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2148 static int domain_context_mapped(struct device *dev)
2150 struct intel_iommu *iommu;
2153 iommu = device_to_iommu(dev, &bus, &devfn);
2157 if (!dev_is_pci(dev))
2158 return device_context_mapped(iommu, bus, devfn);
2160 return !pci_for_each_dma_alias(to_pci_dev(dev),
2161 domain_context_mapped_cb, iommu);
2164 /* Returns a number of VTD pages, but aligned to MM page size */
2165 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2168 host_addr &= ~PAGE_MASK;
2169 return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
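/* e.g. with 4KiB MM pages, a buffer at page offset 0x234 with size 0x1000
 * spans two pages: PAGE_ALIGN(0x1234) >> VTD_PAGE_SHIFT == 2. */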
2172 /* Return largest possible superpage level for a given mapping */
2173 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2174 unsigned long iov_pfn,
2175 unsigned long phy_pfn,
2176 unsigned long pages)
2178 int support, level = 1;
2179 unsigned long pfnmerge;
2181 support = domain->iommu_superpage;
2183 /* To use a large page, the virtual *and* physical addresses
2184 must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2185 of them will mean we have to use smaller pages. So just
2186 merge them and check both at once. */
2187 pfnmerge = iov_pfn | phy_pfn;
2189 while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2190 pages >>= VTD_STRIDE_SHIFT;
2193 pfnmerge >>= VTD_STRIDE_SHIFT;
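	/* e.g. a 2MiB (level 2) superpage is only chosen when both pfns have
	 * their low 9 bits clear (2MiB alignment) and enough pages remain to
	 * fill it; each iteration tests the next larger size the hardware
	 * supports. */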
2200 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2201 struct scatterlist *sg, unsigned long phys_pfn,
2202 unsigned long nr_pages, int prot)
2204 struct dma_pte *first_pte = NULL, *pte = NULL;
2205 phys_addr_t uninitialized_var(pteval);
2206 unsigned long sg_res = 0;
2207 unsigned int largepage_lvl = 0;
2208 unsigned long lvl_pages = 0;
2210 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2212 if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2215 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2219 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2222 while (nr_pages > 0) {
2226 sg_res = aligned_nrpages(sg->offset, sg->length);
2227 sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2228 sg->dma_length = sg->length;
2229 pteval = page_to_phys(sg_page(sg)) | prot;
2230 phys_pfn = pteval >> VTD_PAGE_SHIFT;
2234 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2236 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2239 /* It is a large page */
2240 if (largepage_lvl > 1) {
2241 unsigned long nr_superpages, end_pfn;
2243 pteval |= DMA_PTE_LARGE_PAGE;
2244 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2246 nr_superpages = sg_res / lvl_pages;
2247 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2250 * Ensure that old small page tables are
2251 * removed to make room for superpage(s).
2253 dma_pte_free_pagetable(domain, iov_pfn, end_pfn);
2255 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2259 /* We don't need lock here, nobody else
2260 * touches the iova range
2262 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2264 static int dumps = 5;
2265 pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2266 iov_pfn, tmp, (unsigned long long)pteval);
2269 debug_dma_dump_mappings(NULL);
2274 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2276 BUG_ON(nr_pages < lvl_pages);
2277 BUG_ON(sg_res < lvl_pages);
2279 nr_pages -= lvl_pages;
2280 iov_pfn += lvl_pages;
2281 phys_pfn += lvl_pages;
2282 pteval += lvl_pages * VTD_PAGE_SIZE;
2283 sg_res -= lvl_pages;
2285 /* If the next PTE would be the first in a new page, then we
2286 need to flush the cache on the entries we've just written.
2287 And then we'll need to recalculate 'pte', so clear it and
2288 let it get set again in the if (!pte) block above.
2290 If we're done (!nr_pages) we need to flush the cache too.
2292 Also if we've been setting superpages, we may need to
2293 recalculate 'pte' and switch back to smaller pages for the
2294 end of the mapping, if the trailing size is not enough to
2295 use another superpage (i.e. sg_res < lvl_pages). */
2297 if (!nr_pages || first_pte_in_page(pte) ||
2298 (largepage_lvl > 1 && sg_res < lvl_pages)) {
2299 domain_flush_cache(domain, first_pte,
2300 (void *)pte - (void *)first_pte);
2304 if (!sg_res && nr_pages)
2310 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2311 struct scatterlist *sg, unsigned long nr_pages,
2314 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2317 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2318 unsigned long phys_pfn, unsigned long nr_pages,
2321 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2324 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
2329 clear_context_table(iommu, bus, devfn);
2330 iommu->flush.flush_context(iommu, 0, 0, 0,
2331 DMA_CCMD_GLOBAL_INVL);
2332 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2335 static inline void unlink_domain_info(struct device_domain_info *info)
2337 assert_spin_locked(&device_domain_lock);
2338 list_del(&info->link);
2339 list_del(&info->global);
2341 info->dev->archdata.iommu = NULL;
2344 static void domain_remove_dev_info(struct dmar_domain *domain)
2346 struct device_domain_info *info, *tmp;
2347 unsigned long flags;
2349 spin_lock_irqsave(&device_domain_lock, flags);
2350 list_for_each_entry_safe(info, tmp, &domain->devices, link)
2351 __dmar_remove_one_dev_info(info);
2352 spin_unlock_irqrestore(&device_domain_lock, flags);
2357 * Note: struct device->archdata.iommu stores the info
2359 static struct dmar_domain *find_domain(struct device *dev)
2361 struct device_domain_info *info;
2363 /* No lock here, assumes no domain exit in normal case */
2364 info = dev->archdata.iommu;
2366 return info->domain;
2370 static inline struct device_domain_info *
2371 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2373 struct device_domain_info *info;
2375 list_for_each_entry(info, &device_domain_list, global)
2376 if (info->iommu->segment == segment && info->bus == bus &&
2377 info->devfn == devfn)
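/*
 * Allocate a device_domain_info for (bus, devfn), probe ATS/PASID/PRI
 * support, attach the domain to the IOMMU and set up the context mapping.
 * If another thread attached a domain to the device first, that existing
 * domain is returned instead and the caller must free its own.
 */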
2383 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2386 struct dmar_domain *domain)
2388 struct dmar_domain *found = NULL;
2389 struct device_domain_info *info;
2390 unsigned long flags;
2393 info = alloc_devinfo_mem();
2398 info->devfn = devfn;
2399 info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2400 info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2403 info->domain = domain;
2404 info->iommu = iommu;
2406 if (dev && dev_is_pci(dev)) {
2407 struct pci_dev *pdev = to_pci_dev(info->dev);
2409 if (ecap_dev_iotlb_support(iommu->ecap) &&
2410 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2411 dmar_find_matched_atsr_unit(pdev))
2412 info->ats_supported = 1;
2414 if (ecs_enabled(iommu)) {
2415 if (pasid_enabled(iommu)) {
2416 int features = pci_pasid_features(pdev);
2418 info->pasid_supported = features | 1;
2421 if (info->ats_supported && ecap_prs(iommu->ecap) &&
2422 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
2423 info->pri_supported = 1;
2427 spin_lock_irqsave(&device_domain_lock, flags);
2429 found = find_domain(dev);
2432 struct device_domain_info *info2;
2433 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2435 found = info2->domain;
2441 spin_unlock_irqrestore(&device_domain_lock, flags);
2442 free_devinfo_mem(info);
2443 /* Caller must free the original domain */
2447 spin_lock(&iommu->lock);
2448 ret = domain_attach_iommu(domain, iommu);
2449 spin_unlock(&iommu->lock);
2452 spin_unlock_irqrestore(&device_domain_lock, flags);
2453 free_devinfo_mem(info);
2457 list_add(&info->link, &domain->devices);
2458 list_add(&info->global, &device_domain_list);
2460 dev->archdata.iommu = info;
2461 spin_unlock_irqrestore(&device_domain_lock, flags);
2463 if (dev && domain_context_mapping(domain, dev)) {
2464 pr_err("Domain context map for %s failed\n", dev_name(dev));
2465 dmar_remove_one_dev_info(domain, dev);
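/* pci_for_each_dma_alias() callback: remember the last (topmost) DMA alias seen */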
2472 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2474 *(u16 *)opaque = alias;
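/*
 * Find the domain already used by the device's topmost DMA alias, or
 * allocate and initialize a fresh one if none exists yet.
 */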
2478 static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
2480 struct device_domain_info *info = NULL;
2481 struct dmar_domain *domain = NULL;
2482 struct intel_iommu *iommu;
2483 u16 req_id, dma_alias;
2484 unsigned long flags;
2487 iommu = device_to_iommu(dev, &bus, &devfn);
2491 req_id = ((u16)bus << 8) | devfn;
2493 if (dev_is_pci(dev)) {
2494 struct pci_dev *pdev = to_pci_dev(dev);
2496 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2498 spin_lock_irqsave(&device_domain_lock, flags);
2499 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2500 PCI_BUS_NUM(dma_alias),
2503 iommu = info->iommu;
2504 domain = info->domain;
2506 spin_unlock_irqrestore(&device_domain_lock, flags);
2508 /* DMA alias already has a domain, use it */
2513 /* Allocate and initialize new domain for the device */
2514 domain = alloc_domain(0);
2517 if (domain_init(domain, iommu, gaw)) {
2518 domain_exit(domain);
2527 static struct dmar_domain *set_domain_for_dev(struct device *dev,
2528 struct dmar_domain *domain)
2530 struct intel_iommu *iommu;
2531 struct dmar_domain *tmp;
2532 u16 req_id, dma_alias;
2535 iommu = device_to_iommu(dev, &bus, &devfn);
2539 req_id = ((u16)bus << 8) | devfn;
2541 if (dev_is_pci(dev)) {
2542 struct pci_dev *pdev = to_pci_dev(dev);
2544 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2546 /* register PCI DMA alias device */
2547 if (req_id != dma_alias) {
2548 tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2549 dma_alias & 0xff, NULL, domain);
2551 if (!tmp || tmp != domain)
2556 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2557 if (!tmp || tmp != domain)
2563 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2565 struct dmar_domain *domain, *tmp;
2567 domain = find_domain(dev);
2571 domain = find_or_alloc_domain(dev, gaw);
2575 tmp = set_domain_for_dev(dev, domain);
2576 if (!tmp || domain != tmp) {
2577 domain_exit(domain);
2586 static int iommu_domain_identity_map(struct dmar_domain *domain,
2587 unsigned long long start,
2588 unsigned long long end)
2590 unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2591 unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
2593 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2594 dma_to_mm_pfn(last_vpfn))) {
2595 pr_err("Reserving iova failed\n");
2599 pr_debug("Mapping reserved region %llx-%llx\n", start, end);
2601 * RMRR range might overlap with a physical memory range; clear any existing mapping first.
2604 dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2606 return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2607 last_vpfn - first_vpfn + 1,
2608 DMA_PTE_READ|DMA_PTE_WRITE);
2611 static int domain_prepare_identity_map(struct device *dev,
2612 struct dmar_domain *domain,
2613 unsigned long long start,
2614 unsigned long long end)
2616 /* For _hardware_ passthrough, don't bother. But for software
2617 passthrough, we do it anyway -- it may indicate a memory
2618 range which is reserved in E820, and so didn't get set
2619 up to start with in si_domain */
2620 if (domain == si_domain && hw_pass_through) {
2621 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2622 dev_name(dev), start, end);
2626 pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2627 dev_name(dev), start, end);
2630 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2631 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2632 dmi_get_system_info(DMI_BIOS_VENDOR),
2633 dmi_get_system_info(DMI_BIOS_VERSION),
2634 dmi_get_system_info(DMI_PRODUCT_VERSION));
2638 if (end >> agaw_to_width(domain->agaw)) {
2639 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2640 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2641 agaw_to_width(domain->agaw),
2642 dmi_get_system_info(DMI_BIOS_VENDOR),
2643 dmi_get_system_info(DMI_BIOS_VERSION),
2644 dmi_get_system_info(DMI_PRODUCT_VERSION));
2648 return iommu_domain_identity_map(domain, start, end);
2651 static int iommu_prepare_identity_map(struct device *dev,
2652 unsigned long long start,
2653 unsigned long long end)
2655 struct dmar_domain *domain;
2658 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2662 ret = domain_prepare_identity_map(dev, domain, start, end);
2664 domain_exit(domain);
2669 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2672 if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2674 return iommu_prepare_identity_map(dev, rmrr->base_address,
2678 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2679 static inline void iommu_prepare_isa(void)
2681 struct pci_dev *pdev;
2684 pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2688 pr_info("Prepare 0-16MiB unity mapping for LPC\n");
2689 ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2692 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
2697 static inline void iommu_prepare_isa(void)
2701 #endif /* !CONFIG_INTEL_IOMMU_FLOPPY_WA */
2703 static int md_domain_init(struct dmar_domain *domain, int guest_width);
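/*
 * Set up the static identity (si) domain: a single domain that maps every
 * online physical memory range 1:1, used for pass-through style DMA.
 */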
2705 static int __init si_domain_init(int hw)
2709 si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2713 if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2714 domain_exit(si_domain);
2718 pr_debug("Identity mapping domain allocated\n");
2723 for_each_online_node(nid) {
2724 unsigned long start_pfn, end_pfn;
2727 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2728 ret = iommu_domain_identity_map(si_domain,
2729 PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2738 static int identity_mapping(struct device *dev)
2740 struct device_domain_info *info;
2742 if (likely(!iommu_identity_mapping))
2745 info = dev->archdata.iommu;
2746 if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2747 return (info->domain == si_domain);
2752 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2754 struct dmar_domain *ndomain;
2755 struct intel_iommu *iommu;
2758 iommu = device_to_iommu(dev, &bus, &devfn);
2762 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2763 if (ndomain != domain)
2769 static bool device_has_rmrr(struct device *dev)
2771 struct dmar_rmrr_unit *rmrr;
2776 for_each_rmrr_units(rmrr) {
2778 * Return TRUE if this RMRR contains the device we are looking for.
2781 for_each_active_dev_scope(rmrr->devices,
2782 rmrr->devices_cnt, i, tmp)
2793 * There are a couple cases where we need to restrict the functionality of
2794 * devices associated with RMRRs. The first is when evaluating a device for
2795 * identity mapping because problems exist when devices are moved in and out
2796 * of domains and their respective RMRR information is lost. This means that
2797 * a device with associated RMRRs will never be in a "passthrough" domain.
2798 * The second is use of the device through the IOMMU API. This interface
2799 * expects to have full control of the IOVA space for the device. We cannot
2800 * satisfy both the requirement that RMRR access is maintained and have an
2801 * unencumbered IOVA space. We also have no ability to quiesce the device's
2802 * use of the RMRR space or even inform the IOMMU API user of the restriction.
2803 * We therefore prevent devices associated with an RMRR from participating in
2804 * the IOMMU API, which eliminates them from device assignment.
2806 * In both cases we assume that PCI USB devices with RMRRs have them largely
2807 * for historical reasons and that the RMRR space is not actively used post
2808 * boot. This exclusion may change if vendors begin to abuse it.
2810 * The same exception is made for graphics devices, with the requirement that
2811 * any use of the RMRR regions will be torn down before assigning the device to a VM.
2814 static bool device_is_rmrr_locked(struct device *dev)
2816 if (!device_has_rmrr(dev))
2819 if (dev_is_pci(dev)) {
2820 struct pci_dev *pdev = to_pci_dev(dev);
2822 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2829 static int iommu_should_identity_map(struct device *dev, int startup)
2832 if (dev_is_pci(dev)) {
2833 struct pci_dev *pdev = to_pci_dev(dev);
2835 if (device_is_rmrr_locked(dev))
2838 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2841 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2844 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2848 * We want to start off with all devices in the 1:1 domain, and
2849 * take them out later if we find they can't access all of memory.
2851 * However, we can't do this for PCI devices behind bridges,
2852 * because all PCI devices behind the same bridge will end up
2853 * with the same source-id on their transactions.
2855 * Practically speaking, we can't change things around for these
2856 * devices at run-time, because we can't be sure there'll be no
2857 * DMA transactions in flight for any of their siblings.
2859 * So PCI devices (unless they're on the root bus) as well as
2860 * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2861 * the 1:1 domain, just in _case_ one of their siblings turns out
2862 * not to be able to map all of memory.
2864 if (!pci_is_pcie(pdev)) {
2865 if (!pci_is_root_bus(pdev->bus))
2867 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2869 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2872 if (device_has_rmrr(dev))
2877 * At boot time, we don't yet know if devices will be 64-bit capable.
2878 * Assume that they will — if they turn out not to be, then we can
2879 * take them out of the 1:1 domain later.
2883 * If the device's dma_mask is less than the system's memory
2884 * size then this is not a candidate for identity mapping.
2886 u64 dma_mask = *dev->dma_mask;
2888 if (dev->coherent_dma_mask &&
2889 dev->coherent_dma_mask < dma_mask)
2890 dma_mask = dev->coherent_dma_mask;
2892 return dma_mask >= dma_get_required_mask(dev);
2898 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2902 if (!iommu_should_identity_map(dev, 1))
2905 ret = domain_add_dev_info(si_domain, dev);
2907 pr_info("%s identity mapping for device %s\n",
2908 hw ? "Hardware" : "Software", dev_name(dev));
2909 else if (ret == -ENODEV)
2910 /* device not associated with an iommu */
2917 static int __init iommu_prepare_static_identity_mapping(int hw)
2919 struct pci_dev *pdev = NULL;
2920 struct dmar_drhd_unit *drhd;
2921 struct intel_iommu *iommu;
2926 for_each_pci_dev(pdev) {
2927 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2932 for_each_active_iommu(iommu, drhd)
2933 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2934 struct acpi_device_physical_node *pn;
2935 struct acpi_device *adev;
2937 if (dev->bus != &acpi_bus_type)
2940 adev = to_acpi_device(dev);
2941 mutex_lock(&adev->physical_node_lock);
2942 list_for_each_entry(pn, &adev->physical_node_list, node) {
2943 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2947 mutex_unlock(&adev->physical_node_lock);
2955 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2958 * Start from a sane iommu hardware state.
2959 * If queued invalidation was already initialized by us
2960 * (for example, while enabling interrupt-remapping) then
2961 * things are already rolling from a sane state.
2965 * Clear any previous faults.
2967 dmar_fault(-1, iommu);
2969 * Disable queued invalidation if supported and already enabled
2970 * before OS handover.
2972 dmar_disable_qi(iommu);
2975 if (dmar_enable_qi(iommu)) {
2977 * Queued invalidation is not enabled; use register-based invalidation
2979 iommu->flush.flush_context = __iommu_flush_context;
2980 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2981 pr_info("%s: Using Register based invalidation\n",
2984 iommu->flush.flush_context = qi_flush_context;
2985 iommu->flush.flush_iotlb = qi_flush_iotlb;
2986 pr_info("%s: Using Queued invalidation\n", iommu->name);
2990 static int copy_context_table(struct intel_iommu *iommu,
2991 struct root_entry *old_re,
2992 struct context_entry **tbl,
2995 int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
2996 struct context_entry *new_ce = NULL, ce;
2997 struct context_entry *old_ce = NULL;
2998 struct root_entry re;
2999 phys_addr_t old_ce_phys;
3001 tbl_idx = ext ? bus * 2 : bus;
3002 memcpy(&re, old_re, sizeof(re));
3004 for (devfn = 0; devfn < 256; devfn++) {
3005 /* First calculate the correct index */
3006 idx = (ext ? devfn * 2 : devfn) % 256;
3009 /* First save what we may have and clean up */
3011 tbl[tbl_idx] = new_ce;
3012 __iommu_flush_cache(iommu, new_ce,
3022 old_ce_phys = root_entry_lctp(&re);
3024 old_ce_phys = root_entry_uctp(&re);
3027 if (ext && devfn == 0) {
3028 /* No LCTP, try UCTP */
3037 old_ce = memremap(old_ce_phys, PAGE_SIZE,
3042 new_ce = alloc_pgtable_page(iommu->node);
3049 /* Now copy the context entry */
3050 memcpy(&ce, old_ce + idx, sizeof(ce));
3052 if (!__context_present(&ce))
3055 did = context_domain_id(&ce);
3056 if (did >= 0 && did < cap_ndoms(iommu->cap))
3057 set_bit(did, iommu->domain_ids);
3060 * We need a marker for copied context entries. This
3061 * marker needs to work for the old format as well as
3062 * for extended context entries.
3064 * Bit 67 of the context entry is used. In the old
3065 * format this bit is available to software, in the
3066 * extended format it is the PGE bit, but PGE is ignored
3067 * by HW if PASIDs are disabled (and thus still
3070 * So disable PASIDs first and then mark the entry
3071 * copied. This means that we don't copy PASID
3072 * translations from the old kernel, but this is fine as
3073 * faults there are not fatal.
3075 context_clear_pasid_enable(&ce);
3076 context_set_copied(&ce);
3081 tbl[tbl_idx + pos] = new_ce;
3083 __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
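/*
 * In a kdump kernel, take over the root/context tables left behind by the
 * previous kernel so that any DMA still in flight keeps translating, then
 * point our root entries at the copied context tables.
 */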
3092 static int copy_translation_tables(struct intel_iommu *iommu)
3094 struct context_entry **ctxt_tbls;
3095 struct root_entry *old_rt;
3096 phys_addr_t old_rt_phys;
3097 int ctxt_table_entries;
3098 unsigned long flags;
3103 rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3104 ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
3105 new_ext = !!ecap_ecs(iommu->ecap);
3108 * The RTT bit can only be changed when translation is disabled,
3109 * but disabling translation means opening a window for data
3110 * corruption. So bail out and don't copy anything if we would
3111 * have to change the bit.
3116 old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3120 old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
3124 /* This is too big for the stack - allocate it from slab */
3125 ctxt_table_entries = ext ? 512 : 256;
3127 ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
3131 for (bus = 0; bus < 256; bus++) {
3132 ret = copy_context_table(iommu, &old_rt[bus],
3133 ctxt_tbls, bus, ext);
3135 pr_err("%s: Failed to copy context table for bus %d\n",
3141 spin_lock_irqsave(&iommu->lock, flags);
3143 /* Context tables are copied, now write them to the root_entry table */
3144 for (bus = 0; bus < 256; bus++) {
3145 int idx = ext ? bus * 2 : bus;
3148 if (ctxt_tbls[idx]) {
3149 val = virt_to_phys(ctxt_tbls[idx]) | 1;
3150 iommu->root_entry[bus].lo = val;
3153 if (!ext || !ctxt_tbls[idx + 1])
3156 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3157 iommu->root_entry[bus].hi = val;
3160 spin_unlock_irqrestore(&iommu->lock, flags);
3164 __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
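/*
 * Boot-time DMAR initialization: allocate the global iommu array and the
 * per-cpu deferred-flush tables, set up root/context tables for every
 * active IOMMU (copying them from the previous kernel in the kdump case),
 * build the identity/RMRR/ISA mappings and finally enable translation.
 */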
3174 static int __init init_dmars(void)
3176 struct dmar_drhd_unit *drhd;
3177 struct dmar_rmrr_unit *rmrr;
3178 bool copied_tables = false;
3180 struct intel_iommu *iommu;
3186 * initialize and program root entry to not present
3189 for_each_drhd_unit(drhd) {
3191 * lock not needed as this is only incremented in the single-threaded
3192 * kernel __init code path; all other accesses are read-only
3195 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3199 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3202 /* Preallocate enough resources for IOMMU hot-addition */
3203 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3204 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3206 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3209 pr_err("Allocating global iommu array failed\n");
3214 for_each_possible_cpu(cpu) {
3215 struct deferred_flush_data *dfd = per_cpu_ptr(&deferred_flush,
3218 dfd->tables = kzalloc(g_num_of_iommus *
3219 sizeof(struct deferred_flush_table),
3226 spin_lock_init(&dfd->lock);
3227 setup_timer(&dfd->timer, flush_unmaps_timeout, cpu);
3230 for_each_active_iommu(iommu, drhd) {
3231 g_iommus[iommu->seq_id] = iommu;
3233 intel_iommu_init_qi(iommu);
3235 ret = iommu_init_domains(iommu);
3239 init_translation_status(iommu);
3241 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3242 iommu_disable_translation(iommu);
3243 clear_translation_pre_enabled(iommu);
3244 pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3250 * we could share the same root & context tables
3251 * among all IOMMUs. Need to split it later.
3253 ret = iommu_alloc_root_entry(iommu);
3257 if (translation_pre_enabled(iommu)) {
3258 pr_info("Translation already enabled - trying to copy translation structures\n");
3260 ret = copy_translation_tables(iommu);
3263 * We found the IOMMU with translation
3264 * enabled - but failed to copy over the
3265 * old root-entry table. Try to proceed
3266 * by disabling translation now and
3267 * allocating a clean root-entry table.
3268 * This might cause DMAR faults, but
3269 * probably the dump will still succeed.
3271 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3273 iommu_disable_translation(iommu);
3274 clear_translation_pre_enabled(iommu);
3276 pr_info("Copied translation tables from previous kernel for %s\n",
3278 copied_tables = true;
3282 if (!ecap_pass_through(iommu->ecap))
3283 hw_pass_through = 0;
3284 #ifdef CONFIG_INTEL_IOMMU_SVM
3285 if (pasid_enabled(iommu))
3286 intel_svm_alloc_pasid_tables(iommu);
3291 * Now that qi is enabled on all iommus, set the root entry and flush
3292 * caches. This is required on some Intel X58 chipsets, otherwise the
3293 * flush_context function will loop forever and the boot hangs.
3295 for_each_active_iommu(iommu, drhd) {
3296 iommu_flush_write_buffer(iommu);
3297 iommu_set_root_entry(iommu);
3298 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3299 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3302 if (iommu_pass_through)
3303 iommu_identity_mapping |= IDENTMAP_ALL;
3305 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3306 iommu_identity_mapping |= IDENTMAP_GFX;
3309 if (iommu_identity_mapping) {
3310 ret = si_domain_init(hw_pass_through);
3315 check_tylersburg_isoch();
3318 * If we copied translations from a previous kernel in the kdump
3319 * case, we cannot assign the devices to domains now, as that
3320 * would eliminate the old mappings. So skip this part and defer
3321 * the assignment to device driver initialization time.
3327 * If pass-through is not set or not enabled, set up context entries for
3328 * identity mappings for RMRR, GFX and ISA, and possibly fall back to static
3329 * identity mapping if iommu_identity_mapping is set.
3331 if (iommu_identity_mapping) {
3332 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3334 pr_crit("Failed to setup IOMMU pass-through\n");
3340 * for each dev attached to rmrr
3342 * locate drhd for dev, alloc domain for dev
3343 * allocate free domain
3344 * allocate page table entries for rmrr
3345 * if context not allocated for bus
3346 * allocate and init context
3347 * set present in root table for this bus
3348 * init context with domain, translation etc
3352 pr_info("Setting RMRR:\n");
3353 for_each_rmrr_units(rmrr) {
3354 /* some BIOSes list non-existent devices in the DMAR table. */
3355 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3357 ret = iommu_prepare_rmrr_dev(rmrr, dev);
3359 pr_err("Mapping reserved region failed\n");
3363 iommu_prepare_isa();
3370 * global invalidate context cache
3371 * global invalidate iotlb
3372 * enable translation
3374 for_each_iommu(iommu, drhd) {
3375 if (drhd->ignored) {
3377 * we always have to disable PMRs or DMA may fail on this device
3381 iommu_disable_protect_mem_regions(iommu);
3385 iommu_flush_write_buffer(iommu);
3387 #ifdef CONFIG_INTEL_IOMMU_SVM
3388 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
3389 ret = intel_svm_enable_prq(iommu);
3394 ret = dmar_set_interrupt(iommu);
3398 if (!translation_pre_enabled(iommu))
3399 iommu_enable_translation(iommu);
3401 iommu_disable_protect_mem_regions(iommu);
3407 for_each_active_iommu(iommu, drhd) {
3408 disable_dmar_iommu(iommu);
3409 free_dmar_iommu(iommu);
3412 for_each_possible_cpu(cpu)
3413 kfree(per_cpu_ptr(&deferred_flush, cpu)->tables);
3419 /* This takes a number of _MM_ pages, not VTD pages */
3420 static unsigned long intel_alloc_iova(struct device *dev,
3421 struct dmar_domain *domain,
3422 unsigned long nrpages, uint64_t dma_mask)
3424 unsigned long iova_pfn = 0;
3426 /* Restrict dma_mask to the width that the iommu can handle */
3427 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3428 /* Ensure we reserve the whole size-aligned region */
3429 nrpages = __roundup_pow_of_two(nrpages);
3431 if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3433 * First try to allocate an io virtual address in
3434 * DMA_BIT_MASK(32) and if that fails then try allocating from the higher range
3437 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3438 IOVA_PFN(DMA_BIT_MASK(32)));
3442 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
3443 if (unlikely(!iova_pfn)) {
3444 pr_err("Allocating %ld-page iova for %s failed",
3445 nrpages, dev_name(dev));
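/*
 * Slow path for the first DMA mapping of a device: allocate a domain for
 * it, replay any RMRR identity mappings that cover the device, and then
 * bind the domain to the device.
 */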
3452 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
3454 struct dmar_domain *domain, *tmp;
3455 struct dmar_rmrr_unit *rmrr;
3456 struct device *i_dev;
3459 domain = find_domain(dev);
3463 domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3467 /* We have a new domain - setup possible RMRRs for the device */
3469 for_each_rmrr_units(rmrr) {
3470 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3475 ret = domain_prepare_identity_map(dev, domain,
3479 dev_err(dev, "Mapping reserved region failed\n");
3484 tmp = set_domain_for_dev(dev, domain);
3485 if (!tmp || domain != tmp) {
3486 domain_exit(domain);
3493 pr_err("Allocating domain for %s failed\n", dev_name(dev));
3499 static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
3501 struct device_domain_info *info;
3503 /* No lock here, assumes no domain exit in normal case */
3504 info = dev->archdata.iommu;
3506 return info->domain;
3508 return __get_valid_domain_for_dev(dev);
3511 /* Check if the device needs to go through the non-identity map and unmap process. */
3512 static int iommu_no_mapping(struct device *dev)
3516 if (iommu_dummy(dev))
3519 if (!iommu_identity_mapping)
3522 found = identity_mapping(dev);
3524 if (iommu_should_identity_map(dev, 0))
3528 * 32 bit DMA device is removed from si_domain and falls back
3529 * to non-identity mapping.
3531 dmar_remove_one_dev_info(si_domain, dev);
3532 pr_info("32bit %s uses non-identity mapping\n",
3538 * If a 64 bit DMA device is detached from a VM, the device
3539 * is put back into si_domain for identity mapping.
3541 if (iommu_should_identity_map(dev, 0)) {
3543 ret = domain_add_dev_info(si_domain, dev);
3545 pr_info("64bit %s uses identity mapping\n",
3555 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3556 size_t size, int dir, u64 dma_mask)
3558 struct dmar_domain *domain;
3559 phys_addr_t start_paddr;
3560 unsigned long iova_pfn;
3563 struct intel_iommu *iommu;
3564 unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3566 BUG_ON(dir == DMA_NONE);
3568 if (iommu_no_mapping(dev))
3571 domain = get_valid_domain_for_dev(dev);
3575 iommu = domain_get_iommu(domain);
3576 size = aligned_nrpages(paddr, size);
3578 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3583 * Check if DMAR supports zero-length reads on write-only mappings
3586 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3587 !cap_zlr(iommu->cap))
3588 prot |= DMA_PTE_READ;
3589 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3590 prot |= DMA_PTE_WRITE;
3592 * paddr to (paddr + size) might be a partial page, so we should map the whole
3593 * page. Note: if two parts of one page are mapped separately, we
3594 * might have two guest_addr mappings to the same host paddr, but this
3595 * is not a big problem
3597 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
3598 mm_to_dma_pfn(paddr_pfn), size, prot);
3602 /* it's a non-present to present mapping. Only flush if caching mode */
3603 if (cap_caching_mode(iommu->cap))
3604 iommu_flush_iotlb_psi(iommu, domain,
3605 mm_to_dma_pfn(iova_pfn),
3608 iommu_flush_write_buffer(iommu);
3610 start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
3611 start_paddr += paddr & ~PAGE_MASK;
3616 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3617 pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
3618 dev_name(dev), size, (unsigned long long)paddr, dir);
3622 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3623 unsigned long offset, size_t size,
3624 enum dma_data_direction dir,
3625 unsigned long attrs)
3627 return __intel_map_single(dev, page_to_phys(page) + offset, size,
3628 dir, *dev->dma_mask);
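/*
 * Deferred unmapping: freed IOVA ranges are queued in per-cpu tables and
 * their IOTLB invalidations are batched, either when a table fills up to
 * HIGH_WATER_MARK or when the 10ms flush timer fires, to amortise the
 * cost of the flush operations.
 */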
3631 static void flush_unmaps(struct deferred_flush_data *flush_data)
3635 flush_data->timer_on = 0;
3637 /* just flush them all */
3638 for (i = 0; i < g_num_of_iommus; i++) {
3639 struct intel_iommu *iommu = g_iommus[i];
3640 struct deferred_flush_table *flush_table =
3641 &flush_data->tables[i];
3645 if (!flush_table->next)
3648 /* In caching mode, global flushes make emulation expensive */
3649 if (!cap_caching_mode(iommu->cap))
3650 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3651 DMA_TLB_GLOBAL_FLUSH);
3652 for (j = 0; j < flush_table->next; j++) {
3654 struct deferred_flush_entry *entry =
3655 &flush_table->entries[j];
3656 unsigned long iova_pfn = entry->iova_pfn;
3657 unsigned long nrpages = entry->nrpages;
3658 struct dmar_domain *domain = entry->domain;
3659 struct page *freelist = entry->freelist;
3661 /* On real hardware multiple invalidations are expensive */
3662 if (cap_caching_mode(iommu->cap))
3663 iommu_flush_iotlb_psi(iommu, domain,
3664 mm_to_dma_pfn(iova_pfn),
3665 nrpages, !freelist, 0);
3667 mask = ilog2(nrpages);
3668 iommu_flush_dev_iotlb(domain,
3669 (uint64_t)iova_pfn << PAGE_SHIFT, mask);
3671 free_iova_fast(&domain->iovad, iova_pfn, nrpages);
3673 dma_free_pagelist(freelist);
3675 flush_table->next = 0;
3678 flush_data->size = 0;
3681 static void flush_unmaps_timeout(unsigned long cpuid)
3683 struct deferred_flush_data *flush_data = per_cpu_ptr(&deferred_flush, cpuid);
3684 unsigned long flags;
3686 spin_lock_irqsave(&flush_data->lock, flags);
3687 flush_unmaps(flush_data);
3688 spin_unlock_irqrestore(&flush_data->lock, flags);
3691 static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
3692 unsigned long nrpages, struct page *freelist)
3694 unsigned long flags;
3695 int entry_id, iommu_id;
3696 struct intel_iommu *iommu;
3697 struct deferred_flush_entry *entry;
3698 struct deferred_flush_data *flush_data;
3700 flush_data = raw_cpu_ptr(&deferred_flush);
3702 /* Flush all CPUs' entries to avoid deferring too much. If
3703 * this becomes a bottleneck, we can just flush our own CPU's
3704 * entries and rely on the flush timer for the rest.
3706 if (flush_data->size == HIGH_WATER_MARK) {
3709 for_each_online_cpu(cpu)
3710 flush_unmaps_timeout(cpu);
3713 spin_lock_irqsave(&flush_data->lock, flags);
3715 iommu = domain_get_iommu(dom);
3716 iommu_id = iommu->seq_id;
3718 entry_id = flush_data->tables[iommu_id].next;
3719 ++(flush_data->tables[iommu_id].next);
3721 entry = &flush_data->tables[iommu_id].entries[entry_id];
3722 entry->domain = dom;
3723 entry->iova_pfn = iova_pfn;
3724 entry->nrpages = nrpages;
3725 entry->freelist = freelist;
3727 if (!flush_data->timer_on) {
3728 mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10));
3729 flush_data->timer_on = 1;
3732 spin_unlock_irqrestore(&flush_data->lock, flags);
3735 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
3737 struct dmar_domain *domain;
3738 unsigned long start_pfn, last_pfn;
3739 unsigned long nrpages;
3740 unsigned long iova_pfn;
3741 struct intel_iommu *iommu;
3742 struct page *freelist;
3744 if (iommu_no_mapping(dev))
3747 domain = find_domain(dev);
3750 iommu = domain_get_iommu(domain);
3752 iova_pfn = IOVA_PFN(dev_addr);
3754 nrpages = aligned_nrpages(dev_addr, size);
3755 start_pfn = mm_to_dma_pfn(iova_pfn);
3756 last_pfn = start_pfn + nrpages - 1;
3758 pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3759 dev_name(dev), start_pfn, last_pfn);
3761 freelist = domain_unmap(domain, start_pfn, last_pfn);
3763 if (intel_iommu_strict) {
3764 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3765 nrpages, !freelist, 0);
3767 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
3768 dma_free_pagelist(freelist);
3770 add_unmap(domain, iova_pfn, nrpages, freelist);
3772 * queue up the release of the unmap to save the roughly 1/6th of the
3773 * CPU time used up by the iotlb flush operation...
3778 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3779 size_t size, enum dma_data_direction dir,
3780 unsigned long attrs)
3782 intel_unmap(dev, dev_addr, size);
3785 static void *intel_alloc_coherent(struct device *dev, size_t size,
3786 dma_addr_t *dma_handle, gfp_t flags,
3787 unsigned long attrs)
3789 struct page *page = NULL;
3792 size = PAGE_ALIGN(size);
3793 order = get_order(size);
3795 if (!iommu_no_mapping(dev))
3796 flags &= ~(GFP_DMA | GFP_DMA32);
3797 else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3798 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3804 if (gfpflags_allow_blocking(flags)) {
3805 unsigned int count = size >> PAGE_SHIFT;
3807 page = dma_alloc_from_contiguous(dev, count, order);
3808 if (page && iommu_no_mapping(dev) &&
3809 page_to_phys(page) + size > dev->coherent_dma_mask) {
3810 dma_release_from_contiguous(dev, page, count);
3816 page = alloc_pages(flags, order);
3819 memset(page_address(page), 0, size);
3821 *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3823 dev->coherent_dma_mask);
3825 return page_address(page);
3826 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3827 __free_pages(page, order);
3832 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3833 dma_addr_t dma_handle, unsigned long attrs)
3836 struct page *page = virt_to_page(vaddr);
3838 size = PAGE_ALIGN(size);
3839 order = get_order(size);
3841 intel_unmap(dev, dma_handle, size);
3842 if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3843 __free_pages(page, order);
3846 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3847 int nelems, enum dma_data_direction dir,
3848 unsigned long attrs)
3850 dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3851 unsigned long nrpages = 0;
3852 struct scatterlist *sg;
3855 for_each_sg(sglist, sg, nelems, i) {
3856 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3859 intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
3862 static int intel_nontranslate_map_sg(struct device *hddev,
3863 struct scatterlist *sglist, int nelems, int dir)
3866 struct scatterlist *sg;
3868 for_each_sg(sglist, sg, nelems, i) {
3869 BUG_ON(!sg_page(sg));
3870 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3871 sg->dma_length = sg->length;
3876 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3877 enum dma_data_direction dir, unsigned long attrs)
3880 struct dmar_domain *domain;
3883 unsigned long iova_pfn;
3885 struct scatterlist *sg;
3886 unsigned long start_vpfn;
3887 struct intel_iommu *iommu;
3889 BUG_ON(dir == DMA_NONE);
3890 if (iommu_no_mapping(dev))
3891 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3893 domain = get_valid_domain_for_dev(dev);
3897 iommu = domain_get_iommu(domain);
3899 for_each_sg(sglist, sg, nelems, i)
3900 size += aligned_nrpages(sg->offset, sg->length);
3902 iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3905 sglist->dma_length = 0;
3910 * Check if DMAR supports zero-length reads on write-only mappings
3913 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
3914 !cap_zlr(iommu->cap))
3915 prot |= DMA_PTE_READ;
3916 if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3917 prot |= DMA_PTE_WRITE;
3919 start_vpfn = mm_to_dma_pfn(iova_pfn);
3921 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3922 if (unlikely(ret)) {
3923 dma_pte_free_pagetable(domain, start_vpfn,
3924 start_vpfn + size - 1);
3925 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3929 /* it's a non-present to present mapping. Only flush if caching mode */
3930 if (cap_caching_mode(iommu->cap))
3931 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
3933 iommu_flush_write_buffer(iommu);
3938 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3943 struct dma_map_ops intel_dma_ops = {
3944 .alloc = intel_alloc_coherent,
3945 .free = intel_free_coherent,
3946 .map_sg = intel_map_sg,
3947 .unmap_sg = intel_unmap_sg,
3948 .map_page = intel_map_page,
3949 .unmap_page = intel_unmap_page,
3950 .mapping_error = intel_mapping_error,
3953 static inline int iommu_domain_cache_init(void)
3957 iommu_domain_cache = kmem_cache_create("iommu_domain",
3958 sizeof(struct dmar_domain),
3963 if (!iommu_domain_cache) {
3964 pr_err("Couldn't create iommu_domain cache\n");
3971 static inline int iommu_devinfo_cache_init(void)
3975 iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3976 sizeof(struct device_domain_info),
3980 if (!iommu_devinfo_cache) {
3981 pr_err("Couldn't create devinfo cache\n");
3988 static int __init iommu_init_mempool(void)
3991 ret = iova_cache_get();
3995 ret = iommu_domain_cache_init();
3999 ret = iommu_devinfo_cache_init();
4003 kmem_cache_destroy(iommu_domain_cache);
4010 static void __init iommu_exit_mempool(void)
4012 kmem_cache_destroy(iommu_devinfo_cache);
4013 kmem_cache_destroy(iommu_domain_cache);
4017 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
4019 struct dmar_drhd_unit *drhd;
4023 /* We know that this device on this chipset has its own IOMMU.
4024 * If we find it under a different IOMMU, then the BIOS is lying
4025 * to us. Hope that the IOMMU for this device is actually
4026 * disabled, and it needs no translation...
4028 rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
4030 /* "can't" happen */
4031 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
4034 vtbar &= 0xffff0000;
4036 /* we know that this iommu should be at offset 0xa000 from vtbar */
4037 drhd = dmar_find_matched_drhd_unit(pdev);
4038 if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
4039 TAINT_FIRMWARE_WORKAROUND,
4040 "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
4041 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4043 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
4045 static void __init init_no_remapping_devices(void)
4047 struct dmar_drhd_unit *drhd;
4051 for_each_drhd_unit(drhd) {
4052 if (!drhd->include_all) {
4053 for_each_active_dev_scope(drhd->devices,
4054 drhd->devices_cnt, i, dev)
4056 /* ignore DMAR unit if no devices exist */
4057 if (i == drhd->devices_cnt)
4062 for_each_active_drhd_unit(drhd) {
4063 if (drhd->include_all)
4066 for_each_active_dev_scope(drhd->devices,
4067 drhd->devices_cnt, i, dev)
4068 if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
4070 if (i < drhd->devices_cnt)
4073 /* This IOMMU has *only* gfx devices. Either bypass it or
4074 set the gfx_mapped flag, as appropriate */
4076 intel_iommu_gfx_mapped = 1;
4079 for_each_active_dev_scope(drhd->devices,
4080 drhd->devices_cnt, i, dev)
4081 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4086 #ifdef CONFIG_SUSPEND
4087 static int init_iommu_hw(void)
4089 struct dmar_drhd_unit *drhd;
4090 struct intel_iommu *iommu = NULL;
4092 for_each_active_iommu(iommu, drhd)
4094 dmar_reenable_qi(iommu);
4096 for_each_iommu(iommu, drhd) {
4097 if (drhd->ignored) {
4099 * we always have to disable PMRs or DMA may fail on this device
4103 iommu_disable_protect_mem_regions(iommu);
4107 iommu_flush_write_buffer(iommu);
4109 iommu_set_root_entry(iommu);
4111 iommu->flush.flush_context(iommu, 0, 0, 0,
4112 DMA_CCMD_GLOBAL_INVL);
4113 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4114 iommu_enable_translation(iommu);
4115 iommu_disable_protect_mem_regions(iommu);
4121 static void iommu_flush_all(void)
4123 struct dmar_drhd_unit *drhd;
4124 struct intel_iommu *iommu;
4126 for_each_active_iommu(iommu, drhd) {
4127 iommu->flush.flush_context(iommu, 0, 0, 0,
4128 DMA_CCMD_GLOBAL_INVL);
4129 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
4130 DMA_TLB_GLOBAL_FLUSH);
4134 static int iommu_suspend(void)
4136 struct dmar_drhd_unit *drhd;
4137 struct intel_iommu *iommu = NULL;
4140 for_each_active_iommu(iommu, drhd) {
4141 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
4143 if (!iommu->iommu_state)
4149 for_each_active_iommu(iommu, drhd) {
4150 iommu_disable_translation(iommu);
4152 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4154 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4155 readl(iommu->reg + DMAR_FECTL_REG);
4156 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4157 readl(iommu->reg + DMAR_FEDATA_REG);
4158 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4159 readl(iommu->reg + DMAR_FEADDR_REG);
4160 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4161 readl(iommu->reg + DMAR_FEUADDR_REG);
4163 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4168 for_each_active_iommu(iommu, drhd)
4169 kfree(iommu->iommu_state);
4174 static void iommu_resume(void)
4176 struct dmar_drhd_unit *drhd;
4177 struct intel_iommu *iommu = NULL;
4180 if (init_iommu_hw()) {
4182 panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4184 WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
4188 for_each_active_iommu(iommu, drhd) {
4190 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4192 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4193 iommu->reg + DMAR_FECTL_REG);
4194 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4195 iommu->reg + DMAR_FEDATA_REG);
4196 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4197 iommu->reg + DMAR_FEADDR_REG);
4198 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4199 iommu->reg + DMAR_FEUADDR_REG);
4201 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4204 for_each_active_iommu(iommu, drhd)
4205 kfree(iommu->iommu_state);
4208 static struct syscore_ops iommu_syscore_ops = {
4209 .resume = iommu_resume,
4210 .suspend = iommu_suspend,
4213 static void __init init_iommu_pm_ops(void)
4215 register_syscore_ops(&iommu_syscore_ops);
4219 static inline void init_iommu_pm_ops(void) {}
4220 #endif /* CONFIG_PM */
4223 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
4225 struct acpi_dmar_reserved_memory *rmrr;
4226 struct dmar_rmrr_unit *rmrru;
4228 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4232 rmrru->hdr = header;
4233 rmrr = (struct acpi_dmar_reserved_memory *)header;
4234 rmrru->base_address = rmrr->base_address;
4235 rmrru->end_address = rmrr->end_address;
4236 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4237 ((void *)rmrr) + rmrr->header.length,
4238 &rmrru->devices_cnt);
4239 if (rmrru->devices_cnt && rmrru->devices == NULL) {
4244 list_add(&rmrru->list, &dmar_rmrr_units);
4249 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4251 struct dmar_atsr_unit *atsru;
4252 struct acpi_dmar_atsr *tmp;
4254 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4255 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4256 if (atsr->segment != tmp->segment)
4258 if (atsr->header.length != tmp->header.length)
4260 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4267 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4269 struct acpi_dmar_atsr *atsr;
4270 struct dmar_atsr_unit *atsru;
4272 if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
4275 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4276 atsru = dmar_find_atsr(atsr);
4280 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
4285 * If memory is allocated from slab by ACPI _DSM method, we need to
4286 * copy the memory content because the memory buffer will be freed on exit.
4289 atsru->hdr = (void *)(atsru + 1);
4290 memcpy(atsru->hdr, hdr, hdr->length);
4291 atsru->include_all = atsr->flags & 0x1;
4292 if (!atsru->include_all) {
4293 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4294 (void *)atsr + atsr->header.length,
4295 &atsru->devices_cnt);
4296 if (atsru->devices_cnt && atsru->devices == NULL) {
4302 list_add_rcu(&atsru->list, &dmar_atsr_units);
4307 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4309 dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4313 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4315 struct acpi_dmar_atsr *atsr;
4316 struct dmar_atsr_unit *atsru;
4318 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4319 atsru = dmar_find_atsr(atsr);
4321 list_del_rcu(&atsru->list);
4323 intel_iommu_free_atsr(atsru);
4329 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4333 struct acpi_dmar_atsr *atsr;
4334 struct dmar_atsr_unit *atsru;
4336 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4337 atsru = dmar_find_atsr(atsr);
4341 if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
4342 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4350 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4353 struct intel_iommu *iommu = dmaru->iommu;
4355 if (g_iommus[iommu->seq_id])
4358 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4359 pr_warn("%s: Doesn't support hardware pass through.\n",
4363 if (!ecap_sc_support(iommu->ecap) &&
4364 domain_update_iommu_snooping(iommu)) {
4365 pr_warn("%s: Doesn't support snooping.\n",
4369 sp = domain_update_iommu_superpage(iommu) - 1;
4370 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4371 pr_warn("%s: Doesn't support large page.\n",
4377 * Disable translation if already enabled prior to OS handover.
4379 if (iommu->gcmd & DMA_GCMD_TE)
4380 iommu_disable_translation(iommu);
4382 g_iommus[iommu->seq_id] = iommu;
4383 ret = iommu_init_domains(iommu);
4385 ret = iommu_alloc_root_entry(iommu);
4389 #ifdef CONFIG_INTEL_IOMMU_SVM
4390 if (pasid_enabled(iommu))
4391 intel_svm_alloc_pasid_tables(iommu);
4394 if (dmaru->ignored) {
4396 * we always have to disable PMRs or DMA may fail on this device
4399 iommu_disable_protect_mem_regions(iommu);
4403 intel_iommu_init_qi(iommu);
4404 iommu_flush_write_buffer(iommu);
4406 #ifdef CONFIG_INTEL_IOMMU_SVM
4407 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
4408 ret = intel_svm_enable_prq(iommu);
4413 ret = dmar_set_interrupt(iommu);
4417 iommu_set_root_entry(iommu);
4418 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4419 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4420 iommu_enable_translation(iommu);
4422 iommu_disable_protect_mem_regions(iommu);
4426 disable_dmar_iommu(iommu);
4428 free_dmar_iommu(iommu);
4432 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4435 struct intel_iommu *iommu = dmaru->iommu;
4437 if (!intel_iommu_enabled)
4443 ret = intel_iommu_add(dmaru);
4445 disable_dmar_iommu(iommu);
4446 free_dmar_iommu(iommu);
4452 static void intel_iommu_free_dmars(void)
4454 struct dmar_rmrr_unit *rmrru, *rmrr_n;
4455 struct dmar_atsr_unit *atsru, *atsr_n;
4457 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4458 list_del(&rmrru->list);
4459 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4463 list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4464 list_del(&atsru->list);
4465 intel_iommu_free_atsr(atsru);
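/*
 * Walk up from the device to its PCIe root port and check the ATSR units
 * to decide whether ATS may be used for this device; root-complex
 * integrated devices are always allowed.
 */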
4469 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4472 struct pci_bus *bus;
4473 struct pci_dev *bridge = NULL;
4475 struct acpi_dmar_atsr *atsr;
4476 struct dmar_atsr_unit *atsru;
4478 dev = pci_physfn(dev);
4479 for (bus = dev->bus; bus; bus = bus->parent) {
4481 /* If it's an integrated device, allow ATS */
4484 /* Connected via non-PCIe: no ATS */
4485 if (!pci_is_pcie(bridge) ||
4486 pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
4488 /* If we found the root port, look it up in the ATSR */
4489 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
4494 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4495 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4496 if (atsr->segment != pci_domain_nr(dev->bus))
4499 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
4500 if (tmp == &bridge->dev)
4503 if (atsru->include_all)
4513 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4516 struct dmar_rmrr_unit *rmrru;
4517 struct dmar_atsr_unit *atsru;
4518 struct acpi_dmar_atsr *atsr;
4519 struct acpi_dmar_reserved_memory *rmrr;
4521 if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
4524 list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4525 rmrr = container_of(rmrru->hdr,
4526 struct acpi_dmar_reserved_memory, header);
4527 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4528 ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4529 ((void *)rmrr) + rmrr->header.length,
4530 rmrr->segment, rmrru->devices,
4531 rmrru->devices_cnt);
4534 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4535 dmar_remove_dev_scope(info, rmrr->segment,
4536 rmrru->devices, rmrru->devices_cnt);
4540 list_for_each_entry(atsru, &dmar_atsr_units, list) {
4541 if (atsru->include_all)
4544 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4545 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4546 ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4547 (void *)atsr + atsr->header.length,
4548 atsr->segment, atsru->devices,
4549 atsru->devices_cnt);
4554 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4555 if (dmar_remove_dev_scope(info, atsr->segment,
4556 atsru->devices, atsru->devices_cnt))
4565 * Here we only respond to the action of a device being unbound from its driver.
4567 * A newly added device is not attached to its DMAR domain here yet; that will
4568 * happen when the device is first mapped to an iova.
4570 static int device_notifier(struct notifier_block *nb,
4571 unsigned long action, void *data)
4573 struct device *dev = data;
4574 struct dmar_domain *domain;
4576 if (iommu_dummy(dev))
4579 if (action != BUS_NOTIFY_REMOVED_DEVICE)
4582 domain = find_domain(dev);
4586 dmar_remove_one_dev_info(domain, dev);
4587 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
4588 domain_exit(domain);
4593 static struct notifier_block device_nb = {
4594 .notifier_call = device_notifier,
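/*
 * Memory hotplug notifier: keep the static identity domain in sync by
 * adding a 1:1 mapping when a range goes online and unmapping it (and
 * flushing the IOTLBs) when the range goes offline again.
 */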
4597 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4598 unsigned long val, void *v)
4600 struct memory_notify *mhp = v;
4601 unsigned long long start, end;
4602 unsigned long start_vpfn, last_vpfn;
4605 case MEM_GOING_ONLINE:
4606 start = mhp->start_pfn << PAGE_SHIFT;
4607 end = ((mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4608 if (iommu_domain_identity_map(si_domain, start, end)) {
4609 pr_warn("Failed to build identity map for [%llx-%llx]\n",
4616 case MEM_CANCEL_ONLINE:
4617 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4618 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4619 while (start_vpfn <= last_vpfn) {
4621 struct dmar_drhd_unit *drhd;
4622 struct intel_iommu *iommu;
4623 struct page *freelist;
4625 iova = find_iova(&si_domain->iovad, start_vpfn);
4627 pr_debug("Failed get IOVA for PFN %lx\n",
4632 iova = split_and_remove_iova(&si_domain->iovad, iova,
4633 start_vpfn, last_vpfn);
4635 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
4636 start_vpfn, last_vpfn);
4640 freelist = domain_unmap(si_domain, iova->pfn_lo,
4644 for_each_active_iommu(iommu, drhd)
4645 iommu_flush_iotlb_psi(iommu, si_domain,
4646 iova->pfn_lo, iova_size(iova),
4649 dma_free_pagelist(freelist);
4651 start_vpfn = iova->pfn_hi + 1;
4652 free_iova_mem(iova);
4660 static struct notifier_block intel_iommu_memory_nb = {
4661 .notifier_call = intel_iommu_memory_notifier,
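/*
 * CPU hotplug: when a CPU dies, release the IOVAs it had cached for every
 * domain and flush any unmaps still queued in its deferred-flush table
 * (see the CPU notifier below).
 */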
4665 static void free_all_cpu_cached_iovas(unsigned int cpu)
4669 for (i = 0; i < g_num_of_iommus; i++) {
4670 struct intel_iommu *iommu = g_iommus[i];
4671 struct dmar_domain *domain;
4677 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
4678 domain = get_iommu_domain(iommu, (u16)did);
4682 free_cpu_cached_iovas(cpu, &domain->iovad);
4687 static int intel_iommu_cpu_notifier(struct notifier_block *nfb,
4688 unsigned long action, void *v)
4690 unsigned int cpu = (unsigned long)v;
4694 case CPU_DEAD_FROZEN:
4695 free_all_cpu_cached_iovas(cpu);
4696 flush_unmaps_timeout(cpu);
4702 static struct notifier_block intel_iommu_cpu_nb = {
4703 .notifier_call = intel_iommu_cpu_notifier,
4706 static ssize_t intel_iommu_show_version(struct device *dev,
4707 struct device_attribute *attr,
4710 struct intel_iommu *iommu = dev_get_drvdata(dev);
4711 u32 ver = readl(iommu->reg + DMAR_VER_REG);
4712 return sprintf(buf, "%d:%d\n",
4713 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4715 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4717 static ssize_t intel_iommu_show_address(struct device *dev,
4718 struct device_attribute *attr,
4721 struct intel_iommu *iommu = dev_get_drvdata(dev);
4722 return sprintf(buf, "%llx\n", iommu->reg_phys);
4724 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4726 static ssize_t intel_iommu_show_cap(struct device *dev,
4727 struct device_attribute *attr,
4730 struct intel_iommu *iommu = dev_get_drvdata(dev);
4731 return sprintf(buf, "%llx\n", iommu->cap);
4733 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4735 static ssize_t intel_iommu_show_ecap(struct device *dev,
4736 struct device_attribute *attr,
4739 struct intel_iommu *iommu = dev_get_drvdata(dev);
4740 return sprintf(buf, "%llx\n", iommu->ecap);
4742 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4744 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4745 struct device_attribute *attr,
4748 struct intel_iommu *iommu = dev_get_drvdata(dev);
4749 return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4751 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4753 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4754 struct device_attribute *attr,
4757 struct intel_iommu *iommu = dev_get_drvdata(dev);
4758 return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4759 cap_ndoms(iommu->cap)));
4761 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4763 static struct attribute *intel_iommu_attrs[] = {
4764 &dev_attr_version.attr,
4765 &dev_attr_address.attr,
4767 &dev_attr_ecap.attr,
4768 &dev_attr_domains_supported.attr,
4769 &dev_attr_domains_used.attr,
4773 static struct attribute_group intel_iommu_group = {
4774 .name = "intel-iommu",
4775 .attrs = intel_iommu_attrs,
4778 const struct attribute_group *intel_iommu_groups[] = {
4783 int __init intel_iommu_init(void)
4786 struct dmar_drhd_unit *drhd;
4787 struct intel_iommu *iommu;
4789 /* VT-d is required for a TXT/tboot launch, so enforce that */
4790 force_on = tboot_force_iommu();
4792 if (iommu_init_mempool()) {
4794 panic("tboot: Failed to initialize iommu memory\n");
4798 down_write(&dmar_global_lock);
4799 if (dmar_table_init()) {
4801 panic("tboot: Failed to initialize DMAR table\n");
4805 if (dmar_dev_scope_init() < 0) {
4807 panic("tboot: Failed to initialize DMAR device scope\n");
4811 if (no_iommu || dmar_disabled)
4814 if (list_empty(&dmar_rmrr_units))
4815 pr_info("No RMRR found\n");
4817 if (list_empty(&dmar_atsr_units))
4818 pr_info("No ATSR found\n");
4820 if (dmar_init_reserved_ranges()) {
4822 panic("tboot: Failed to reserve iommu ranges\n");
4823 goto out_free_reserved_range;
4826 init_no_remapping_devices();
4831 panic("tboot: Failed to initialize DMARs\n");
4832 pr_err("Initialization failed\n");
4833 goto out_free_reserved_range;
4835 up_write(&dmar_global_lock);
4836 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4838 #ifdef CONFIG_SWIOTLB
4841 dma_ops = &intel_dma_ops;
4843 init_iommu_pm_ops();
4845 for_each_active_iommu(iommu, drhd)
4846 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4850 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4851 bus_register_notifier(&pci_bus_type, &device_nb);
4852 if (si_domain && !hw_pass_through)
4853 register_memory_notifier(&intel_iommu_memory_nb);
4854 register_hotcpu_notifier(&intel_iommu_cpu_nb);
4856 intel_iommu_enabled = 1;
4860 out_free_reserved_range:
4861 put_iova_domain(&reserved_iova_list);
4863 intel_iommu_free_dmars();
4864 up_write(&dmar_global_lock);
4865 iommu_exit_mempool();
4869 static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4871 struct intel_iommu *iommu = opaque;
4873 domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4878 * NB - intel-iommu lacks any sort of reference counting for the users of
4879 * dependent devices. If multiple endpoints have intersecting dependent
4880 * devices, unbinding the driver from any one of them will possibly leave
4881 * the others unable to operate.
4883 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
4885 if (!iommu || !dev || !dev_is_pci(dev))
4888 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
4891 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4893 struct intel_iommu *iommu;
4894 unsigned long flags;
4896 assert_spin_locked(&device_domain_lock);
4901 iommu = info->iommu;
4904 iommu_disable_dev_iotlb(info);
4905 domain_context_clear(iommu, info->dev);
4908 unlink_domain_info(info);
4910 spin_lock_irqsave(&iommu->lock, flags);
4911 domain_detach_iommu(info->domain, iommu);
4912 spin_unlock_irqrestore(&iommu->lock, flags);
4914 free_devinfo_mem(info);
4917 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4920 struct device_domain_info *info;
4921 unsigned long flags;
4923 spin_lock_irqsave(&device_domain_lock, flags);
4924 info = dev->archdata.iommu;
4925 __dmar_remove_one_dev_info(info);
4926 spin_unlock_irqrestore(&device_domain_lock, flags);
static int md_domain_init(struct dmar_domain *domain, int guest_width)
{
	int adjust_width;

	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
			DMA_32BIT_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
	domain->gaw = guest_width;
	adjust_width = guestwidth_to_adjustwidth(guest_width);
	domain->agaw = width_to_agaw(adjust_width);

	domain->iommu_coherency = 0;
	domain->iommu_snooping = 0;
	domain->iommu_superpage = 0;
	domain->max_addr = 0;

	/* always allocate the top pgd */
	domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
	if (!domain->pgd)
		return -ENOMEM;
	domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
	return 0;
}

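/*
 * iommu_ops->domain_alloc: only unmanaged (virtual-machine style) domains
 * are supported.  The new domain gets the default address width and an
 * aperture matching that width.
 */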
static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
	struct dmar_domain *dmar_domain;
	struct iommu_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
	if (!dmar_domain) {
		pr_err("Can't allocate dmar_domain\n");
		return NULL;
	}
	if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
		pr_err("Domain initialization failed\n");
		domain_exit(dmar_domain);
		return NULL;
	}
	domain_update_iommu_cap(dmar_domain);

	domain = &dmar_domain->domain;
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
	domain->geometry.force_aperture = true;

	return domain;
}

static void intel_iommu_domain_free(struct iommu_domain *domain)
{
	domain_exit(to_dmar_domain(domain));
}

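/*
 * iommu_ops->attach_dev: move @dev into @domain.  RMRR-locked devices are
 * refused, any existing mapping is torn down first, and the domain's
 * address width (and page-table depth) is clamped to what this IOMMU's
 * AGAW/MGAW can actually address.
 */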
static int intel_iommu_attach_device(struct iommu_domain *domain,
				     struct device *dev)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct intel_iommu *iommu;
	int addr_width;
	u8 bus, devfn;

	if (device_is_rmrr_locked(dev)) {
		dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n");
		return -EPERM;
	}

	/* normally dev is not mapped */
	if (unlikely(domain_context_mapped(dev))) {
		struct dmar_domain *old_domain;

		old_domain = find_domain(dev);
		if (old_domain) {
			rcu_read_lock();
			dmar_remove_one_dev_info(old_domain, dev);
			rcu_read_unlock();

			if (!domain_type_is_vm_or_si(old_domain) &&
			    list_empty(&old_domain->devices))
				domain_exit(old_domain);
		}
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	/* check if this iommu agaw is sufficient for max mapped address */
	addr_width = agaw_to_width(iommu->agaw);
	if (addr_width > cap_mgaw(iommu->cap))
		addr_width = cap_mgaw(iommu->cap);

	if (dmar_domain->max_addr > (1LL << addr_width)) {
		pr_err("%s: iommu width (%d) is not "
		       "sufficient for the mapped address (%llx)\n",
		       __func__, addr_width, dmar_domain->max_addr);
		return -EFAULT;
	}
	dmar_domain->gaw = addr_width;

	/*
	 * Knock out extra levels of page tables if necessary
	 */
	while (iommu->agaw < dmar_domain->agaw) {
		struct dma_pte *pte;

		pte = dmar_domain->pgd;
		if (dma_pte_present(pte)) {
			dmar_domain->pgd = (struct dma_pte *)
				phys_to_virt(dma_pte_addr(pte));
			free_pgtable_page(pte);
		}
		dmar_domain->agaw--;
	}

	return domain_add_dev_info(dmar_domain, dev);
}

static void intel_iommu_detach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
}

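/*
 * iommu_ops->map: translate IOMMU_READ/WRITE/CACHE into VT-d PTE bits and
 * install the mapping, extending the domain's max_addr bookkeeping as long
 * as the range still fits within the domain's address width.
 */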
static int intel_iommu_map(struct iommu_domain *domain,
			   unsigned long iova, phys_addr_t hpa,
			   size_t size, int iommu_prot)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	u64 max_addr;
	int prot = 0;
	int ret;

	if (iommu_prot & IOMMU_READ)
		prot |= DMA_PTE_READ;
	if (iommu_prot & IOMMU_WRITE)
		prot |= DMA_PTE_WRITE;
	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
		prot |= DMA_PTE_SNP;

	max_addr = iova + size;
	if (dmar_domain->max_addr < max_addr) {
		u64 end;

		/* check if minimum agaw is sufficient for mapped address */
		end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
		if (end < max_addr) {
			pr_err("%s: iommu width (%d) is not "
			       "sufficient for the mapped address (%llx)\n",
			       __func__, dmar_domain->gaw, max_addr);
			return -EFAULT;
		}
		dmar_domain->max_addr = max_addr;
	}
	/* Round up size to next multiple of PAGE_SIZE, if it and
	   the low bits of hpa would take us onto the next page */
	size = aligned_nrpages(hpa, size);
	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
				 hpa >> VTD_PAGE_SHIFT, size, prot);

	return ret;
}

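/*
 * iommu_ops->unmap: clear the page-table entries covering @iova and flush
 * the IOTLB on every IOMMU this domain is attached to.  The size actually
 * unmapped may be rounded up to a whole large page and is returned.
 */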
static size_t intel_iommu_unmap(struct iommu_domain *domain,
				unsigned long iova, size_t size)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct page *freelist = NULL;
	struct intel_iommu *iommu;
	unsigned long start_pfn, last_pfn;
	unsigned int npages;
	int iommu_id, level = 0;

	/* Cope with horrid API which requires us to unmap more than the
	   size argument if it happens to be a large-page mapping. */
	BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));

	if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
		size = VTD_PAGE_SIZE << level_to_offset_bits(level);

	start_pfn = iova >> VTD_PAGE_SHIFT;
	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;

	freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);

	npages = last_pfn - start_pfn + 1;

	for_each_domain_iommu(iommu_id, dmar_domain) {
		iommu = g_iommus[iommu_id];

		iommu_flush_iotlb_psi(iommu, dmar_domain,
				      start_pfn, npages, !freelist, 0);
	}

	dma_free_pagelist(freelist);

	if (dmar_domain->max_addr == iova + size)
		dmar_domain->max_addr = iova;

	return size;
}

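/* iommu_ops->iova_to_phys: walk the page table; returns 0 if no translation exists. */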
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct dmar_domain *dmar_domain = to_dmar_domain(domain);
	struct dma_pte *pte;
	int level = 0;
	u64 phys = 0;

	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
	if (pte)
		phys = dma_pte_addr(pte);

	return phys;
}

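/*
 * iommu_ops->capable: report platform-wide snoop-control (cache coherency)
 * and interrupt remapping support.
 */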
static bool intel_iommu_capable(enum iommu_cap cap)
{
	if (cap == IOMMU_CAP_CACHE_COHERENCY)
		return domain_update_iommu_snooping(NULL) == 1;
	if (cap == IOMMU_CAP_INTR_REMAP)
		return irq_remapping_enabled == 1;

	return false;
}

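/*
 * iommu_ops->add_device: link the newly added device to its IOMMU's sysfs
 * node and place it into (or allocate) its IOMMU group.
 */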
static int intel_iommu_add_device(struct device *dev)
{
	struct intel_iommu *iommu;
	struct iommu_group *group;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return -ENODEV;

	iommu_device_link(iommu->iommu_dev, dev);

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void intel_iommu_remove_device(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu)
		return;

	iommu_group_remove_device(dev);

	iommu_device_unlink(iommu->iommu_dev, dev);
}

#ifdef CONFIG_INTEL_IOMMU_SVM
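/*
 * Called from the SVM code: enable PASID support in the context entry for
 * @sdev's device by pointing the extended context at the PASID and PASID
 * state tables, then flush the context cache.
 */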
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
{
	struct device_domain_info *info;
	struct context_entry *context;
	struct dmar_domain *domain;
	unsigned long flags;
	u64 ctx_lo;
	int ret;

	domain = get_valid_domain_for_dev(sdev->dev);
	if (!domain)
		return -EINVAL;

	spin_lock_irqsave(&device_domain_lock, flags);
	spin_lock(&iommu->lock);

	ret = -EINVAL;
	info = sdev->dev->archdata.iommu;
	if (!info || !info->pasid_supported)
		goto out;

	context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
	if (WARN_ON(!context))
		goto out;

	ctx_lo = context[0].lo;

	sdev->did = domain->iommu_did[iommu->seq_id];
	sdev->sid = PCI_DEVID(info->bus, info->devfn);

	if (!(ctx_lo & CONTEXT_PASIDE)) {
		context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
		context[1].lo = (u64)virt_to_phys(iommu->pasid_table) | ecap_pss(iommu->ecap);
		wmb();
		/* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
		 * extended to permit requests-with-PASID if the PASIDE bit
		 * is set, which makes sense. For CONTEXT_TT_PASS_THROUGH,
		 * however, the PASIDE bit is ignored and requests-with-PASID
		 * are unconditionally blocked, which makes less sense.
		 * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
		 * "guest mode" translation types depending on whether ATS
		 * is available or not. Annoyingly, we can't use the new
		 * modes *unless* PASIDE is set. */
		if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
			ctx_lo &= ~CONTEXT_TT_MASK;
			if (info->ats_supported)
				ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
			else
				ctx_lo |= CONTEXT_TT_PT_PASID << 2;
		}
		ctx_lo |= CONTEXT_PASIDE;
		if (iommu->pasid_state_table)
			ctx_lo |= CONTEXT_DINVE;
		if (info->pri_supported)
			ctx_lo |= CONTEXT_PRS;
		context[0].lo = ctx_lo;
		wmb();
		iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
	}

	/* Enable PASID support in the device, if it wasn't already */
	if (!info->pasid_enabled)
		iommu_enable_dev_iotlb(info);

	if (info->ats_enabled) {
		sdev->dev_iotlb = 1;
		sdev->qdep = info->ats_qdep;
		if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
			sdev->qdep = 0;
	}
	ret = 0;

 out:
	spin_unlock(&iommu->lock);
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return ret;
}

struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
{
	struct intel_iommu *iommu;
	u8 bus, devfn;

	if (iommu_dummy(dev)) {
		dev_warn(dev,
			 "No IOMMU translation for device; cannot enable SVM\n");
		return NULL;
	}

	iommu = device_to_iommu(dev, &bus, &devfn);
	if (!iommu) {
		dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
		return NULL;
	}

	if (!iommu->pasid_table) {
		dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
		return NULL;
	}

	return iommu;
}
#endif /* CONFIG_INTEL_IOMMU_SVM */

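/*
 * Callbacks handed to the generic IOMMU core; IOMMU API operations on a
 * VT-d protected bus dispatch through this table.
 */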
static const struct iommu_ops intel_iommu_ops = {
	.capable	= intel_iommu_capable,
	.domain_alloc	= intel_iommu_domain_alloc,
	.domain_free	= intel_iommu_domain_free,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map,
	.unmap		= intel_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= intel_iommu_iova_to_phys,
	.add_device	= intel_iommu_add_device,
	.remove_device	= intel_iommu_remove_device,
	.device_group	= pci_device_group,
	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
};

static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
{
	/* G4x/GM45 integrated gfx dmar support is totally busted. */
	pr_info("Disabling IOMMU for graphics on this chipset\n");
	dmar_map_gfx = 0;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);

static void quirk_iommu_rwbf(struct pci_dev *dev)
{
	/*
	 * Mobile 4 Series Chipset neglects to set RWBF capability,
	 * but needs it. Same seems to hold for the desktop versions.
	 */
	pr_info("Forcing write-buffer flush capability\n");
	rwbf_quirk = 1;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);

/* GGC is the GMCH Graphics Control register in the IGD's config space */
#define GGC 0x52
#define GGC_MEMORY_SIZE_MASK	(0xf << 8)
#define GGC_MEMORY_SIZE_NONE	(0x0 << 8)
#define GGC_MEMORY_SIZE_1M	(0x1 << 8)
#define GGC_MEMORY_SIZE_2M	(0x3 << 8)
#define GGC_MEMORY_VT_ENABLED	(0x8 << 8)
#define GGC_MEMORY_SIZE_2M_VT	(0x9 << 8)
#define GGC_MEMORY_SIZE_3M_VT	(0xa << 8)
#define GGC_MEMORY_SIZE_4M_VT	(0xb << 8)

static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
	unsigned short ggc;

	if (pci_read_config_word(dev, GGC, &ggc))
		return;

	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
		pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
		dmar_map_gfx = 0;
	} else if (dmar_map_gfx) {
		/* we have to ensure the gfx device is idle before we flush */
		pr_info("Disabling batched IOTLB flush on Ironlake\n");
		intel_iommu_strict = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);

/* On Tylersburg chipsets, some BIOSes have been known to enable the
   ISOCH DMAR unit for the Azalia sound device, but not give it any
   TLB entries, which causes it to deadlock. Check for that.  We do
   this in a function called from init_dmars(), instead of in a PCI
   quirk, because we don't want to print the obnoxious "BIOS broken"
   message if VT-d is actually disabled.
*/
static void __init check_tylersburg_isoch(void)
{
	struct pci_dev *pdev;
	uint32_t vtisochctrl;

	/* If there's no Azalia in the system anyway, forget it. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
	if (!pdev)
		return;
	pci_dev_put(pdev);

	/* System Management Registers. Might be hidden, in which case
	   we can't do the sanity check. But that's OK, because the
	   known-broken BIOSes _don't_ actually hide it, so far. */
	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
	if (!pdev)
		return;

	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
		pci_dev_put(pdev);
		return;
	}

	pci_dev_put(pdev);

	/* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
	if (vtisochctrl & 1)
		return;

	/* Drop all bits other than the number of TLB entries */
	vtisochctrl &= 0x1c;

	/* If we have the recommended number of TLB entries (16), fine. */
	if (vtisochctrl == 0x10)
		return;

	/* Zero TLB entries? You get to ride the short bus to school. */
	if (!vtisochctrl) {
		WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
		     dmi_get_system_info(DMI_BIOS_VENDOR),
		     dmi_get_system_info(DMI_BIOS_VERSION),
		     dmi_get_system_info(DMI_PRODUCT_VERSION));
		iommu_identity_mapping |= IDENTMAP_AZALIA;
		return;
	}

	pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
		vtisochctrl);
}