[zynq/linux.git] drivers/iommu/intel-iommu.c
1 /*
2  * Copyright © 2006-2014 Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * Authors: David Woodhouse <dwmw2@infradead.org>,
14  *          Ashok Raj <ashok.raj@intel.com>,
15  *          Shaohua Li <shaohua.li@intel.com>,
16  *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>,
17  *          Fenghua Yu <fenghua.yu@intel.com>
18  *          Joerg Roedel <jroedel@suse.de>
19  */
20
21 #define pr_fmt(fmt)     "DMAR: " fmt
22
23 #include <linux/init.h>
24 #include <linux/bitmap.h>
25 #include <linux/debugfs.h>
26 #include <linux/export.h>
27 #include <linux/slab.h>
28 #include <linux/irq.h>
29 #include <linux/interrupt.h>
30 #include <linux/spinlock.h>
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/mempool.h>
35 #include <linux/memory.h>
36 #include <linux/cpu.h>
37 #include <linux/timer.h>
38 #include <linux/io.h>
39 #include <linux/iova.h>
40 #include <linux/iommu.h>
41 #include <linux/intel-iommu.h>
42 #include <linux/syscore_ops.h>
43 #include <linux/tboot.h>
44 #include <linux/dmi.h>
45 #include <linux/pci-ats.h>
46 #include <linux/memblock.h>
47 #include <linux/dma-contiguous.h>
48 #include <linux/crash_dump.h>
49 #include <asm/irq_remapping.h>
50 #include <asm/cacheflush.h>
51 #include <asm/iommu.h>
52
53 #include "irq_remapping.h"
54
55 #define ROOT_SIZE               VTD_PAGE_SIZE
56 #define CONTEXT_SIZE            VTD_PAGE_SIZE
57
58 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
59 #define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
60 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
61 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
62
63 #define IOAPIC_RANGE_START      (0xfee00000)
64 #define IOAPIC_RANGE_END        (0xfeefffff)
65 #define IOVA_START_ADDR         (0x1000)
66
67 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
68
69 #define MAX_AGAW_WIDTH 64
70 #define MAX_AGAW_PFN_WIDTH      (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
71
72 #define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
73 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
74
75 /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
76    to match. That way, we can use 'unsigned long' for PFNs with impunity. */
77 #define DOMAIN_MAX_PFN(gaw)     ((unsigned long) min_t(uint64_t, \
78                                 __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
79 #define DOMAIN_MAX_ADDR(gaw)    (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
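/*
 * For illustration: DOMAIN_MAX_PFN(48) is (1ULL << 36) - 1 on 64-bit
 * kernels; on 32-bit kernels the min_t() clamp caps it at ULONG_MAX,
 * i.e. 2^32 - 1 4KiB pages, so PFNs always fit in an unsigned long.
 */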
80
81 /* IO virtual address start page frame number */
82 #define IOVA_START_PFN          (1)
83
84 #define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
85 #define DMA_32BIT_PFN           IOVA_PFN(DMA_BIT_MASK(32))
86 #define DMA_64BIT_PFN           IOVA_PFN(DMA_BIT_MASK(64))
87
88 /* page table handling */
89 #define LEVEL_STRIDE            (9)
90 #define LEVEL_MASK              (((u64)1 << LEVEL_STRIDE) - 1)
91
92 /*
93  * This bitmap is used to advertise the page sizes our hardware supports
94  * to the IOMMU core, which will then use this information to split
95  * physically contiguous memory regions it is mapping into page sizes
96  * that we support.
97  *
98  * Traditionally the IOMMU core just handed us the mappings directly,
99  * after making sure the size is an order of a 4KiB page and that the
100  * mapping has natural alignment.
101  *
102  * To retain this behavior, we currently advertise that we support
103  * all page sizes that are an order of 4KiB.
104  *
105  * If at some point we'd like to utilize the IOMMU core's new behavior,
106  * we could change this to advertise the real page sizes we support.
107  */
108 #define INTEL_IOMMU_PGSIZES     (~0xFFFUL)
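/*
 * ~0xFFFUL sets every bit from bit 12 upward, i.e. it advertises all
 * power-of-two page sizes of 4KiB and larger to the IOMMU core.
 */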
109
110 static inline int agaw_to_level(int agaw)
111 {
112         return agaw + 2;
113 }
114
115 static inline int agaw_to_width(int agaw)
116 {
117         return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
118 }
119
120 static inline int width_to_agaw(int width)
121 {
122         return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
123 }
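/*
 * Worked example of the agaw helpers above: width_to_agaw(48) == 2,
 * agaw_to_level(2) == 4 (a 4-level page table) and agaw_to_width(2) == 48.
 * Likewise agaw 1 is a 3-level/39-bit table and agaw 3 a 5-level/57-bit one.
 */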
124
125 static inline unsigned int level_to_offset_bits(int level)
126 {
127         return (level - 1) * LEVEL_STRIDE;
128 }
129
130 static inline int pfn_level_offset(unsigned long pfn, int level)
131 {
132         return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
133 }
134
135 static inline unsigned long level_mask(int level)
136 {
137         return -1UL << level_to_offset_bits(level);
138 }
139
140 static inline unsigned long level_size(int level)
141 {
142         return 1UL << level_to_offset_bits(level);
143 }
144
145 static inline unsigned long align_to_level(unsigned long pfn, int level)
146 {
147         return (pfn + level_size(level) - 1) & level_mask(level);
148 }
149
150 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
151 {
152         return  1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
153 }
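/*
 * At level 1 each PTE maps a single 4KiB page; a level-2 entry covers
 * level_size(2) == 512 pages (2MiB) and a level-3 entry 1GiB, e.g.
 * lvl_to_nr_pages(2) == 512 and level_to_offset_bits(3) == 18.
 */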
154
155 /* VT-d pages must always be _no larger_ than MM pages. Otherwise things
156    are never going to work. */
157 static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
158 {
159         return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
160 }
161
162 static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
163 {
164         return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
165 }
166 static inline unsigned long page_to_dma_pfn(struct page *pg)
167 {
168         return mm_to_dma_pfn(page_to_pfn(pg));
169 }
170 static inline unsigned long virt_to_dma_pfn(void *p)
171 {
172         return page_to_dma_pfn(virt_to_page(p));
173 }
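/*
 * On x86 PAGE_SHIFT == VTD_PAGE_SHIFT == 12, so the conversions above are
 * identity operations; the shifts only do real work when the CPU page size
 * is larger than the 4KiB VT-d page.
 */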
174
175 /* global iommu list, set NULL for ignored DMAR units */
176 static struct intel_iommu **g_iommus;
177
178 static void __init check_tylersburg_isoch(void);
179 static int rwbf_quirk;
180
181 /*
182  * set to 1 to panic the kernel if VT-d cannot be successfully enabled
183  * (used when kernel is launched w/ TXT)
184  */
185 static int force_on = 0;
186
187 /*
188  * 0: Present
189  * 1-11: Reserved
190  * 12-63: Context Ptr (12 - (haw-1))
191  * 64-127: Reserved
192  */
193 struct root_entry {
194         u64     lo;
195         u64     hi;
196 };
197 #define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
198
199 /*
200  * Take a root_entry and return the Lower Context Table Pointer (LCTP)
201  * if marked present.
202  */
203 static phys_addr_t root_entry_lctp(struct root_entry *re)
204 {
205         if (!(re->lo & 1))
206                 return 0;
207
208         return re->lo & VTD_PAGE_MASK;
209 }
210
211 /*
212  * Take a root_entry and return the Upper Context Table Pointer (UCTP)
213  * if marked present.
214  */
215 static phys_addr_t root_entry_uctp(struct root_entry *re)
216 {
217         if (!(re->hi & 1))
218                 return 0;
219
220         return re->hi & VTD_PAGE_MASK;
221 }
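/*
 * The root table holds ROOT_ENTRY_NR (4096 / 16 == 256) root entries, one
 * per PCI bus number. In legacy mode only the low 64 bits point to a
 * context table indexed by devfn; in extended (ECS) mode the lo half covers
 * devfn 0-127 and the hi half devfn 128-255 (see iommu_context_addr() below).
 */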
222 /*
223  * low 64 bits:
224  * 0: present
225  * 1: fault processing disable
226  * 2-3: translation type
227  * 12-63: address space root
228  * high 64 bits:
229  * 0-2: address width
230  * 3-6: aval
231  * 8-23: domain id
232  */
233 struct context_entry {
234         u64 lo;
235         u64 hi;
236 };
237
238 static inline void context_clear_pasid_enable(struct context_entry *context)
239 {
240         context->lo &= ~(1ULL << 11);
241 }
242
243 static inline bool context_pasid_enabled(struct context_entry *context)
244 {
245         return !!(context->lo & (1ULL << 11));
246 }
247
248 static inline void context_set_copied(struct context_entry *context)
249 {
250         context->hi |= (1ull << 3);
251 }
252
253 static inline bool context_copied(struct context_entry *context)
254 {
255         return !!(context->hi & (1ULL << 3));
256 }
257
258 static inline bool __context_present(struct context_entry *context)
259 {
260         return (context->lo & 1);
261 }
262
263 static inline bool context_present(struct context_entry *context)
264 {
265         return context_pasid_enabled(context) ?
266              __context_present(context) :
267              __context_present(context) && !context_copied(context);
268 }
269
270 static inline void context_set_present(struct context_entry *context)
271 {
272         context->lo |= 1;
273 }
274
275 static inline void context_set_fault_enable(struct context_entry *context)
276 {
277         context->lo &= (((u64)-1) << 2) | 1;
278 }
279
280 static inline void context_set_translation_type(struct context_entry *context,
281                                                 unsigned long value)
282 {
283         context->lo &= (((u64)-1) << 4) | 3;
284         context->lo |= (value & 3) << 2;
285 }
286
287 static inline void context_set_address_root(struct context_entry *context,
288                                             unsigned long value)
289 {
290         context->lo &= ~VTD_PAGE_MASK;
291         context->lo |= value & VTD_PAGE_MASK;
292 }
293
294 static inline void context_set_address_width(struct context_entry *context,
295                                              unsigned long value)
296 {
297         context->hi |= value & 7;
298 }
299
300 static inline void context_set_domain_id(struct context_entry *context,
301                                          unsigned long value)
302 {
303         context->hi |= (value & ((1 << 16) - 1)) << 8;
304 }
305
306 static inline int context_domain_id(struct context_entry *c)
307 {
308         return((c->hi >> 8) & 0xffff);
309 }
310
311 static inline void context_clear_entry(struct context_entry *context)
312 {
313         context->lo = 0;
314         context->hi = 0;
315 }
316
317 /*
318  * 0: readable
319  * 1: writable
320  * 2-6: reserved
321  * 7: super page
322  * 8-10: available
323  * 11: snoop behavior
324  * 12-63: Host physical address
325  */
326 struct dma_pte {
327         u64 val;
328 };
329
330 static inline void dma_clear_pte(struct dma_pte *pte)
331 {
332         pte->val = 0;
333 }
334
335 static inline u64 dma_pte_addr(struct dma_pte *pte)
336 {
337 #ifdef CONFIG_64BIT
338         return pte->val & VTD_PAGE_MASK;
339 #else
340         /* Must have a full atomic 64-bit read */
341         return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
342 #endif
343 }
344
345 static inline bool dma_pte_present(struct dma_pte *pte)
346 {
347         return (pte->val & 3) != 0;
348 }
349
350 static inline bool dma_pte_superpage(struct dma_pte *pte)
351 {
352         return (pte->val & DMA_PTE_LARGE_PAGE);
353 }
354
355 static inline int first_pte_in_page(struct dma_pte *pte)
356 {
357         return !((unsigned long)pte & ~VTD_PAGE_MASK);
358 }
359
360 /*
361  * This domain is a statically identity mapping domain.
362  *      1. This domain creates a static 1:1 mapping to all usable memory.
363  *      2. It maps to each iommu if successful.
364  *      3. Each iommu maps to this domain if successful.
365  */
366 static struct dmar_domain *si_domain;
367 static int hw_pass_through = 1;
368
369 /*
370  * Domain represents a virtual machine; more than one device
371  * across iommus may be owned by one domain, e.g. a kvm guest.
372  */
373 #define DOMAIN_FLAG_VIRTUAL_MACHINE     (1 << 0)
374
375 /* si_domain contains multiple devices */
376 #define DOMAIN_FLAG_STATIC_IDENTITY     (1 << 1)
377
378 #define for_each_domain_iommu(idx, domain)                      \
379         for (idx = 0; idx < g_num_of_iommus; idx++)             \
380                 if (domain->iommu_refcnt[idx])
381
382 struct dmar_domain {
383         int     nid;                    /* node id */
384
385         unsigned        iommu_refcnt[DMAR_UNITS_SUPPORTED];
386                                         /* Refcount of devices per iommu */
387
388
389         u16             iommu_did[DMAR_UNITS_SUPPORTED];
390                                         /* Domain ids per IOMMU. Use u16 since
391                                          * domain ids are 16 bit wide according
392                                          * to VT-d spec, section 9.3 */
393
394         bool has_iotlb_device;
395         struct list_head devices;       /* all devices' list */
396         struct iova_domain iovad;       /* iova's that belong to this domain */
397
398         struct dma_pte  *pgd;           /* virtual address */
399         int             gaw;            /* max guest address width */
400
401         /* adjusted guest address width, 0 is level 2 30-bit */
402         int             agaw;
403
404         int             flags;          /* flags to find out type of domain */
405
406         int             iommu_coherency;/* indicate coherency of iommu access */
407         int             iommu_snooping; /* indicate snooping control feature*/
408         int             iommu_count;    /* reference count of iommu */
409         int             iommu_superpage;/* Level of superpages supported:
410                                            0 == 4KiB (no superpages), 1 == 2MiB,
411                                            2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
412         u64             max_addr;       /* maximum mapped address */
413
414         struct iommu_domain domain;     /* generic domain data structure for
415                                            iommu core */
416 };
417
418 /* PCI domain-device relationship */
419 struct device_domain_info {
420         struct list_head link;  /* link to domain siblings */
421         struct list_head global; /* link to global list */
422         u8 bus;                 /* PCI bus number */
423         u8 devfn;               /* PCI devfn number */
424         u8 pasid_supported:3;
425         u8 pasid_enabled:1;
426         u8 pri_supported:1;
427         u8 pri_enabled:1;
428         u8 ats_supported:1;
429         u8 ats_enabled:1;
430         u8 ats_qdep;
431         struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
432         struct intel_iommu *iommu; /* IOMMU used by this device */
433         struct dmar_domain *domain; /* pointer to domain */
434 };
435
436 struct dmar_rmrr_unit {
437         struct list_head list;          /* list of rmrr units   */
438         struct acpi_dmar_header *hdr;   /* ACPI header          */
439         u64     base_address;           /* reserved base address*/
440         u64     end_address;            /* reserved end address */
441         struct dmar_dev_scope *devices; /* target devices */
442         int     devices_cnt;            /* target device count */
443 };
444
445 struct dmar_atsr_unit {
446         struct list_head list;          /* list of ATSR units */
447         struct acpi_dmar_header *hdr;   /* ACPI header */
448         struct dmar_dev_scope *devices; /* target devices */
449         int devices_cnt;                /* target device count */
450         u8 include_all:1;               /* include all ports */
451 };
452
453 static LIST_HEAD(dmar_atsr_units);
454 static LIST_HEAD(dmar_rmrr_units);
455
456 #define for_each_rmrr_units(rmrr) \
457         list_for_each_entry(rmrr, &dmar_rmrr_units, list)
458
459 static void flush_unmaps_timeout(unsigned long data);
460
461 struct deferred_flush_entry {
462         unsigned long iova_pfn;
463         unsigned long nrpages;
464         struct dmar_domain *domain;
465         struct page *freelist;
466 };
467
468 #define HIGH_WATER_MARK 250
469 struct deferred_flush_table {
470         int next;
471         struct deferred_flush_entry entries[HIGH_WATER_MARK];
472 };
473
474 struct deferred_flush_data {
475         spinlock_t lock;
476         int timer_on;
477         struct timer_list timer;
478         long size;
479         struct deferred_flush_table *tables;
480 };
481
482 static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
483
484 /* number of IOMMUs in the system; used to size the g_iommus array */
485 static int g_num_of_iommus;
486
487 static void domain_exit(struct dmar_domain *domain);
488 static void domain_remove_dev_info(struct dmar_domain *domain);
489 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
490                                      struct device *dev);
491 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
492 static void domain_context_clear(struct intel_iommu *iommu,
493                                  struct device *dev);
494 static int domain_detach_iommu(struct dmar_domain *domain,
495                                struct intel_iommu *iommu);
496
497 #ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
498 int dmar_disabled = 0;
499 #else
500 int dmar_disabled = 1;
501 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
502
503 int intel_iommu_enabled = 0;
504 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
505
506 static int dmar_map_gfx = 1;
507 static int dmar_forcedac;
508 static int intel_iommu_strict;
509 static int intel_iommu_superpage = 1;
510 static int intel_iommu_ecs = 1;
511 static int intel_iommu_pasid28;
512 static int iommu_identity_mapping;
513
514 #define IDENTMAP_ALL            1
515 #define IDENTMAP_GFX            2
516 #define IDENTMAP_AZALIA         4
517
518 /* Broadwell and Skylake have broken ECS support — normal so-called "second
519  * level" translation of DMA requests-without-PASID doesn't actually happen
520  * unless you also set the NESTE bit in an extended context-entry. Which of
521  * course means that SVM doesn't work because it's trying to do nested
522  * translation of the physical addresses it finds in the process page tables,
523  * through the IOVA->phys mapping found in the "second level" page tables.
524  *
525  * The VT-d specification was retroactively changed to change the definition
526  * of the capability bits and pretend that Broadwell/Skylake never happened...
527  * but unfortunately the wrong bit was changed. It's ECS which is broken, but
528  * for some reason it was the PASID capability bit which was redefined (from
529  * bit 28 on BDW/SKL to bit 40 in future).
530  *
531  * So our test for ECS needs to eschew those implementations which set the old
532  * PASID capability bit 28, since those are the ones on which ECS is broken.
533  * Unless we are working around the 'pasid28' limitations, that is, by putting
534  * the device into passthrough mode for normal DMA and thus masking the bug.
535  */
536 #define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
537                             (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
538 /* PASID support is thus enabled if ECS is enabled and *either* of the old
539  * or new capability bits are set. */
540 #define pasid_enabled(iommu) (ecs_enabled(iommu) &&                     \
541                               (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
542
543 int intel_iommu_gfx_mapped;
544 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
545
546 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
547 static DEFINE_SPINLOCK(device_domain_lock);
548 static LIST_HEAD(device_domain_list);
549
550 static const struct iommu_ops intel_iommu_ops;
551
552 static bool translation_pre_enabled(struct intel_iommu *iommu)
553 {
554         return (iommu->flags & VTD_FLAG_TRANS_PRE_ENABLED);
555 }
556
557 static void clear_translation_pre_enabled(struct intel_iommu *iommu)
558 {
559         iommu->flags &= ~VTD_FLAG_TRANS_PRE_ENABLED;
560 }
561
562 static void init_translation_status(struct intel_iommu *iommu)
563 {
564         u32 gsts;
565
566         gsts = readl(iommu->reg + DMAR_GSTS_REG);
567         if (gsts & DMA_GSTS_TES)
568                 iommu->flags |= VTD_FLAG_TRANS_PRE_ENABLED;
569 }
570
571 /* Convert a generic 'struct iommu_domain' to the private 'struct dmar_domain' */
572 static struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
573 {
574         return container_of(dom, struct dmar_domain, domain);
575 }
576
577 static int __init intel_iommu_setup(char *str)
578 {
579         if (!str)
580                 return -EINVAL;
581         while (*str) {
582                 if (!strncmp(str, "on", 2)) {
583                         dmar_disabled = 0;
584                         pr_info("IOMMU enabled\n");
585                 } else if (!strncmp(str, "off", 3)) {
586                         dmar_disabled = 1;
587                         pr_info("IOMMU disabled\n");
588                 } else if (!strncmp(str, "igfx_off", 8)) {
589                         dmar_map_gfx = 0;
590                         pr_info("Disable GFX device mapping\n");
591                 } else if (!strncmp(str, "forcedac", 8)) {
592                         pr_info("Forcing DAC for PCI devices\n");
593                         dmar_forcedac = 1;
594                 } else if (!strncmp(str, "strict", 6)) {
595                         pr_info("Disable batched IOTLB flush\n");
596                         intel_iommu_strict = 1;
597                 } else if (!strncmp(str, "sp_off", 6)) {
598                         pr_info("Disable supported super page\n");
599                         intel_iommu_superpage = 0;
600                 } else if (!strncmp(str, "ecs_off", 7)) {
601                         printk(KERN_INFO
602                                 "Intel-IOMMU: disable extended context table support\n");
603                         intel_iommu_ecs = 0;
604                 } else if (!strncmp(str, "pasid28", 7)) {
605                         printk(KERN_INFO
606                                 "Intel-IOMMU: enable pre-production PASID support\n");
607                         intel_iommu_pasid28 = 1;
608                         iommu_identity_mapping |= IDENTMAP_GFX;
609                 }
610
611                 str += strcspn(str, ",");
612                 while (*str == ',')
613                         str++;
614         }
615         return 0;
616 }
617 __setup("intel_iommu=", intel_iommu_setup);
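/*
 * Boot command line example: "intel_iommu=on,strict,sp_off" enables the
 * IOMMU, disables batched IOTLB flushing and disables superpage use;
 * options are comma-separated and parsed in one pass by the loop above.
 */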
618
619 static struct kmem_cache *iommu_domain_cache;
620 static struct kmem_cache *iommu_devinfo_cache;
621
622 static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
623 {
624         struct dmar_domain **domains;
625         int idx = did >> 8;
626
627         domains = iommu->domains[idx];
628         if (!domains)
629                 return NULL;
630
631         return domains[did & 0xff];
632 }
633
634 static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
635                              struct dmar_domain *domain)
636 {
637         struct dmar_domain **domains;
638         int idx = did >> 8;
639
640         if (!iommu->domains[idx]) {
641                 size_t size = 256 * sizeof(struct dmar_domain *);
642                 iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
643         }
644
645         domains = iommu->domains[idx];
646         if (WARN_ON(!domains))
647                 return;
648         else
649                 domains[did & 0xff] = domain;
650 }
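/*
 * Domain IDs are tracked in a two-level table: did >> 8 selects a lazily
 * allocated array of 256 domain pointers and did & 0xff indexes within it,
 * covering the full 16-bit domain-id space without allocating a 64K-entry
 * array up front.
 */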
651
652 static inline void *alloc_pgtable_page(int node)
653 {
654         struct page *page;
655         void *vaddr = NULL;
656
657         page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
658         if (page)
659                 vaddr = page_address(page);
660         return vaddr;
661 }
662
663 static inline void free_pgtable_page(void *vaddr)
664 {
665         free_page((unsigned long)vaddr);
666 }
667
668 static inline void *alloc_domain_mem(void)
669 {
670         return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
671 }
672
673 static void free_domain_mem(void *vaddr)
674 {
675         kmem_cache_free(iommu_domain_cache, vaddr);
676 }
677
678 static inline void * alloc_devinfo_mem(void)
679 {
680         return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
681 }
682
683 static inline void free_devinfo_mem(void *vaddr)
684 {
685         kmem_cache_free(iommu_devinfo_cache, vaddr);
686 }
687
688 static inline int domain_type_is_vm(struct dmar_domain *domain)
689 {
690         return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
691 }
692
693 static inline int domain_type_is_si(struct dmar_domain *domain)
694 {
695         return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
696 }
697
698 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
699 {
700         return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
701                                 DOMAIN_FLAG_STATIC_IDENTITY);
702 }
703
704 static inline int domain_pfn_supported(struct dmar_domain *domain,
705                                        unsigned long pfn)
706 {
707         int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
708
709         return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
710 }
711
712 static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
713 {
714         unsigned long sagaw;
715         int agaw = -1;
716
717         sagaw = cap_sagaw(iommu->cap);
718         for (agaw = width_to_agaw(max_gaw);
719              agaw >= 0; agaw--) {
720                 if (test_bit(agaw, &sagaw))
721                         break;
722         }
723
724         return agaw;
725 }
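/*
 * cap_sagaw() yields the SAGAW field, where bit n set means (n + 2)-level
 * page tables (agaw n) are supported. The loop above picks the largest
 * supported agaw not exceeding width_to_agaw(max_gaw); e.g. hardware
 * advertising only 4-level support with the default 48-bit width yields
 * agaw 2.
 */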
726
727 /*
728  * Calculate max SAGAW for each iommu.
729  */
730 int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
731 {
732         return __iommu_calculate_agaw(iommu, MAX_AGAW_WIDTH);
733 }
734
735 /*
736  * calculate agaw for each iommu.
737  * "SAGAW" may be different across iommus, use a default agaw, and
738  * get a supported less agaw for iommus that don't support the default agaw.
739  */
740 int iommu_calculate_agaw(struct intel_iommu *iommu)
741 {
742         return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
743 }
744
745 /* This function only returns a single iommu in a domain */
746 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
747 {
748         int iommu_id;
749
750         /* si_domain and vm domain should not get here. */
751         BUG_ON(domain_type_is_vm_or_si(domain));
752         for_each_domain_iommu(iommu_id, domain)
753                 break;
754
755         if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
756                 return NULL;
757
758         return g_iommus[iommu_id];
759 }
760
761 static void domain_update_iommu_coherency(struct dmar_domain *domain)
762 {
763         struct dmar_drhd_unit *drhd;
764         struct intel_iommu *iommu;
765         bool found = false;
766         int i;
767
768         domain->iommu_coherency = 1;
769
770         for_each_domain_iommu(i, domain) {
771                 found = true;
772                 if (!ecap_coherent(g_iommus[i]->ecap)) {
773                         domain->iommu_coherency = 0;
774                         break;
775                 }
776         }
777         if (found)
778                 return;
779
780         /* No hardware attached; use lowest common denominator */
781         rcu_read_lock();
782         for_each_active_iommu(iommu, drhd) {
783                 if (!ecap_coherent(iommu->ecap)) {
784                         domain->iommu_coherency = 0;
785                         break;
786                 }
787         }
788         rcu_read_unlock();
789 }
790
791 static int domain_update_iommu_snooping(struct intel_iommu *skip)
792 {
793         struct dmar_drhd_unit *drhd;
794         struct intel_iommu *iommu;
795         int ret = 1;
796
797         rcu_read_lock();
798         for_each_active_iommu(iommu, drhd) {
799                 if (iommu != skip) {
800                         if (!ecap_sc_support(iommu->ecap)) {
801                                 ret = 0;
802                                 break;
803                         }
804                 }
805         }
806         rcu_read_unlock();
807
808         return ret;
809 }
810
811 static int domain_update_iommu_superpage(struct intel_iommu *skip)
812 {
813         struct dmar_drhd_unit *drhd;
814         struct intel_iommu *iommu;
815         int mask = 0xf;
816
817         if (!intel_iommu_superpage) {
818                 return 0;
819         }
820
821         /* set iommu_superpage to the smallest common denominator */
822         rcu_read_lock();
823         for_each_active_iommu(iommu, drhd) {
824                 if (iommu != skip) {
825                         mask &= cap_super_page_val(iommu->cap);
826                         if (!mask)
827                                 break;
828                 }
829         }
830         rcu_read_unlock();
831
832         return fls(mask);
833 }
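/*
 * cap_super_page_val() is a bitmask of supported superpage sizes
 * (bit 0 == 2MiB, bit 1 == 1GiB). After intersecting across all active
 * IOMMUs, fls() turns it into the iommu_superpage level stored in
 * struct dmar_domain, e.g. a common mask of 0x1 gives level 1 (2MiB
 * superpages only).
 */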
834
835 /* Some capabilities may be different across iommus */
836 static void domain_update_iommu_cap(struct dmar_domain *domain)
837 {
838         domain_update_iommu_coherency(domain);
839         domain->iommu_snooping = domain_update_iommu_snooping(NULL);
840         domain->iommu_superpage = domain_update_iommu_superpage(NULL);
841 }
842
843 static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
844                                                        u8 bus, u8 devfn, int alloc)
845 {
846         struct root_entry *root = &iommu->root_entry[bus];
847         struct context_entry *context;
848         u64 *entry;
849
850         entry = &root->lo;
851         if (ecs_enabled(iommu)) {
852                 if (devfn >= 0x80) {
853                         devfn -= 0x80;
854                         entry = &root->hi;
855                 }
856                 devfn *= 2;
857         }
858         if (*entry & 1)
859                 context = phys_to_virt(*entry & VTD_PAGE_MASK);
860         else {
861                 unsigned long phy_addr;
862                 if (!alloc)
863                         return NULL;
864
865                 context = alloc_pgtable_page(iommu->node);
866                 if (!context)
867                         return NULL;
868
869                 __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE);
870                 phy_addr = virt_to_phys((void *)context);
871                 *entry = phy_addr | 1;
872                 __iommu_flush_cache(iommu, entry, sizeof(*entry));
873         }
874         return &context[devfn];
875 }
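/*
 * In extended (ECS) mode each extended context entry is 256 bits, twice the
 * size of a legacy entry, so a 4KiB context table only holds 128 of them;
 * that is why devfn is folded into the 0-127 range (picking root->lo or
 * root->hi) and then doubled before indexing above.
 */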
876
877 static int iommu_dummy(struct device *dev)
878 {
879         return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
880 }
881
882 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
883 {
884         struct dmar_drhd_unit *drhd = NULL;
885         struct intel_iommu *iommu;
886         struct device *tmp;
887         struct pci_dev *ptmp, *pdev = NULL;
888         u16 segment = 0;
889         int i;
890
891         if (iommu_dummy(dev))
892                 return NULL;
893
894         if (dev_is_pci(dev)) {
895                 struct pci_dev *pf_pdev;
896
897                 pdev = to_pci_dev(dev);
898                 /* VFs aren't listed in scope tables; we need to look up
899                  * the PF instead to find the IOMMU. */
900                 pf_pdev = pci_physfn(pdev);
901                 dev = &pf_pdev->dev;
902                 segment = pci_domain_nr(pdev->bus);
903         } else if (has_acpi_companion(dev))
904                 dev = &ACPI_COMPANION(dev)->dev;
905
906         rcu_read_lock();
907         for_each_active_iommu(iommu, drhd) {
908                 if (pdev && segment != drhd->segment)
909                         continue;
910
911                 for_each_active_dev_scope(drhd->devices,
912                                           drhd->devices_cnt, i, tmp) {
913                         if (tmp == dev) {
914                                 /* For a VF use its original BDF# not that of the PF
915                                  * which we used for the IOMMU lookup. Strictly speaking
916                                  * we could do this for all PCI devices; we only need to
917                                  * get the BDF# from the scope table for ACPI matches. */
918                                 if (pdev->is_virtfn)
919                                         goto got_pdev;
920
921                                 *bus = drhd->devices[i].bus;
922                                 *devfn = drhd->devices[i].devfn;
923                                 goto out;
924                         }
925
926                         if (!pdev || !dev_is_pci(tmp))
927                                 continue;
928
929                         ptmp = to_pci_dev(tmp);
930                         if (ptmp->subordinate &&
931                             ptmp->subordinate->number <= pdev->bus->number &&
932                             ptmp->subordinate->busn_res.end >= pdev->bus->number)
933                                 goto got_pdev;
934                 }
935
936                 if (pdev && drhd->include_all) {
937                 got_pdev:
938                         *bus = pdev->bus->number;
939                         *devfn = pdev->devfn;
940                         goto out;
941                 }
942         }
943         iommu = NULL;
944  out:
945         rcu_read_unlock();
946
947         return iommu;
948 }
949
950 static void domain_flush_cache(struct dmar_domain *domain,
951                                void *addr, int size)
952 {
953         if (!domain->iommu_coherency)
954                 clflush_cache_range(addr, size);
955 }
956
957 static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
958 {
959         struct context_entry *context;
960         int ret = 0;
961         unsigned long flags;
962
963         spin_lock_irqsave(&iommu->lock, flags);
964         context = iommu_context_addr(iommu, bus, devfn, 0);
965         if (context)
966                 ret = context_present(context);
967         spin_unlock_irqrestore(&iommu->lock, flags);
968         return ret;
969 }
970
971 static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
972 {
973         struct context_entry *context;
974         unsigned long flags;
975
976         spin_lock_irqsave(&iommu->lock, flags);
977         context = iommu_context_addr(iommu, bus, devfn, 0);
978         if (context) {
979                 context_clear_entry(context);
980                 __iommu_flush_cache(iommu, context, sizeof(*context));
981         }
982         spin_unlock_irqrestore(&iommu->lock, flags);
983 }
984
985 static void free_context_table(struct intel_iommu *iommu)
986 {
987         int i;
988         unsigned long flags;
989         struct context_entry *context;
990
991         spin_lock_irqsave(&iommu->lock, flags);
992         if (!iommu->root_entry) {
993                 goto out;
994         }
995         for (i = 0; i < ROOT_ENTRY_NR; i++) {
996                 context = iommu_context_addr(iommu, i, 0, 0);
997                 if (context)
998                         free_pgtable_page(context);
999
1000                 if (!ecs_enabled(iommu))
1001                         continue;
1002
1003                 context = iommu_context_addr(iommu, i, 0x80, 0);
1004                 if (context)
1005                         free_pgtable_page(context);
1006
1007         }
1008         free_pgtable_page(iommu->root_entry);
1009         iommu->root_entry = NULL;
1010 out:
1011         spin_unlock_irqrestore(&iommu->lock, flags);
1012 }
1013
1014 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
1015                                       unsigned long pfn, int *target_level)
1016 {
1017         struct dma_pte *parent, *pte = NULL;
1018         int level = agaw_to_level(domain->agaw);
1019         int offset;
1020
1021         BUG_ON(!domain->pgd);
1022
1023         if (!domain_pfn_supported(domain, pfn))
1024                 /* Address beyond IOMMU's addressing capabilities. */
1025                 return NULL;
1026
1027         parent = domain->pgd;
1028
1029         while (1) {
1030                 void *tmp_page;
1031
1032                 offset = pfn_level_offset(pfn, level);
1033                 pte = &parent[offset];
1034                 if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
1035                         break;
1036                 if (level == *target_level)
1037                         break;
1038
1039                 if (!dma_pte_present(pte)) {
1040                         uint64_t pteval;
1041
1042                         tmp_page = alloc_pgtable_page(domain->nid);
1043
1044                         if (!tmp_page)
1045                                 return NULL;
1046
1047                         domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
1048                         pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
1049                         if (cmpxchg64(&pte->val, 0ULL, pteval))
1050                                 /* Someone else set it while we were thinking; use theirs. */
1051                                 free_pgtable_page(tmp_page);
1052                         else
1053                                 domain_flush_cache(domain, pte, sizeof(*pte));
1054                 }
1055                 if (level == 1)
1056                         break;
1057
1058                 parent = phys_to_virt(dma_pte_addr(pte));
1059                 level--;
1060         }
1061
1062         if (!*target_level)
1063                 *target_level = level;
1064
1065         return pte;
1066 }
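/*
 * A *target_level of 0 means "return whatever terminates the walk": an
 * existing superpage, a non-present slot or a level-1 PTE. A non-zero value
 * requests the PTE at exactly that level, allocating intermediate
 * page-table pages on the way down; the cmpxchg64() keeps a concurrent
 * walker from installing a duplicate table page.
 */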
1067
1068
1069 /* return address's pte at specific level */
1070 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
1071                                          unsigned long pfn,
1072                                          int level, int *large_page)
1073 {
1074         struct dma_pte *parent, *pte = NULL;
1075         int total = agaw_to_level(domain->agaw);
1076         int offset;
1077
1078         parent = domain->pgd;
1079         while (level <= total) {
1080                 offset = pfn_level_offset(pfn, total);
1081                 pte = &parent[offset];
1082                 if (level == total)
1083                         return pte;
1084
1085                 if (!dma_pte_present(pte)) {
1086                         *large_page = total;
1087                         break;
1088                 }
1089
1090                 if (dma_pte_superpage(pte)) {
1091                         *large_page = total;
1092                         return pte;
1093                 }
1094
1095                 parent = phys_to_virt(dma_pte_addr(pte));
1096                 total--;
1097         }
1098         return NULL;
1099 }
1100
1101 /* clear last-level ptes; a TLB flush should follow */
1102 static void dma_pte_clear_range(struct dmar_domain *domain,
1103                                 unsigned long start_pfn,
1104                                 unsigned long last_pfn)
1105 {
1106         unsigned int large_page = 1;
1107         struct dma_pte *first_pte, *pte;
1108
1109         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1110         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1111         BUG_ON(start_pfn > last_pfn);
1112
1113         /* we don't need lock here; nobody else touches the iova range */
1114         do {
1115                 large_page = 1;
1116                 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
1117                 if (!pte) {
1118                         start_pfn = align_to_level(start_pfn + 1, large_page + 1);
1119                         continue;
1120                 }
1121                 do {
1122                         dma_clear_pte(pte);
1123                         start_pfn += lvl_to_nr_pages(large_page);
1124                         pte++;
1125                 } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
1126
1127                 domain_flush_cache(domain, first_pte,
1128                                    (void *)pte - (void *)first_pte);
1129
1130         } while (start_pfn && start_pfn <= last_pfn);
1131 }
1132
1133 static void dma_pte_free_level(struct dmar_domain *domain, int level,
1134                                struct dma_pte *pte, unsigned long pfn,
1135                                unsigned long start_pfn, unsigned long last_pfn)
1136 {
1137         pfn = max(start_pfn, pfn);
1138         pte = &pte[pfn_level_offset(pfn, level)];
1139
1140         do {
1141                 unsigned long level_pfn;
1142                 struct dma_pte *level_pte;
1143
1144                 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1145                         goto next;
1146
1147                 level_pfn = pfn & level_mask(level - 1);
1148                 level_pte = phys_to_virt(dma_pte_addr(pte));
1149
1150                 if (level > 2)
1151                         dma_pte_free_level(domain, level - 1, level_pte,
1152                                            level_pfn, start_pfn, last_pfn);
1153
1154                 /* If range covers entire pagetable, free it */
1155                 if (!(start_pfn > level_pfn ||
1156                       last_pfn < level_pfn + level_size(level) - 1)) {
1157                         dma_clear_pte(pte);
1158                         domain_flush_cache(domain, pte, sizeof(*pte));
1159                         free_pgtable_page(level_pte);
1160                 }
1161 next:
1162                 pfn += level_size(level);
1163         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1164 }
1165
1166 /* clear last level (leaf) ptes and free page table pages. */
1167 static void dma_pte_free_pagetable(struct dmar_domain *domain,
1168                                    unsigned long start_pfn,
1169                                    unsigned long last_pfn)
1170 {
1171         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1172         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1173         BUG_ON(start_pfn > last_pfn);
1174
1175         dma_pte_clear_range(domain, start_pfn, last_pfn);
1176
1177         /* We don't need lock here; nobody else touches the iova range */
1178         dma_pte_free_level(domain, agaw_to_level(domain->agaw),
1179                            domain->pgd, 0, start_pfn, last_pfn);
1180
1181         /* free pgd */
1182         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1183                 free_pgtable_page(domain->pgd);
1184                 domain->pgd = NULL;
1185         }
1186 }
1187
1188 /* When a page at a given level is being unlinked from its parent, we don't
1189    need to *modify* it at all. All we need to do is make a list of all the
1190    pages which can be freed just as soon as we've flushed the IOTLB and we
1191    know the hardware page-walk will no longer touch them.
1192    The 'pte' argument is the *parent* PTE, pointing to the page that is to
1193    be freed. */
1194 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
1195                                             int level, struct dma_pte *pte,
1196                                             struct page *freelist)
1197 {
1198         struct page *pg;
1199
1200         pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
1201         pg->freelist = freelist;
1202         freelist = pg;
1203
1204         if (level == 1)
1205                 return freelist;
1206
1207         pte = page_address(pg);
1208         do {
1209                 if (dma_pte_present(pte) && !dma_pte_superpage(pte))
1210                         freelist = dma_pte_list_pagetables(domain, level - 1,
1211                                                            pte, freelist);
1212                 pte++;
1213         } while (!first_pte_in_page(pte));
1214
1215         return freelist;
1216 }
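/*
 * The collected page-table pages are chained through page->freelist rather
 * than freed immediately; dma_free_pagelist() releases the whole chain only
 * after the IOTLB flush guarantees the hardware page walker can no longer
 * reference them.
 */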
1217
1218 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
1219                                         struct dma_pte *pte, unsigned long pfn,
1220                                         unsigned long start_pfn,
1221                                         unsigned long last_pfn,
1222                                         struct page *freelist)
1223 {
1224         struct dma_pte *first_pte = NULL, *last_pte = NULL;
1225
1226         pfn = max(start_pfn, pfn);
1227         pte = &pte[pfn_level_offset(pfn, level)];
1228
1229         do {
1230                 unsigned long level_pfn;
1231
1232                 if (!dma_pte_present(pte))
1233                         goto next;
1234
1235                 level_pfn = pfn & level_mask(level);
1236
1237                 /* If range covers entire pagetable, free it */
1238                 if (start_pfn <= level_pfn &&
1239                     last_pfn >= level_pfn + level_size(level) - 1) {
1240                         /* These subordinate page tables are going away entirely. Don't
1241                            bother to clear them; we're just going to *free* them. */
1242                         if (level > 1 && !dma_pte_superpage(pte))
1243                                 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
1244
1245                         dma_clear_pte(pte);
1246                         if (!first_pte)
1247                                 first_pte = pte;
1248                         last_pte = pte;
1249                 } else if (level > 1) {
1250                         /* Recurse down into a level that isn't *entirely* obsolete */
1251                         freelist = dma_pte_clear_level(domain, level - 1,
1252                                                        phys_to_virt(dma_pte_addr(pte)),
1253                                                        level_pfn, start_pfn, last_pfn,
1254                                                        freelist);
1255                 }
1256 next:
1257                 pfn += level_size(level);
1258         } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1259
1260         if (first_pte)
1261                 domain_flush_cache(domain, first_pte,
1262                                    (void *)++last_pte - (void *)first_pte);
1263
1264         return freelist;
1265 }
1266
1267 /* We can't just free the pages because the IOMMU may still be walking
1268    the page tables, and may have cached the intermediate levels. The
1269    pages can only be freed after the IOTLB flush has been done. */
1270 static struct page *domain_unmap(struct dmar_domain *domain,
1271                                  unsigned long start_pfn,
1272                                  unsigned long last_pfn)
1273 {
1274         struct page *freelist = NULL;
1275
1276         BUG_ON(!domain_pfn_supported(domain, start_pfn));
1277         BUG_ON(!domain_pfn_supported(domain, last_pfn));
1278         BUG_ON(start_pfn > last_pfn);
1279
1280         /* we don't need lock here; nobody else touches the iova range */
1281         freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
1282                                        domain->pgd, 0, start_pfn, last_pfn, NULL);
1283
1284         /* free pgd */
1285         if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1286                 struct page *pgd_page = virt_to_page(domain->pgd);
1287                 pgd_page->freelist = freelist;
1288                 freelist = pgd_page;
1289
1290                 domain->pgd = NULL;
1291         }
1292
1293         return freelist;
1294 }
1295
1296 static void dma_free_pagelist(struct page *freelist)
1297 {
1298         struct page *pg;
1299
1300         while ((pg = freelist)) {
1301                 freelist = pg->freelist;
1302                 free_pgtable_page(page_address(pg));
1303         }
1304 }
1305
1306 /* iommu handling */
1307 static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1308 {
1309         struct root_entry *root;
1310         unsigned long flags;
1311
1312         root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1313         if (!root) {
1314                 pr_err("Allocating root entry for %s failed\n",
1315                         iommu->name);
1316                 return -ENOMEM;
1317         }
1318
1319         __iommu_flush_cache(iommu, root, ROOT_SIZE);
1320
1321         spin_lock_irqsave(&iommu->lock, flags);
1322         iommu->root_entry = root;
1323         spin_unlock_irqrestore(&iommu->lock, flags);
1324
1325         return 0;
1326 }
1327
1328 static void iommu_set_root_entry(struct intel_iommu *iommu)
1329 {
1330         u64 addr;
1331         u32 sts;
1332         unsigned long flag;
1333
1334         addr = virt_to_phys(iommu->root_entry);
1335         if (ecs_enabled(iommu))
1336                 addr |= DMA_RTADDR_RTT;
1337
1338         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1339         dmar_writeq(iommu->reg + DMAR_RTADDR_REG, addr);
1340
1341         writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG);
1342
1343         /* Make sure hardware completes it */
1344         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1345                       readl, (sts & DMA_GSTS_RTPS), sts);
1346
1347         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1348 }
1349
1350 static void iommu_flush_write_buffer(struct intel_iommu *iommu)
1351 {
1352         u32 val;
1353         unsigned long flag;
1354
1355         if (!rwbf_quirk && !cap_rwbf(iommu->cap))
1356                 return;
1357
1358         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1359         writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG);
1360
1361         /* Make sure hardware completes it */
1362         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1363                       readl, (!(val & DMA_GSTS_WBFS)), val);
1364
1365         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1366 }
1367
1368 /* return value determines if we need a write buffer flush */
1369 static void __iommu_flush_context(struct intel_iommu *iommu,
1370                                   u16 did, u16 source_id, u8 function_mask,
1371                                   u64 type)
1372 {
1373         u64 val = 0;
1374         unsigned long flag;
1375
1376         switch (type) {
1377         case DMA_CCMD_GLOBAL_INVL:
1378                 val = DMA_CCMD_GLOBAL_INVL;
1379                 break;
1380         case DMA_CCMD_DOMAIN_INVL:
1381                 val = DMA_CCMD_DOMAIN_INVL|DMA_CCMD_DID(did);
1382                 break;
1383         case DMA_CCMD_DEVICE_INVL:
1384                 val = DMA_CCMD_DEVICE_INVL|DMA_CCMD_DID(did)
1385                         | DMA_CCMD_SID(source_id) | DMA_CCMD_FM(function_mask);
1386                 break;
1387         default:
1388                 BUG();
1389         }
1390         val |= DMA_CCMD_ICC;
1391
1392         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1393         dmar_writeq(iommu->reg + DMAR_CCMD_REG, val);
1394
1395         /* Make sure hardware completes it */
1396         IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG,
1397                 dmar_readq, (!(val & DMA_CCMD_ICC)), val);
1398
1399         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1400 }
1401
1402 /* return value determines if we need a write buffer flush */
1403 static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
1404                                 u64 addr, unsigned int size_order, u64 type)
1405 {
1406         int tlb_offset = ecap_iotlb_offset(iommu->ecap);
1407         u64 val = 0, val_iva = 0;
1408         unsigned long flag;
1409
1410         switch (type) {
1411         case DMA_TLB_GLOBAL_FLUSH:
1412                 /* global flush doesn't need to set IVA_REG */
1413                 val = DMA_TLB_GLOBAL_FLUSH|DMA_TLB_IVT;
1414                 break;
1415         case DMA_TLB_DSI_FLUSH:
1416                 val = DMA_TLB_DSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1417                 break;
1418         case DMA_TLB_PSI_FLUSH:
1419                 val = DMA_TLB_PSI_FLUSH|DMA_TLB_IVT|DMA_TLB_DID(did);
1420                 /* IH bit is passed in as part of address */
1421                 val_iva = size_order | addr;
1422                 break;
1423         default:
1424                 BUG();
1425         }
1426         /* Note: set drain read/write */
1427 #if 0
1428         /*
1429          * This is probably meant to be extra safe. It looks like we can
1430          * ignore it without any impact.
1431          */
1432         if (cap_read_drain(iommu->cap))
1433                 val |= DMA_TLB_READ_DRAIN;
1434 #endif
1435         if (cap_write_drain(iommu->cap))
1436                 val |= DMA_TLB_WRITE_DRAIN;
1437
1438         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1439         /* Note: Only uses first TLB reg currently */
1440         if (val_iva)
1441                 dmar_writeq(iommu->reg + tlb_offset, val_iva);
1442         dmar_writeq(iommu->reg + tlb_offset + 8, val);
1443
1444         /* Make sure hardware completes it */
1445         IOMMU_WAIT_OP(iommu, tlb_offset + 8,
1446                 dmar_readq, (!(val & DMA_TLB_IVT)), val);
1447
1448         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1449
1450         /* check IOTLB invalidation granularity */
1451         if (DMA_TLB_IAIG(val) == 0)
1452                 pr_err("Flush IOTLB failed\n");
1453         if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type))
1454                 pr_debug("TLB flush request %Lx, actual %Lx\n",
1455                         (unsigned long long)DMA_TLB_IIRG(type),
1456                         (unsigned long long)DMA_TLB_IAIG(val));
1457 }
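/*
 * Page-selective (PSI) example: invalidating 16 pages at IOVA 0x200000
 * writes val_iva == 0x200000 | 4 (size_order 4, i.e. 2^4 pages) to the IVA
 * register and the DID/IVT bits to the IOTLB register; the base address
 * must be naturally aligned to the invalidation size.
 */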
1458
1459 static struct device_domain_info *
1460 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1461                          u8 bus, u8 devfn)
1462 {
1463         struct device_domain_info *info;
1464
1465         assert_spin_locked(&device_domain_lock);
1466
1467         if (!iommu->qi)
1468                 return NULL;
1469
1470         list_for_each_entry(info, &domain->devices, link)
1471                 if (info->iommu == iommu && info->bus == bus &&
1472                     info->devfn == devfn) {
1473                         if (info->ats_supported && info->dev)
1474                                 return info;
1475                         break;
1476                 }
1477
1478         return NULL;
1479 }
1480
1481 static void domain_update_iotlb(struct dmar_domain *domain)
1482 {
1483         struct device_domain_info *info;
1484         bool has_iotlb_device = false;
1485
1486         assert_spin_locked(&device_domain_lock);
1487
1488         list_for_each_entry(info, &domain->devices, link) {
1489                 struct pci_dev *pdev;
1490
1491                 if (!info->dev || !dev_is_pci(info->dev))
1492                         continue;
1493
1494                 pdev = to_pci_dev(info->dev);
1495                 if (pdev->ats_enabled) {
1496                         has_iotlb_device = true;
1497                         break;
1498                 }
1499         }
1500
1501         domain->has_iotlb_device = has_iotlb_device;
1502 }
1503
1504 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
1505 {
1506         struct pci_dev *pdev;
1507
1508         assert_spin_locked(&device_domain_lock);
1509
1510         if (!info || !dev_is_pci(info->dev))
1511                 return;
1512
1513         pdev = to_pci_dev(info->dev);
1514
1515 #ifdef CONFIG_INTEL_IOMMU_SVM
1516         /* The PCIe spec, in its wisdom, declares that the behaviour of
1517            the device if you enable PASID support after ATS support is
1518            undefined. So always enable PASID support on devices which
1519            have it, even if we can't yet know if we're ever going to
1520            use it. */
1521         if (info->pasid_supported && !pci_enable_pasid(pdev, info->pasid_supported & ~1))
1522                 info->pasid_enabled = 1;
1523
1524         if (info->pri_supported && !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
1525                 info->pri_enabled = 1;
1526 #endif
1527         if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
1528                 info->ats_enabled = 1;
1529                 domain_update_iotlb(info->domain);
1530                 info->ats_qdep = pci_ats_queue_depth(pdev);
1531         }
1532 }
1533
1534 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
1535 {
1536         struct pci_dev *pdev;
1537
1538         assert_spin_locked(&device_domain_lock);
1539
1540         if (!dev_is_pci(info->dev))
1541                 return;
1542
1543         pdev = to_pci_dev(info->dev);
1544
1545         if (info->ats_enabled) {
1546                 pci_disable_ats(pdev);
1547                 info->ats_enabled = 0;
1548                 domain_update_iotlb(info->domain);
1549         }
1550 #ifdef CONFIG_INTEL_IOMMU_SVM
1551         if (info->pri_enabled) {
1552                 pci_disable_pri(pdev);
1553                 info->pri_enabled = 0;
1554         }
1555         if (info->pasid_enabled) {
1556                 pci_disable_pasid(pdev);
1557                 info->pasid_enabled = 0;
1558         }
1559 #endif
1560 }
1561
1562 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
1563                                   u64 addr, unsigned mask)
1564 {
1565         u16 sid, qdep;
1566         unsigned long flags;
1567         struct device_domain_info *info;
1568
1569         if (!domain->has_iotlb_device)
1570                 return;
1571
1572         spin_lock_irqsave(&device_domain_lock, flags);
1573         list_for_each_entry(info, &domain->devices, link) {
1574                 if (!info->ats_enabled)
1575                         continue;
1576
1577                 sid = info->bus << 8 | info->devfn;
1578                 qdep = info->ats_qdep;
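                /*
                 * Editorial example (illustrative values only): for a device
                 * at bus 0x3a, devfn 0x10 (slot 2, function 0), the source id
                 * used for the device-IOTLB invalidation below is
                 * 0x3a << 8 | 0x10 == 0x3a10.
                 */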
1579                 qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
1580         }
1581         spin_unlock_irqrestore(&device_domain_lock, flags);
1582 }
1583
1584 static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
1585                                   struct dmar_domain *domain,
1586                                   unsigned long pfn, unsigned int pages,
1587                                   int ih, int map)
1588 {
1589         unsigned int mask = ilog2(__roundup_pow_of_two(pages));
1590         uint64_t addr = (uint64_t)pfn << VTD_PAGE_SHIFT;
1591         u16 did = domain->iommu_did[iommu->seq_id];
1592
1593         BUG_ON(pages == 0);
1594
1595         if (ih)
1596                 ih = 1 << 6;
1597         /*
1598          * Fall back to domain-selective flush if there is no PSI support or
1599          * the size is too big.
1600          * PSI requires the page size to be 2^x and the base address to be
1601          * naturally aligned to that size.
1602          */
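        /*
         * Editorial example (illustrative only): for pages == 9,
         * __roundup_pow_of_two(9) == 16 and mask == ilog2(16) == 4, so the
         * PSI flush below invalidates a naturally aligned 16-page region
         * (64KiB with 4KiB VT-d pages).
         */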
1603         if (!cap_pgsel_inv(iommu->cap) || mask > cap_max_amask_val(iommu->cap))
1604                 iommu->flush.flush_iotlb(iommu, did, 0, 0,
1605                                                 DMA_TLB_DSI_FLUSH);
1606         else
1607                 iommu->flush.flush_iotlb(iommu, did, addr | ih, mask,
1608                                                 DMA_TLB_PSI_FLUSH);
1609
1610         /*
1611          * In caching mode, changes of pages from non-present to present require
1612          * flush. However, device IOTLB doesn't need to be flushed in this case.
1613          */
1614         if (!cap_caching_mode(iommu->cap) || !map)
1615                 iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
1616                                       addr, mask);
1617 }
1618
1619 static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1620 {
1621         u32 pmen;
1622         unsigned long flags;
1623
1624         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1625         pmen = readl(iommu->reg + DMAR_PMEN_REG);
1626         pmen &= ~DMA_PMEN_EPM;
1627         writel(pmen, iommu->reg + DMAR_PMEN_REG);
1628
1629         /* wait for the protected region status bit to clear */
1630         IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG,
1631                 readl, !(pmen & DMA_PMEN_PRS), pmen);
1632
1633         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1634 }
1635
1636 static void iommu_enable_translation(struct intel_iommu *iommu)
1637 {
1638         u32 sts;
1639         unsigned long flags;
1640
1641         raw_spin_lock_irqsave(&iommu->register_lock, flags);
1642         iommu->gcmd |= DMA_GCMD_TE;
1643         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1644
1645         /* Make sure the hardware completed it */
1646         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1647                       readl, (sts & DMA_GSTS_TES), sts);
1648
1649         raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1650 }
1651
1652 static void iommu_disable_translation(struct intel_iommu *iommu)
1653 {
1654         u32 sts;
1655         unsigned long flag;
1656
1657         raw_spin_lock_irqsave(&iommu->register_lock, flag);
1658         iommu->gcmd &= ~DMA_GCMD_TE;
1659         writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1660
1661         /* Make sure the hardware completed it */
1662         IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
1663                       readl, (!(sts & DMA_GSTS_TES)), sts);
1664
1665         raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1666 }
1667
1668
1669 static int iommu_init_domains(struct intel_iommu *iommu)
1670 {
1671         u32 ndomains, nlongs;
1672         size_t size;
1673
1674         ndomains = cap_ndoms(iommu->cap);
1675         pr_debug("%s: Number of Domains supported <%d>\n",
1676                  iommu->name, ndomains);
1677         nlongs = BITS_TO_LONGS(ndomains);
1678
1679         spin_lock_init(&iommu->lock);
1680
1681         iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
1682         if (!iommu->domain_ids) {
1683                 pr_err("%s: Allocating domain id array failed\n",
1684                        iommu->name);
1685                 return -ENOMEM;
1686         }
1687
1688         size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
1689         iommu->domains = kzalloc(size, GFP_KERNEL);
1690
1691         if (iommu->domains) {
1692                 size = 256 * sizeof(struct dmar_domain *);
1693                 iommu->domains[0] = kzalloc(size, GFP_KERNEL);
1694         }
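        /*
         * Editorial note (not in the original source): iommu->domains is a
         * two-level array; domain id 'did' is found at
         * domains[did >> 8][did & 0xff], and only the first 256-entry
         * second-level page is pre-allocated here, the rest being allocated
         * lazily as domain ids are handed out.
         */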
1695
1696         if (!iommu->domains || !iommu->domains[0]) {
1697                 pr_err("%s: Allocating domain array failed\n",
1698                        iommu->name);
1699                 kfree(iommu->domain_ids);
1700                 kfree(iommu->domains);
1701                 iommu->domain_ids = NULL;
1702                 iommu->domains    = NULL;
1703                 return -ENOMEM;
1704         }
1705
1706
1707
1708         /*
1709          * If Caching mode is set, then invalid translations are tagged
1710          * with domain-id 0, hence we need to pre-allocate it. We also
1711          * use domain-id 0 as a marker for non-allocated domain-id, so
1712          * make sure it is not used for a real domain.
1713          */
1714         set_bit(0, iommu->domain_ids);
1715
1716         return 0;
1717 }
1718
1719 static void disable_dmar_iommu(struct intel_iommu *iommu)
1720 {
1721         struct device_domain_info *info, *tmp;
1722         unsigned long flags;
1723
1724         if (!iommu->domains || !iommu->domain_ids)
1725                 return;
1726
1727 again:
1728         spin_lock_irqsave(&device_domain_lock, flags);
1729         list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1730                 struct dmar_domain *domain;
1731
1732                 if (info->iommu != iommu)
1733                         continue;
1734
1735                 if (!info->dev || !info->domain)
1736                         continue;
1737
1738                 domain = info->domain;
1739
1740                 __dmar_remove_one_dev_info(info);
1741
1742                 if (!domain_type_is_vm_or_si(domain)) {
1743                         /*
1744                          * The domain_exit() function can't be called under
1745                          * device_domain_lock, as it takes this lock itself.
1746                          * So release the lock here and re-run the loop
1747                          * afterwards.
1748                          */
1749                         spin_unlock_irqrestore(&device_domain_lock, flags);
1750                         domain_exit(domain);
1751                         goto again;
1752                 }
1753         }
1754         spin_unlock_irqrestore(&device_domain_lock, flags);
1755
1756         if (iommu->gcmd & DMA_GCMD_TE)
1757                 iommu_disable_translation(iommu);
1758 }
1759
1760 static void free_dmar_iommu(struct intel_iommu *iommu)
1761 {
1762         if ((iommu->domains) && (iommu->domain_ids)) {
1763                 int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
1764                 int i;
1765
1766                 for (i = 0; i < elems; i++)
1767                         kfree(iommu->domains[i]);
1768                 kfree(iommu->domains);
1769                 kfree(iommu->domain_ids);
1770                 iommu->domains = NULL;
1771                 iommu->domain_ids = NULL;
1772         }
1773
1774         g_iommus[iommu->seq_id] = NULL;
1775
1776         /* free context mapping */
1777         free_context_table(iommu);
1778
1779 #ifdef CONFIG_INTEL_IOMMU_SVM
1780         if (pasid_enabled(iommu)) {
1781                 if (ecap_prs(iommu->ecap))
1782                         intel_svm_finish_prq(iommu);
1783                 intel_svm_free_pasid_tables(iommu);
1784         }
1785 #endif
1786 }
1787
1788 static struct dmar_domain *alloc_domain(int flags)
1789 {
1790         struct dmar_domain *domain;
1791
1792         domain = alloc_domain_mem();
1793         if (!domain)
1794                 return NULL;
1795
1796         memset(domain, 0, sizeof(*domain));
1797         domain->nid = -1;
1798         domain->flags = flags;
1799         domain->has_iotlb_device = false;
1800         INIT_LIST_HEAD(&domain->devices);
1801
1802         return domain;
1803 }
1804
1805 /* Must be called with iommu->lock */
1806 static int domain_attach_iommu(struct dmar_domain *domain,
1807                                struct intel_iommu *iommu)
1808 {
1809         unsigned long ndomains;
1810         int num;
1811
1812         assert_spin_locked(&device_domain_lock);
1813         assert_spin_locked(&iommu->lock);
1814
1815         domain->iommu_refcnt[iommu->seq_id] += 1;
1816         domain->iommu_count += 1;
1817         if (domain->iommu_refcnt[iommu->seq_id] == 1) {
1818                 ndomains = cap_ndoms(iommu->cap);
1819                 num      = find_first_zero_bit(iommu->domain_ids, ndomains);
1820
1821                 if (num >= ndomains) {
1822                         pr_err("%s: No free domain ids\n", iommu->name);
1823                         domain->iommu_refcnt[iommu->seq_id] -= 1;
1824                         domain->iommu_count -= 1;
1825                         return -ENOSPC;
1826                 }
1827
1828                 set_bit(num, iommu->domain_ids);
1829                 set_iommu_domain(iommu, num, domain);
1830
1831                 domain->iommu_did[iommu->seq_id] = num;
1832                 domain->nid                      = iommu->node;
1833
1834                 domain_update_iommu_cap(domain);
1835         }
1836
1837         return 0;
1838 }
1839
1840 static int domain_detach_iommu(struct dmar_domain *domain,
1841                                struct intel_iommu *iommu)
1842 {
1843         int num, count = INT_MAX;
1844
1845         assert_spin_locked(&device_domain_lock);
1846         assert_spin_locked(&iommu->lock);
1847
1848         domain->iommu_refcnt[iommu->seq_id] -= 1;
1849         count = --domain->iommu_count;
1850         if (domain->iommu_refcnt[iommu->seq_id] == 0) {
1851                 num = domain->iommu_did[iommu->seq_id];
1852                 clear_bit(num, iommu->domain_ids);
1853                 set_iommu_domain(iommu, num, NULL);
1854
1855                 domain_update_iommu_cap(domain);
1856                 domain->iommu_did[iommu->seq_id] = 0;
1857         }
1858
1859         return count;
1860 }
1861
1862 static struct iova_domain reserved_iova_list;
1863 static struct lock_class_key reserved_rbtree_key;
1864
1865 static int dmar_init_reserved_ranges(void)
1866 {
1867         struct pci_dev *pdev = NULL;
1868         struct iova *iova;
1869         int i;
1870
1871         init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
1872                         DMA_32BIT_PFN);
1873
1874         lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
1875                 &reserved_rbtree_key);
1876
1877         /* IOAPIC ranges shouldn't be accessed by DMA */
1878         iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
1879                 IOVA_PFN(IOAPIC_RANGE_END));
1880         if (!iova) {
1881                 pr_err("Reserve IOAPIC range failed\n");
1882                 return -ENODEV;
1883         }
1884
1885         /* Reserve all PCI MMIO to avoid peer-to-peer access */
1886         for_each_pci_dev(pdev) {
1887                 struct resource *r;
1888
1889                 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1890                         r = &pdev->resource[i];
1891                         if (!r->flags || !(r->flags & IORESOURCE_MEM))
1892                                 continue;
1893                         iova = reserve_iova(&reserved_iova_list,
1894                                             IOVA_PFN(r->start),
1895                                             IOVA_PFN(r->end));
1896                         if (!iova) {
1897                                 pr_err("Reserve iova failed\n");
1898                                 return -ENODEV;
1899                         }
1900                 }
1901         }
1902         return 0;
1903 }
1904
1905 static void domain_reserve_special_ranges(struct dmar_domain *domain)
1906 {
1907         copy_reserved_iova(&reserved_iova_list, &domain->iovad);
1908 }
1909
1910 static inline int guestwidth_to_adjustwidth(int gaw)
1911 {
1912         int agaw;
1913         int r = (gaw - 12) % 9;
1914
1915         if (r == 0)
1916                 agaw = gaw;
1917         else
1918                 agaw = gaw + 9 - r;
1919         if (agaw > 64)
1920                 agaw = 64;
1921         return agaw;
1922 }
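/*
 * Editorial example (illustrative only): guest widths that are already
 * 12 + a multiple of 9 (e.g. 39 or 48) are returned unchanged, while
 * gaw = 40 gives r = 1 and is rounded up to 48, matching the next full
 * page-table level.
 */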
1923
1924 static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
1925                        int guest_width)
1926 {
1927         int adjust_width, agaw;
1928         unsigned long sagaw;
1929
1930         init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
1931                         DMA_32BIT_PFN);
1932         domain_reserve_special_ranges(domain);
1933
1934         /* calculate AGAW */
1935         if (guest_width > cap_mgaw(iommu->cap))
1936                 guest_width = cap_mgaw(iommu->cap);
1937         domain->gaw = guest_width;
1938         adjust_width = guestwidth_to_adjustwidth(guest_width);
1939         agaw = width_to_agaw(adjust_width);
1940         sagaw = cap_sagaw(iommu->cap);
1941         if (!test_bit(agaw, &sagaw)) {
1942                 /* hardware doesn't support it, choose a bigger one */
1943                 pr_debug("Hardware doesn't support agaw %d\n", agaw);
1944                 agaw = find_next_bit(&sagaw, 5, agaw);
1945                 if (agaw >= 5)
1946                         return -ENODEV;
1947         }
1948         domain->agaw = agaw;
1949
1950         if (ecap_coherent(iommu->ecap))
1951                 domain->iommu_coherency = 1;
1952         else
1953                 domain->iommu_coherency = 0;
1954
1955         if (ecap_sc_support(iommu->ecap))
1956                 domain->iommu_snooping = 1;
1957         else
1958                 domain->iommu_snooping = 0;
1959
1960         if (intel_iommu_superpage)
1961                 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
1962         else
1963                 domain->iommu_superpage = 0;
1964
1965         domain->nid = iommu->node;
1966
1967         /* always allocate the top pgd */
1968         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
1969         if (!domain->pgd)
1970                 return -ENOMEM;
1971         __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
1972         return 0;
1973 }
1974
1975 static void domain_exit(struct dmar_domain *domain)
1976 {
1977         struct page *freelist = NULL;
1978
1979         /* Domain 0 is reserved, so don't process it */
1980         if (!domain)
1981                 return;
1982
1983         /* Flush any lazy unmaps that may reference this domain */
1984         if (!intel_iommu_strict) {
1985                 int cpu;
1986
1987                 for_each_possible_cpu(cpu)
1988                         flush_unmaps_timeout(cpu);
1989         }
1990
1991         /* Remove associated devices and clear attached or cached domains */
1992         rcu_read_lock();
1993         domain_remove_dev_info(domain);
1994         rcu_read_unlock();
1995
1996         /* destroy iovas */
1997         put_iova_domain(&domain->iovad);
1998
1999         freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
2000
2001         dma_free_pagelist(freelist);
2002
2003         free_domain_mem(domain);
2004 }
2005
2006 static int domain_context_mapping_one(struct dmar_domain *domain,
2007                                       struct intel_iommu *iommu,
2008                                       u8 bus, u8 devfn)
2009 {
2010         u16 did = domain->iommu_did[iommu->seq_id];
2011         int translation = CONTEXT_TT_MULTI_LEVEL;
2012         struct device_domain_info *info = NULL;
2013         struct context_entry *context;
2014         unsigned long flags;
2015         struct dma_pte *pgd;
2016         int ret, agaw;
2017
2018         WARN_ON(did == 0);
2019
2020         if (hw_pass_through && domain_type_is_si(domain))
2021                 translation = CONTEXT_TT_PASS_THROUGH;
2022
2023         pr_debug("Set context mapping for %02x:%02x.%d\n",
2024                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
2025
2026         BUG_ON(!domain->pgd);
2027
2028         spin_lock_irqsave(&device_domain_lock, flags);
2029         spin_lock(&iommu->lock);
2030
2031         ret = -ENOMEM;
2032         context = iommu_context_addr(iommu, bus, devfn, 1);
2033         if (!context)
2034                 goto out_unlock;
2035
2036         ret = 0;
2037         if (context_present(context))
2038                 goto out_unlock;
2039
2040         pgd = domain->pgd;
2041
2042         context_clear_entry(context);
2043         context_set_domain_id(context, did);
2044
2045         /*
2046          * Skip top levels of the page tables for an iommu which has a
2047          * smaller agaw than the default. Unnecessary for PT mode.
2048          */
2049         if (translation != CONTEXT_TT_PASS_THROUGH) {
2050                 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
2051                         ret = -ENOMEM;
2052                         pgd = phys_to_virt(dma_pte_addr(pgd));
2053                         if (!dma_pte_present(pgd))
2054                                 goto out_unlock;
2055                 }
2056
2057                 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
2058                 if (info && info->ats_supported)
2059                         translation = CONTEXT_TT_DEV_IOTLB;
2060                 else
2061                         translation = CONTEXT_TT_MULTI_LEVEL;
2062
2063                 context_set_address_root(context, virt_to_phys(pgd));
2064                 context_set_address_width(context, iommu->agaw);
2065         } else {
2066                 /*
2067                  * In pass through mode, AW must be programmed to
2068                  * indicate the largest AGAW value supported by
2069                  * hardware. And ASR is ignored by hardware.
2070                  */
2071                 context_set_address_width(context, iommu->msagaw);
2072         }
2073
2074         context_set_translation_type(context, translation);
2075         context_set_fault_enable(context);
2076         context_set_present(context);
2077         domain_flush_cache(domain, context, sizeof(*context));
2078
2079         /*
2080          * It's a non-present to present mapping. If hardware doesn't cache
2081          * non-present entries, we only need to flush the write-buffer. If it
2082          * _does_ cache non-present entries, then it does so in the special
2083          * domain #0, which we have to flush:
2084          */
2085         if (cap_caching_mode(iommu->cap)) {
2086                 iommu->flush.flush_context(iommu, 0,
2087                                            (((u16)bus) << 8) | devfn,
2088                                            DMA_CCMD_MASK_NOBIT,
2089                                            DMA_CCMD_DEVICE_INVL);
2090                 iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
2091         } else {
2092                 iommu_flush_write_buffer(iommu);
2093         }
2094         iommu_enable_dev_iotlb(info);
2095
2096         ret = 0;
2097
2098 out_unlock:
2099         spin_unlock(&iommu->lock);
2100         spin_unlock_irqrestore(&device_domain_lock, flags);
2101
2102         return ret;
2103 }
2104
2105 struct domain_context_mapping_data {
2106         struct dmar_domain *domain;
2107         struct intel_iommu *iommu;
2108 };
2109
2110 static int domain_context_mapping_cb(struct pci_dev *pdev,
2111                                      u16 alias, void *opaque)
2112 {
2113         struct domain_context_mapping_data *data = opaque;
2114
2115         return domain_context_mapping_one(data->domain, data->iommu,
2116                                           PCI_BUS_NUM(alias), alias & 0xff);
2117 }
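/*
 * Editorial note (not in the original source): 'alias' is a 16-bit
 * requester id, e.g. 0x0310 decodes to bus 0x03, devfn 0x10 (slot 2,
 * function 0), so pci_for_each_dma_alias() below installs a context
 * entry for every DMA alias of the device.
 */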
2118
2119 static int
2120 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
2121 {
2122         struct intel_iommu *iommu;
2123         u8 bus, devfn;
2124         struct domain_context_mapping_data data;
2125
2126         iommu = device_to_iommu(dev, &bus, &devfn);
2127         if (!iommu)
2128                 return -ENODEV;
2129
2130         if (!dev_is_pci(dev))
2131                 return domain_context_mapping_one(domain, iommu, bus, devfn);
2132
2133         data.domain = domain;
2134         data.iommu = iommu;
2135
2136         return pci_for_each_dma_alias(to_pci_dev(dev),
2137                                       &domain_context_mapping_cb, &data);
2138 }
2139
2140 static int domain_context_mapped_cb(struct pci_dev *pdev,
2141                                     u16 alias, void *opaque)
2142 {
2143         struct intel_iommu *iommu = opaque;
2144
2145         return !device_context_mapped(iommu, PCI_BUS_NUM(alias), alias & 0xff);
2146 }
2147
2148 static int domain_context_mapped(struct device *dev)
2149 {
2150         struct intel_iommu *iommu;
2151         u8 bus, devfn;
2152
2153         iommu = device_to_iommu(dev, &bus, &devfn);
2154         if (!iommu)
2155                 return -ENODEV;
2156
2157         if (!dev_is_pci(dev))
2158                 return device_context_mapped(iommu, bus, devfn);
2159
2160         return !pci_for_each_dma_alias(to_pci_dev(dev),
2161                                        domain_context_mapped_cb, iommu);
2162 }
2163
2164 /* Returns a number of VTD pages, but aligned to MM page size */
2165 static inline unsigned long aligned_nrpages(unsigned long host_addr,
2166                                             size_t size)
2167 {
2168         host_addr &= ~PAGE_MASK;
2169         return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
2170 }
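/*
 * Editorial example (assumes 4KiB pages, i.e. PAGE_SIZE == VTD_PAGE_SIZE):
 * a buffer at page offset 0x100 with size 0x2000 gives
 * PAGE_ALIGN(0x2100) >> 12 == 3, so three VT-d pages are needed even
 * though the size alone spans only two.
 */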
2171
2172 /* Return largest possible superpage level for a given mapping */
2173 static inline int hardware_largepage_caps(struct dmar_domain *domain,
2174                                           unsigned long iov_pfn,
2175                                           unsigned long phy_pfn,
2176                                           unsigned long pages)
2177 {
2178         int support, level = 1;
2179         unsigned long pfnmerge;
2180
2181         support = domain->iommu_superpage;
2182
2183         /* To use a large page, the virtual *and* physical addresses
2184            must be aligned to 2MiB/1GiB/etc. Lower bits set in either
2185            of them will mean we have to use smaller pages. So just
2186            merge them and check both at once. */
2187         pfnmerge = iov_pfn | phy_pfn;
2188
2189         while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
2190                 pages >>= VTD_STRIDE_SHIFT;
2191                 if (!pages)
2192                         break;
2193                 pfnmerge >>= VTD_STRIDE_SHIFT;
2194                 level++;
2195                 support--;
2196         }
2197         return level;
2198 }
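/*
 * Editorial example (illustrative only): with 4KiB VT-d pages, a 2MiB
 * superpage needs both pfns to be 512-page aligned, so e.g.
 * iov_pfn == phy_pfn == 0x200 and pages == 1024 with one level of
 * superpage support yields level 2 (2MiB), while any low bit set in
 * either pfn keeps the mapping at level 1 (4KiB).
 */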
2199
2200 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2201                             struct scatterlist *sg, unsigned long phys_pfn,
2202                             unsigned long nr_pages, int prot)
2203 {
2204         struct dma_pte *first_pte = NULL, *pte = NULL;
2205         phys_addr_t uninitialized_var(pteval);
2206         unsigned long sg_res = 0;
2207         unsigned int largepage_lvl = 0;
2208         unsigned long lvl_pages = 0;
2209
2210         BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1));
2211
2212         if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
2213                 return -EINVAL;
2214
2215         prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
2216
2217         if (!sg) {
2218                 sg_res = nr_pages;
2219                 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2220         }
2221
2222         while (nr_pages > 0) {
2223                 uint64_t tmp;
2224
2225                 if (!sg_res) {
2226                         sg_res = aligned_nrpages(sg->offset, sg->length);
2227                         sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
2228                         sg->dma_length = sg->length;
2229                         pteval = page_to_phys(sg_page(sg)) | prot;
2230                         phys_pfn = pteval >> VTD_PAGE_SHIFT;
2231                 }
2232
2233                 if (!pte) {
2234                         largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
2235
2236                         first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
2237                         if (!pte)
2238                                 return -ENOMEM;
2239                         /* It is a large page */
2240                         if (largepage_lvl > 1) {
2241                                 unsigned long nr_superpages, end_pfn;
2242
2243                                 pteval |= DMA_PTE_LARGE_PAGE;
2244                                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2245
2246                                 nr_superpages = sg_res / lvl_pages;
2247                                 end_pfn = iov_pfn + nr_superpages * lvl_pages - 1;
2248
2249                                 /*
2250                                  * Ensure that old small page tables are
2251                                  * removed to make room for superpage(s).
2252                                  */
2253                                 dma_pte_free_pagetable(domain, iov_pfn, end_pfn);
2254                         } else {
2255                                 pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
2256                         }
2257
2258                 }
2259                 /* We don't need a lock here; nobody else
2260                  * touches this iova range.
2261                  */
2262                 tmp = cmpxchg64_local(&pte->val, 0ULL, pteval);
2263                 if (tmp) {
2264                         static int dumps = 5;
2265                         pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
2266                                 iov_pfn, tmp, (unsigned long long)pteval);
2267                         if (dumps) {
2268                                 dumps--;
2269                                 debug_dma_dump_mappings(NULL);
2270                         }
2271                         WARN_ON(1);
2272                 }
2273
2274                 lvl_pages = lvl_to_nr_pages(largepage_lvl);
2275
2276                 BUG_ON(nr_pages < lvl_pages);
2277                 BUG_ON(sg_res < lvl_pages);
2278
2279                 nr_pages -= lvl_pages;
2280                 iov_pfn += lvl_pages;
2281                 phys_pfn += lvl_pages;
2282                 pteval += lvl_pages * VTD_PAGE_SIZE;
2283                 sg_res -= lvl_pages;
2284
2285                 /* If the next PTE would be the first in a new page, then we
2286                    need to flush the cache on the entries we've just written.
2287                    And then we'll need to recalculate 'pte', so clear it and
2288                    let it get set again in the if (!pte) block above.
2289
2290                    If we're done (!nr_pages) we need to flush the cache too.
2291
2292                    Also if we've been setting superpages, we may need to
2293                    recalculate 'pte' and switch back to smaller pages for the
2294                    end of the mapping, if the trailing size is not enough to
2295                    use another superpage (i.e. sg_res < lvl_pages). */
2296                 pte++;
2297                 if (!nr_pages || first_pte_in_page(pte) ||
2298                     (largepage_lvl > 1 && sg_res < lvl_pages)) {
2299                         domain_flush_cache(domain, first_pte,
2300                                            (void *)pte - (void *)first_pte);
2301                         pte = NULL;
2302                 }
2303
2304                 if (!sg_res && nr_pages)
2305                         sg = sg_next(sg);
2306         }
2307         return 0;
2308 }
2309
2310 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2311                                     struct scatterlist *sg, unsigned long nr_pages,
2312                                     int prot)
2313 {
2314         return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot);
2315 }
2316
2317 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
2318                                      unsigned long phys_pfn, unsigned long nr_pages,
2319                                      int prot)
2320 {
2321         return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot);
2322 }
2323
2324 static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
2325 {
2326         if (!iommu)
2327                 return;
2328
2329         clear_context_table(iommu, bus, devfn);
2330         iommu->flush.flush_context(iommu, 0, 0, 0,
2331                                            DMA_CCMD_GLOBAL_INVL);
2332         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2333 }
2334
2335 static inline void unlink_domain_info(struct device_domain_info *info)
2336 {
2337         assert_spin_locked(&device_domain_lock);
2338         list_del(&info->link);
2339         list_del(&info->global);
2340         if (info->dev)
2341                 info->dev->archdata.iommu = NULL;
2342 }
2343
2344 static void domain_remove_dev_info(struct dmar_domain *domain)
2345 {
2346         struct device_domain_info *info, *tmp;
2347         unsigned long flags;
2348
2349         spin_lock_irqsave(&device_domain_lock, flags);
2350         list_for_each_entry_safe(info, tmp, &domain->devices, link)
2351                 __dmar_remove_one_dev_info(info);
2352         spin_unlock_irqrestore(&device_domain_lock, flags);
2353 }
2354
2355 /*
2356  * find_domain
2357  * Note: we use struct device->archdata.iommu to store the info
2358  */
2359 static struct dmar_domain *find_domain(struct device *dev)
2360 {
2361         struct device_domain_info *info;
2362
2363         /* No lock here, assumes no domain exit in normal case */
2364         info = dev->archdata.iommu;
2365         if (info)
2366                 return info->domain;
2367         return NULL;
2368 }
2369
2370 static inline struct device_domain_info *
2371 dmar_search_domain_by_dev_info(int segment, int bus, int devfn)
2372 {
2373         struct device_domain_info *info;
2374
2375         list_for_each_entry(info, &device_domain_list, global)
2376                 if (info->iommu->segment == segment && info->bus == bus &&
2377                     info->devfn == devfn)
2378                         return info;
2379
2380         return NULL;
2381 }
2382
2383 static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2384                                                     int bus, int devfn,
2385                                                     struct device *dev,
2386                                                     struct dmar_domain *domain)
2387 {
2388         struct dmar_domain *found = NULL;
2389         struct device_domain_info *info;
2390         unsigned long flags;
2391         int ret;
2392
2393         info = alloc_devinfo_mem();
2394         if (!info)
2395                 return NULL;
2396
2397         info->bus = bus;
2398         info->devfn = devfn;
2399         info->ats_supported = info->pasid_supported = info->pri_supported = 0;
2400         info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
2401         info->ats_qdep = 0;
2402         info->dev = dev;
2403         info->domain = domain;
2404         info->iommu = iommu;
2405
2406         if (dev && dev_is_pci(dev)) {
2407                 struct pci_dev *pdev = to_pci_dev(info->dev);
2408
2409                 if (ecap_dev_iotlb_support(iommu->ecap) &&
2410                     pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2411                     dmar_find_matched_atsr_unit(pdev))
2412                         info->ats_supported = 1;
2413
2414                 if (ecs_enabled(iommu)) {
2415                         if (pasid_enabled(iommu)) {
2416                                 int features = pci_pasid_features(pdev);
2417                                 if (features >= 0)
2418                                         info->pasid_supported = features | 1;
2419                         }
2420
2421                         if (info->ats_supported && ecap_prs(iommu->ecap) &&
2422                             pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI))
2423                                 info->pri_supported = 1;
2424                 }
2425         }
2426
2427         spin_lock_irqsave(&device_domain_lock, flags);
2428         if (dev)
2429                 found = find_domain(dev);
2430
2431         if (!found) {
2432                 struct device_domain_info *info2;
2433                 info2 = dmar_search_domain_by_dev_info(iommu->segment, bus, devfn);
2434                 if (info2) {
2435                         found      = info2->domain;
2436                         info2->dev = dev;
2437                 }
2438         }
2439
2440         if (found) {
2441                 spin_unlock_irqrestore(&device_domain_lock, flags);
2442                 free_devinfo_mem(info);
2443                 /* Caller must free the original domain */
2444                 return found;
2445         }
2446
2447         spin_lock(&iommu->lock);
2448         ret = domain_attach_iommu(domain, iommu);
2449         spin_unlock(&iommu->lock);
2450
2451         if (ret) {
2452                 spin_unlock_irqrestore(&device_domain_lock, flags);
2453                 free_devinfo_mem(info);
2454                 return NULL;
2455         }
2456
2457         list_add(&info->link, &domain->devices);
2458         list_add(&info->global, &device_domain_list);
2459         if (dev)
2460                 dev->archdata.iommu = info;
2461         spin_unlock_irqrestore(&device_domain_lock, flags);
2462
2463         if (dev && domain_context_mapping(domain, dev)) {
2464                 pr_err("Domain context map for %s failed\n", dev_name(dev));
2465                 dmar_remove_one_dev_info(domain, dev);
2466                 return NULL;
2467         }
2468
2469         return domain;
2470 }
2471
2472 static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
2473 {
2474         *(u16 *)opaque = alias;
2475         return 0;
2476 }
2477
2478 static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
2479 {
2480         struct device_domain_info *info = NULL;
2481         struct dmar_domain *domain = NULL;
2482         struct intel_iommu *iommu;
2483         u16 req_id, dma_alias;
2484         unsigned long flags;
2485         u8 bus, devfn;
2486
2487         iommu = device_to_iommu(dev, &bus, &devfn);
2488         if (!iommu)
2489                 return NULL;
2490
2491         req_id = ((u16)bus << 8) | devfn;
2492
2493         if (dev_is_pci(dev)) {
2494                 struct pci_dev *pdev = to_pci_dev(dev);
2495
2496                 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2497
2498                 spin_lock_irqsave(&device_domain_lock, flags);
2499                 info = dmar_search_domain_by_dev_info(pci_domain_nr(pdev->bus),
2500                                                       PCI_BUS_NUM(dma_alias),
2501                                                       dma_alias & 0xff);
2502                 if (info) {
2503                         iommu = info->iommu;
2504                         domain = info->domain;
2505                 }
2506                 spin_unlock_irqrestore(&device_domain_lock, flags);
2507
2508                 /* DMA alias already has a domain, use it */
2509                 if (info)
2510                         goto out;
2511         }
2512
2513         /* Allocate and initialize new domain for the device */
2514         domain = alloc_domain(0);
2515         if (!domain)
2516                 return NULL;
2517         if (domain_init(domain, iommu, gaw)) {
2518                 domain_exit(domain);
2519                 return NULL;
2520         }
2521
2522 out:
2523
2524         return domain;
2525 }
2526
2527 static struct dmar_domain *set_domain_for_dev(struct device *dev,
2528                                               struct dmar_domain *domain)
2529 {
2530         struct intel_iommu *iommu;
2531         struct dmar_domain *tmp;
2532         u16 req_id, dma_alias;
2533         u8 bus, devfn;
2534
2535         iommu = device_to_iommu(dev, &bus, &devfn);
2536         if (!iommu)
2537                 return NULL;
2538
2539         req_id = ((u16)bus << 8) | devfn;
2540
2541         if (dev_is_pci(dev)) {
2542                 struct pci_dev *pdev = to_pci_dev(dev);
2543
2544                 pci_for_each_dma_alias(pdev, get_last_alias, &dma_alias);
2545
2546                 /* register PCI DMA alias device */
2547                 if (req_id != dma_alias) {
2548                         tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias),
2549                                         dma_alias & 0xff, NULL, domain);
2550
2551                         if (!tmp || tmp != domain)
2552                                 return tmp;
2553                 }
2554         }
2555
2556         tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2557         if (!tmp || tmp != domain)
2558                 return tmp;
2559
2560         return domain;
2561 }
2562
2563 static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw)
2564 {
2565         struct dmar_domain *domain, *tmp;
2566
2567         domain = find_domain(dev);
2568         if (domain)
2569                 goto out;
2570
2571         domain = find_or_alloc_domain(dev, gaw);
2572         if (!domain)
2573                 goto out;
2574
2575         tmp = set_domain_for_dev(dev, domain);
2576         if (!tmp || domain != tmp) {
2577                 domain_exit(domain);
2578                 domain = tmp;
2579         }
2580
2581 out:
2582
2583         return domain;
2584 }
2585
2586 static int iommu_domain_identity_map(struct dmar_domain *domain,
2587                                      unsigned long long start,
2588                                      unsigned long long end)
2589 {
2590         unsigned long first_vpfn = start >> VTD_PAGE_SHIFT;
2591         unsigned long last_vpfn = end >> VTD_PAGE_SHIFT;
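        /*
         * Editorial example (hypothetical RMRR, illustrative only): a range
         * of 0xdd000000 - 0xdf7fffff gives first_vpfn = 0xdd000 and
         * last_vpfn = 0xdf7ff; that span is reserved in the domain's iova
         * allocator and then mapped 1:1 below.
         */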
2592
2593         if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
2594                           dma_to_mm_pfn(last_vpfn))) {
2595                 pr_err("Reserving iova failed\n");
2596                 return -ENOMEM;
2597         }
2598
2599         pr_debug("Mapping reserved region %llx-%llx\n", start, end);
2600         /*
2601          * RMRR range might have overlap with physical memory range,
2602          * clear it first
2603          */
2604         dma_pte_clear_range(domain, first_vpfn, last_vpfn);
2605
2606         return domain_pfn_mapping(domain, first_vpfn, first_vpfn,
2607                                   last_vpfn - first_vpfn + 1,
2608                                   DMA_PTE_READ|DMA_PTE_WRITE);
2609 }
2610
2611 static int domain_prepare_identity_map(struct device *dev,
2612                                        struct dmar_domain *domain,
2613                                        unsigned long long start,
2614                                        unsigned long long end)
2615 {
2616         /* For _hardware_ passthrough, don't bother. But for software
2617            passthrough, we do it anyway -- it may indicate a memory
2618            range which is reserved in E820 and so didn't get set
2619            up to start with in si_domain */
2620         if (domain == si_domain && hw_pass_through) {
2621                 pr_warn("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
2622                         dev_name(dev), start, end);
2623                 return 0;
2624         }
2625
2626         pr_info("Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
2627                 dev_name(dev), start, end);
2628
2629         if (end < start) {
2630                 WARN(1, "Your BIOS is broken; RMRR ends before it starts!\n"
2631                         "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2632                         dmi_get_system_info(DMI_BIOS_VENDOR),
2633                         dmi_get_system_info(DMI_BIOS_VERSION),
2634                         dmi_get_system_info(DMI_PRODUCT_VERSION));
2635                 return -EIO;
2636         }
2637
2638         if (end >> agaw_to_width(domain->agaw)) {
2639                 WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
2640                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
2641                      agaw_to_width(domain->agaw),
2642                      dmi_get_system_info(DMI_BIOS_VENDOR),
2643                      dmi_get_system_info(DMI_BIOS_VERSION),
2644                      dmi_get_system_info(DMI_PRODUCT_VERSION));
2645                 return -EIO;
2646         }
2647
2648         return iommu_domain_identity_map(domain, start, end);
2649 }
2650
2651 static int iommu_prepare_identity_map(struct device *dev,
2652                                       unsigned long long start,
2653                                       unsigned long long end)
2654 {
2655         struct dmar_domain *domain;
2656         int ret;
2657
2658         domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
2659         if (!domain)
2660                 return -ENOMEM;
2661
2662         ret = domain_prepare_identity_map(dev, domain, start, end);
2663         if (ret)
2664                 domain_exit(domain);
2665
2666         return ret;
2667 }
2668
2669 static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
2670                                          struct device *dev)
2671 {
2672         if (dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
2673                 return 0;
2674         return iommu_prepare_identity_map(dev, rmrr->base_address,
2675                                           rmrr->end_address);
2676 }
2677
2678 #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA
2679 static inline void iommu_prepare_isa(void)
2680 {
2681         struct pci_dev *pdev;
2682         int ret;
2683
2684         pdev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
2685         if (!pdev)
2686                 return;
2687
2688         pr_info("Prepare 0-16MiB unity mapping for LPC\n");
2689         ret = iommu_prepare_identity_map(&pdev->dev, 0, 16*1024*1024 - 1);
2690
2691         if (ret)
2692                 pr_err("Failed to create 0-16MiB identity map - floppy might not work\n");
2693
2694         pci_dev_put(pdev);
2695 }
2696 #else
2697 static inline void iommu_prepare_isa(void)
2698 {
2699         return;
2700 }
2701 #endif /* CONFIG_INTEL_IOMMU_FLOPPY_WA */
2702
2703 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2704
2705 static int __init si_domain_init(int hw)
2706 {
2707         int nid, ret = 0;
2708
2709         si_domain = alloc_domain(DOMAIN_FLAG_STATIC_IDENTITY);
2710         if (!si_domain)
2711                 return -EFAULT;
2712
2713         if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
2714                 domain_exit(si_domain);
2715                 return -EFAULT;
2716         }
2717
2718         pr_debug("Identity mapping domain allocated\n");
2719
2720         if (hw)
2721                 return 0;
2722
2723         for_each_online_node(nid) {
2724                 unsigned long start_pfn, end_pfn;
2725                 int i;
2726
2727                 for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
2728                         ret = iommu_domain_identity_map(si_domain,
2729                                         PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
2730                         if (ret)
2731                                 return ret;
2732                 }
2733         }
2734
2735         return 0;
2736 }
2737
2738 static int identity_mapping(struct device *dev)
2739 {
2740         struct device_domain_info *info;
2741
2742         if (likely(!iommu_identity_mapping))
2743                 return 0;
2744
2745         info = dev->archdata.iommu;
2746         if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
2747                 return (info->domain == si_domain);
2748
2749         return 0;
2750 }
2751
2752 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
2753 {
2754         struct dmar_domain *ndomain;
2755         struct intel_iommu *iommu;
2756         u8 bus, devfn;
2757
2758         iommu = device_to_iommu(dev, &bus, &devfn);
2759         if (!iommu)
2760                 return -ENODEV;
2761
2762         ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain);
2763         if (ndomain != domain)
2764                 return -EBUSY;
2765
2766         return 0;
2767 }
2768
2769 static bool device_has_rmrr(struct device *dev)
2770 {
2771         struct dmar_rmrr_unit *rmrr;
2772         struct device *tmp;
2773         int i;
2774
2775         rcu_read_lock();
2776         for_each_rmrr_units(rmrr) {
2777                 /*
2778                  * Return TRUE if this RMRR contains the device that
2779                  * is passed in.
2780                  */
2781                 for_each_active_dev_scope(rmrr->devices,
2782                                           rmrr->devices_cnt, i, tmp)
2783                         if (tmp == dev) {
2784                                 rcu_read_unlock();
2785                                 return true;
2786                         }
2787         }
2788         rcu_read_unlock();
2789         return false;
2790 }
2791
2792 /*
2793  * There are a couple cases where we need to restrict the functionality of
2794  * devices associated with RMRRs.  The first is when evaluating a device for
2795  * identity mapping because problems exist when devices are moved in and out
2796  * of domains and their respective RMRR information is lost.  This means that
2797  * a device with associated RMRRs will never be in a "passthrough" domain.
2798  * The second is use of the device through the IOMMU API.  This interface
2799  * expects to have full control of the IOVA space for the device.  We cannot
2800  * satisfy both the requirement that RMRR access is maintained and have an
2801  * unencumbered IOVA space.  We also have no ability to quiesce the device's
2802  * use of the RMRR space or even inform the IOMMU API user of the restriction.
2803  * We therefore prevent devices associated with an RMRR from participating in
2804  * the IOMMU API, which eliminates them from device assignment.
2805  *
2806  * In both cases we assume that PCI USB devices with RMRRs have them largely
2807  * for historical reasons and that the RMRR space is not actively used post
2808  * boot.  This exclusion may change if vendors begin to abuse it.
2809  *
2810  * The same exception is made for graphics devices, with the requirement that
2811  * any use of the RMRR regions will be torn down before assigning the device
2812  * to a guest.
2813  */
2814 static bool device_is_rmrr_locked(struct device *dev)
2815 {
2816         if (!device_has_rmrr(dev))
2817                 return false;
2818
2819         if (dev_is_pci(dev)) {
2820                 struct pci_dev *pdev = to_pci_dev(dev);
2821
2822                 if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
2823                         return false;
2824         }
2825
2826         return true;
2827 }
2828
2829 static int iommu_should_identity_map(struct device *dev, int startup)
2830 {
2831
2832         if (dev_is_pci(dev)) {
2833                 struct pci_dev *pdev = to_pci_dev(dev);
2834
2835                 if (device_is_rmrr_locked(dev))
2836                         return 0;
2837
2838                 if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev))
2839                         return 1;
2840
2841                 if ((iommu_identity_mapping & IDENTMAP_GFX) && IS_GFX_DEVICE(pdev))
2842                         return 1;
2843
2844                 if (!(iommu_identity_mapping & IDENTMAP_ALL))
2845                         return 0;
2846
2847                 /*
2848                  * We want to start off with all devices in the 1:1 domain, and
2849                  * take them out later if we find they can't access all of memory.
2850                  *
2851                  * However, we can't do this for PCI devices behind bridges,
2852                  * because all PCI devices behind the same bridge will end up
2853                  * with the same source-id on their transactions.
2854                  *
2855                  * Practically speaking, we can't change things around for these
2856                  * devices at run-time, because we can't be sure there'll be no
2857                  * DMA transactions in flight for any of their siblings.
2858                  *
2859                  * So PCI devices (unless they're on the root bus) as well as
2860                  * their parent PCI-PCI or PCIe-PCI bridges must be left _out_ of
2861                  * the 1:1 domain, just in _case_ one of their siblings turns out
2862                  * not to be able to map all of memory.
2863                  */
2864                 if (!pci_is_pcie(pdev)) {
2865                         if (!pci_is_root_bus(pdev->bus))
2866                                 return 0;
2867                         if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
2868                                 return 0;
2869                 } else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
2870                         return 0;
2871         } else {
2872                 if (device_has_rmrr(dev))
2873                         return 0;
2874         }
2875
2876         /*
2877          * At boot time, we don't yet know if devices will be 64-bit capable.
2878          * Assume that they are; if they turn out not to be, then we can
2879          * take them out of the 1:1 domain later.
2880          */
2881         if (!startup) {
2882                 /*
2883                  * If the device's dma_mask is less than the system's memory
2884                  * size then this is not a candidate for identity mapping.
2885                  */
2886                 u64 dma_mask = *dev->dma_mask;
2887
2888                 if (dev->coherent_dma_mask &&
2889                     dev->coherent_dma_mask < dma_mask)
2890                         dma_mask = dev->coherent_dma_mask;
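                /*
                 * Editorial example (illustrative only): a 32-bit-only device
                 * on a machine with more than 4GiB of RAM ends up with
                 * dma_mask below dma_get_required_mask(), so it is not left
                 * in the 1:1 domain and falls back to a remapped domain.
                 */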
2891
2892                 return dma_mask >= dma_get_required_mask(dev);
2893         }
2894
2895         return 1;
2896 }
2897
2898 static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
2899 {
2900         int ret;
2901
2902         if (!iommu_should_identity_map(dev, 1))
2903                 return 0;
2904
2905         ret = domain_add_dev_info(si_domain, dev);
2906         if (!ret)
2907                 pr_info("%s identity mapping for device %s\n",
2908                         hw ? "Hardware" : "Software", dev_name(dev));
2909         else if (ret == -ENODEV)
2910                 /* device not associated with an iommu */
2911                 ret = 0;
2912
2913         return ret;
2914 }
2915
2916
2917 static int __init iommu_prepare_static_identity_mapping(int hw)
2918 {
2919         struct pci_dev *pdev = NULL;
2920         struct dmar_drhd_unit *drhd;
2921         struct intel_iommu *iommu;
2922         struct device *dev;
2923         int i;
2924         int ret = 0;
2925
2926         for_each_pci_dev(pdev) {
2927                 ret = dev_prepare_static_identity_mapping(&pdev->dev, hw);
2928                 if (ret)
2929                         return ret;
2930         }
2931
2932         for_each_active_iommu(iommu, drhd)
2933                 for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) {
2934                         struct acpi_device_physical_node *pn;
2935                         struct acpi_device *adev;
2936
2937                         if (dev->bus != &acpi_bus_type)
2938                                 continue;
2939
2940                         adev = to_acpi_device(dev);
2941                         mutex_lock(&adev->physical_node_lock);
2942                         list_for_each_entry(pn, &adev->physical_node_list, node) {
2943                                 ret = dev_prepare_static_identity_mapping(pn->dev, hw);
2944                                 if (ret)
2945                                         break;
2946                         }
2947                         mutex_unlock(&adev->physical_node_lock);
2948                         if (ret)
2949                                 return ret;
2950                 }
2951
2952         return 0;
2953 }
2954
2955 static void intel_iommu_init_qi(struct intel_iommu *iommu)
2956 {
2957         /*
2958          * Start from a sane iommu hardware state.
2959          * If queued invalidation was already initialized by us
2960          * (for example, while enabling interrupt-remapping), then
2961          * things are already rolling from a sane state.
2962          */
2963         if (!iommu->qi) {
2964                 /*
2965                  * Clear any previous faults.
2966                  */
2967                 dmar_fault(-1, iommu);
2968                 /*
2969                  * Disable queued invalidation if supported and already enabled
2970                  * before OS handover.
2971                  */
2972                 dmar_disable_qi(iommu);
2973         }
2974
2975         if (dmar_enable_qi(iommu)) {
2976                 /*
2977                  * Queued invalidation is not enabled; fall back to register-based invalidation
2978                  */
2979                 iommu->flush.flush_context = __iommu_flush_context;
2980                 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2981                 pr_info("%s: Using Register based invalidation\n",
2982                         iommu->name);
2983         } else {
2984                 iommu->flush.flush_context = qi_flush_context;
2985                 iommu->flush.flush_iotlb = qi_flush_iotlb;
2986                 pr_info("%s: Using Queued invalidation\n", iommu->name);
2987         }
2988 }
2989
2990 static int copy_context_table(struct intel_iommu *iommu,
2991                               struct root_entry *old_re,
2992                               struct context_entry **tbl,
2993                               int bus, bool ext)
2994 {
2995         int tbl_idx, pos = 0, idx, devfn, ret = 0, did;
2996         struct context_entry *new_ce = NULL, ce;
2997         struct context_entry *old_ce = NULL;
2998         struct root_entry re;
2999         phys_addr_t old_ce_phys;
3000
3001         tbl_idx = ext ? bus * 2 : bus;
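        /*
         * Editorial note (not in the original source): in the extended (ECS)
         * root-table format each context entry is twice the legacy size and
         * a bus is split across two context tables (devfns 0x00-0x7f via the
         * lower table pointer, 0x80-0xff via the upper one), which is why
         * both the table index and the per-devfn index are scaled by two.
         */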
3002         memcpy(&re, old_re, sizeof(re));
3003
3004         for (devfn = 0; devfn < 256; devfn++) {
3005                 /* First calculate the correct index */
3006                 idx = (ext ? devfn * 2 : devfn) % 256;
3007
3008                 if (idx == 0) {
3009                         /* First save what we may have and clean up */
3010                         if (new_ce) {
3011                                 tbl[tbl_idx] = new_ce;
3012                                 __iommu_flush_cache(iommu, new_ce,
3013                                                     VTD_PAGE_SIZE);
3014                                 pos = 1;
3015                         }
3016
3017                         if (old_ce)
3018                                 iounmap(old_ce);
3019
3020                         ret = 0;
3021                         if (devfn < 0x80)
3022                                 old_ce_phys = root_entry_lctp(&re);
3023                         else
3024                                 old_ce_phys = root_entry_uctp(&re);
3025
3026                         if (!old_ce_phys) {
3027                                 if (ext && devfn == 0) {
3028                                         /* No LCTP, try UCTP */
3029                                         devfn = 0x7f;
3030                                         continue;
3031                                 } else {
3032                                         goto out;
3033                                 }
3034                         }
3035
3036                         ret = -ENOMEM;
3037                         old_ce = memremap(old_ce_phys, PAGE_SIZE,
3038                                         MEMREMAP_WB);
3039                         if (!old_ce)
3040                                 goto out;
3041
3042                         new_ce = alloc_pgtable_page(iommu->node);
3043                         if (!new_ce)
3044                                 goto out_unmap;
3045
3046                         ret = 0;
3047                 }
3048
3049                 /* Now copy the context entry */
3050                 memcpy(&ce, old_ce + idx, sizeof(ce));
3051
3052                 if (!__context_present(&ce))
3053                         continue;
3054
3055                 did = context_domain_id(&ce);
3056                 if (did >= 0 && did < cap_ndoms(iommu->cap))
3057                         set_bit(did, iommu->domain_ids);
3058
3059                 /*
3060                  * We need a marker for copied context entries. This
3061                  * marker needs to work for the old format as well as
3062                  * for extended context entries.
3063                  *
3064                  * Bit 67 of the context entry is used. In the old
3065                  * format this bit is available to software, in the
3066                  * extended format it is the PGE bit, but PGE is ignored
3067                  * by HW if PASIDs are disabled (and thus still
3068                  * available).
3069                  *
3070                  * So disable PASIDs first and then mark the entry
3071                  * copied. This means that we don't copy PASID
3072                  * translations from the old kernel, but this is fine as
3073                  * faults there are not fatal.
3074                  */
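                /*
                 * (Bit 67 of the 128-bit entry is bit 3 of its upper 64-bit
                 * word, which is what context_set_copied() sets below.)
                 */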
3075                 context_clear_pasid_enable(&ce);
3076                 context_set_copied(&ce);
3077
3078                 new_ce[idx] = ce;
3079         }
3080
3081         tbl[tbl_idx + pos] = new_ce;
3082
3083         __iommu_flush_cache(iommu, new_ce, VTD_PAGE_SIZE);
3084
3085 out_unmap:
3086         memunmap(old_ce);
3087
3088 out:
3089         return ret;
3090 }
3091
3092 static int copy_translation_tables(struct intel_iommu *iommu)
3093 {
3094         struct context_entry **ctxt_tbls;
3095         struct root_entry *old_rt;
3096         phys_addr_t old_rt_phys;
3097         int ctxt_table_entries;
3098         unsigned long flags;
3099         u64 rtaddr_reg;
3100         int bus, ret;
3101         bool new_ext, ext;
3102
3103         rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
3104         ext        = !!(rtaddr_reg & DMA_RTADDR_RTT);
3105         new_ext    = !!ecap_ecs(iommu->ecap);
3106
3107         /*
3108          * The RTT bit can only be changed when translation is disabled,
3109          * but disabling translation would open a window for data
3110          * corruption. So bail out and don't copy anything if we would
3111          * have to change the bit.
3112          */
3113         if (new_ext != ext)
3114                 return -EINVAL;
3115
3116         old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
3117         if (!old_rt_phys)
3118                 return -EINVAL;
3119
3120         old_rt = memremap(old_rt_phys, PAGE_SIZE, MEMREMAP_WB);
3121         if (!old_rt)
3122                 return -ENOMEM;
3123
3124         /* This is too big for the stack - allocate it from slab */
3125         ctxt_table_entries = ext ? 512 : 256;
3126         ret = -ENOMEM;
3127         ctxt_tbls = kzalloc(ctxt_table_entries * sizeof(void *), GFP_KERNEL);
3128         if (!ctxt_tbls)
3129                 goto out_unmap;
3130
3131         for (bus = 0; bus < 256; bus++) {
3132                 ret = copy_context_table(iommu, &old_rt[bus],
3133                                          ctxt_tbls, bus, ext);
3134                 if (ret) {
3135                         pr_err("%s: Failed to copy context table for bus %d\n",
3136                                 iommu->name, bus);
3137                         continue;
3138                 }
3139         }
3140
3141         spin_lock_irqsave(&iommu->lock, flags);
3142
3143         /* Context tables are copied, now write them to the root_entry table */
3144         for (bus = 0; bus < 256; bus++) {
3145                 int idx = ext ? bus * 2 : bus;
3146                 u64 val;
3147
3148                 if (ctxt_tbls[idx]) {
3149                         val = virt_to_phys(ctxt_tbls[idx]) | 1;
3150                         iommu->root_entry[bus].lo = val;
3151                 }
3152
3153                 if (!ext || !ctxt_tbls[idx + 1])
3154                         continue;
3155
3156                 val = virt_to_phys(ctxt_tbls[idx + 1]) | 1;
3157                 iommu->root_entry[bus].hi = val;
3158         }
3159
3160         spin_unlock_irqrestore(&iommu->lock, flags);
3161
3162         kfree(ctxt_tbls);
3163
3164         __iommu_flush_cache(iommu, iommu->root_entry, PAGE_SIZE);
3165
3166         ret = 0;
3167
3168 out_unmap:
3169         memunmap(old_rt);
3170
3171         return ret;
3172 }
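
/*
 * Editor's note: copy_translation_tables() is only called from init_dmars()
 * below, when the previous (crashed) kernel left translation enabled.
 * Keeping the old context tables live preserves in-flight DMA mappings;
 * init_dmars() then defers re-attaching devices to domains until driver
 * initialization (see the copied_tables handling there).
 */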
3173
3174 static int __init init_dmars(void)
3175 {
3176         struct dmar_drhd_unit *drhd;
3177         struct dmar_rmrr_unit *rmrr;
3178         bool copied_tables = false;
3179         struct device *dev;
3180         struct intel_iommu *iommu;
3181         int i, ret, cpu;
3182
3183         /*
3184          * for each drhd
3185          *    allocate root
3186          *    initialize and program root entry to not present
3187          * endfor
3188          */
3189         for_each_drhd_unit(drhd) {
3190                 /*
3191                  * Lock not needed as this is only incremented in the
3192                  * single-threaded kernel __init code path; all other
3193                  * accesses are read-only.
3194                  */
3195                 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
3196                         g_num_of_iommus++;
3197                         continue;
3198                 }
3199                 pr_err_once("Exceeded %d IOMMUs\n", DMAR_UNITS_SUPPORTED);
3200         }
3201
3202         /* Preallocate enough resources for IOMMU hot-addition */
3203         if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
3204                 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
3205
3206         g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
3207                         GFP_KERNEL);
3208         if (!g_iommus) {
3209                 pr_err("Allocating global iommu array failed\n");
3210                 ret = -ENOMEM;
3211                 goto error;
3212         }
3213
3214         for_each_possible_cpu(cpu) {
3215                 struct deferred_flush_data *dfd = per_cpu_ptr(&deferred_flush,
3216                                                               cpu);
3217
3218                 dfd->tables = kzalloc(g_num_of_iommus *
3219                                       sizeof(struct deferred_flush_table),
3220                                       GFP_KERNEL);
3221                 if (!dfd->tables) {
3222                         ret = -ENOMEM;
3223                         goto free_g_iommus;
3224                 }
3225
3226                 spin_lock_init(&dfd->lock);
3227                 setup_timer(&dfd->timer, flush_unmaps_timeout, cpu);
3228         }
3229
3230         for_each_active_iommu(iommu, drhd) {
3231                 g_iommus[iommu->seq_id] = iommu;
3232
3233                 intel_iommu_init_qi(iommu);
3234
3235                 ret = iommu_init_domains(iommu);
3236                 if (ret)
3237                         goto free_iommu;
3238
3239                 init_translation_status(iommu);
3240
3241                 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
3242                         iommu_disable_translation(iommu);
3243                         clear_translation_pre_enabled(iommu);
3244                         pr_warn("Translation was enabled for %s but we are not in kdump mode\n",
3245                                 iommu->name);
3246                 }
3247
3248                 /*
3249                  * TBD:
3250                  * we could share the same root & context tables
3251                  * among all IOMMUs. Need to split this out later.
3252                  */
3253                 ret = iommu_alloc_root_entry(iommu);
3254                 if (ret)
3255                         goto free_iommu;
3256
3257                 if (translation_pre_enabled(iommu)) {
3258                         pr_info("Translation already enabled - trying to copy translation structures\n");
3259
3260                         ret = copy_translation_tables(iommu);
3261                         if (ret) {
3262                                 /*
3263                                  * We found the IOMMU with translation
3264                                  * enabled - but failed to copy over the
3265                                  * old root-entry table. Try to proceed
3266                                  * by disabling translation now and
3267                                  * allocating a clean root-entry table.
3268                                  * This might cause DMAR faults, but
3269                                  * probably the dump will still succeed.
3270                                  */
3271                                 pr_err("Failed to copy translation tables from previous kernel for %s\n",
3272                                        iommu->name);
3273                                 iommu_disable_translation(iommu);
3274                                 clear_translation_pre_enabled(iommu);
3275                         } else {
3276                                 pr_info("Copied translation tables from previous kernel for %s\n",
3277                                         iommu->name);
3278                                 copied_tables = true;
3279                         }
3280                 }
3281
3282                 if (!ecap_pass_through(iommu->ecap))
3283                         hw_pass_through = 0;
3284 #ifdef CONFIG_INTEL_IOMMU_SVM
3285                 if (pasid_enabled(iommu))
3286                         intel_svm_alloc_pasid_tables(iommu);
3287 #endif
3288         }
3289
3290         /*
3291          * Now that qi is enabled on all iommus, set the root entry and flush
3292          * caches. This is required on some Intel X58 chipsets, otherwise the
3293          * flush_context function will loop forever and the boot hangs.
3294          */
3295         for_each_active_iommu(iommu, drhd) {
3296                 iommu_flush_write_buffer(iommu);
3297                 iommu_set_root_entry(iommu);
3298                 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3299                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3300         }
3301
3302         if (iommu_pass_through)
3303                 iommu_identity_mapping |= IDENTMAP_ALL;
3304
3305 #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
3306         iommu_identity_mapping |= IDENTMAP_GFX;
3307 #endif
3308
3309         if (iommu_identity_mapping) {
3310                 ret = si_domain_init(hw_pass_through);
3311                 if (ret)
3312                         goto free_iommu;
3313         }
3314
3315         check_tylersburg_isoch();
3316
3317         /*
3318          * If we copied translations from a previous kernel in the kdump
3319          * case, we cannot assign the devices to domains now, as that
3320          * would eliminate the old mappings. So skip this part and defer
3321          * the assignment to device driver initialization time.
3322          */
3323         if (copied_tables)
3324                 goto domains_done;
3325
3326         /*
3327          * If pass-through is not set or not enabled, set up context entries
3328          * for identity mappings for RMRR, gfx, and ISA, and possibly fall
3329          * back to static identity mapping if iommu_identity_mapping is set.
3330          */
3331         if (iommu_identity_mapping) {
3332                 ret = iommu_prepare_static_identity_mapping(hw_pass_through);
3333                 if (ret) {
3334                         pr_crit("Failed to setup IOMMU pass-through\n");
3335                         goto free_iommu;
3336                 }
3337         }
3338         /*
3339          * For each rmrr
3340          *   for each dev attached to rmrr
3341          *   do
3342          *     locate drhd for dev, alloc domain for dev
3343          *     allocate free domain
3344          *     allocate page table entries for rmrr
3345          *     if context not allocated for bus
3346          *           allocate and init context
3347          *           set present in root table for this bus
3348          *     init context with domain, translation etc
3349          *    endfor
3350          * endfor
3351          */
3352         pr_info("Setting RMRR:\n");
3353         for_each_rmrr_units(rmrr) {
3354                 /* some BIOSes list non-existent devices in the DMAR table. */
3355                 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3356                                           i, dev) {
3357                         ret = iommu_prepare_rmrr_dev(rmrr, dev);
3358                         if (ret)
3359                                 pr_err("Mapping reserved region failed\n");
3360                 }
3361         }
3362
3363         iommu_prepare_isa();
3364
3365 domains_done:
3366
3367         /*
3368          * for each drhd
3369          *   enable fault log
3370          *   global invalidate context cache
3371          *   global invalidate iotlb
3372          *   enable translation
3373          */
3374         for_each_iommu(iommu, drhd) {
3375                 if (drhd->ignored) {
3376                         /*
3377                          * we always have to disable PMRs or DMA may fail on
3378                          * this device
3379                          */
3380                         if (force_on)
3381                                 iommu_disable_protect_mem_regions(iommu);
3382                         continue;
3383                 }
3384
3385                 iommu_flush_write_buffer(iommu);
3386
3387 #ifdef CONFIG_INTEL_IOMMU_SVM
3388                 if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
3389                         ret = intel_svm_enable_prq(iommu);
3390                         if (ret)
3391                                 goto free_iommu;
3392                 }
3393 #endif
3394                 ret = dmar_set_interrupt(iommu);
3395                 if (ret)
3396                         goto free_iommu;
3397
3398                 if (!translation_pre_enabled(iommu))
3399                         iommu_enable_translation(iommu);
3400
3401                 iommu_disable_protect_mem_regions(iommu);
3402         }
3403
3404         return 0;
3405
3406 free_iommu:
3407         for_each_active_iommu(iommu, drhd) {
3408                 disable_dmar_iommu(iommu);
3409                 free_dmar_iommu(iommu);
3410         }
3411 free_g_iommus:
3412         for_each_possible_cpu(cpu)
3413                 kfree(per_cpu_ptr(&deferred_flush, cpu)->tables);
3414         kfree(g_iommus);
3415 error:
3416         return ret;
3417 }
3418
3419 /* This takes a number of _MM_ pages, not VTD pages */
3420 static unsigned long intel_alloc_iova(struct device *dev,
3421                                      struct dmar_domain *domain,
3422                                      unsigned long nrpages, uint64_t dma_mask)
3423 {
3424         unsigned long iova_pfn = 0;
3425
3426         /* Restrict dma_mask to the width that the iommu can handle */
3427         dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
3428         /* Ensure we reserve the whole size-aligned region */
3429         nrpages = __roundup_pow_of_two(nrpages);
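        /*
         * Editor's note: e.g. a 3-page request reserves 4 IOVA pages, so the
         * range handed to alloc_iova_fast() is naturally size-aligned.
         */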
3430
3431         if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
3432                 /*
3433                  * First try to allocate an I/O virtual address below
3434                  * DMA_BIT_MASK(32); if that fails, try allocating from
3435                  * the higher range.
3436                  */
3437                 iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
3438                                            IOVA_PFN(DMA_BIT_MASK(32)));
3439                 if (iova_pfn)
3440                         return iova_pfn;
3441         }
3442         iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
3443         if (unlikely(!iova_pfn)) {
3444                 pr_err("Allocating %lu-page iova for %s failed\n",
3445                        nrpages, dev_name(dev));
3446                 return 0;
3447         }
3448
3449         return iova_pfn;
3450 }
3451
3452 static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
3453 {
3454         struct dmar_domain *domain, *tmp;
3455         struct dmar_rmrr_unit *rmrr;
3456         struct device *i_dev;
3457         int i, ret;
3458
3459         domain = find_domain(dev);
3460         if (domain)
3461                 goto out;
3462
3463         domain = find_or_alloc_domain(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
3464         if (!domain)
3465                 goto out;
3466
3467         /* We have a new domain - set up possible RMRRs for the device */
3468         rcu_read_lock();
3469         for_each_rmrr_units(rmrr) {
3470                 for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
3471                                           i, i_dev) {
3472                         if (i_dev != dev)
3473                                 continue;
3474
3475                         ret = domain_prepare_identity_map(dev, domain,
3476                                                           rmrr->base_address,
3477                                                           rmrr->end_address);
3478                         if (ret)
3479                                 dev_err(dev, "Mapping reserved region failed\n");
3480                 }
3481         }
3482         rcu_read_unlock();
3483
3484         tmp = set_domain_for_dev(dev, domain);
3485         if (!tmp || domain != tmp) {
3486                 domain_exit(domain);
3487                 domain = tmp;
3488         }
3489
3490 out:
3491
3492         if (!domain)
3493                 pr_err("Allocating domain for %s failed\n", dev_name(dev));
3494
3495
3496         return domain;
3497 }
3498
3499 static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
3500 {
3501         struct device_domain_info *info;
3502
3503         /* No lock here, assumes no domain exit in normal case */
3504         info = dev->archdata.iommu;
3505         if (likely(info))
3506                 return info->domain;
3507
3508         return __get_valid_domain_for_dev(dev);
3509 }
3510
3511 /* Check if the device needs to go through the non-identity map and unmap process. */
3512 static int iommu_no_mapping(struct device *dev)
3513 {
3514         int found;
3515
3516         if (iommu_dummy(dev))
3517                 return 1;
3518
3519         if (!iommu_identity_mapping)
3520                 return 0;
3521
3522         found = identity_mapping(dev);
3523         if (found) {
3524                 if (iommu_should_identity_map(dev, 0))
3525                         return 1;
3526                 else {
3527                         /*
3528                          * 32-bit DMA device: remove it from si_domain and
3529                          * fall back to non-identity mapping.
3530                          */
3531                         dmar_remove_one_dev_info(si_domain, dev);
3532                         pr_info("32bit %s uses non-identity mapping\n",
3533                                 dev_name(dev));
3534                         return 0;
3535                 }
3536         } else {
3537                 /*
3538                  * A 64-bit DMA device detached from a VM is put back into
3539                  * si_domain for identity mapping.
3540                  */
3541                 if (iommu_should_identity_map(dev, 0)) {
3542                         int ret;
3543                         ret = domain_add_dev_info(si_domain, dev);
3544                         if (!ret) {
3545                                 pr_info("64bit %s uses identity mapping\n",
3546                                         dev_name(dev));
3547                                 return 1;
3548                         }
3549                 }
3550         }
3551
3552         return 0;
3553 }
3554
3555 static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
3556                                      size_t size, int dir, u64 dma_mask)
3557 {
3558         struct dmar_domain *domain;
3559         phys_addr_t start_paddr;
3560         unsigned long iova_pfn;
3561         int prot = 0;
3562         int ret;
3563         struct intel_iommu *iommu;
3564         unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
3565
3566         BUG_ON(dir == DMA_NONE);
3567
3568         if (iommu_no_mapping(dev))
3569                 return paddr;
3570
3571         domain = get_valid_domain_for_dev(dev);
3572         if (!domain)
3573                 return 0;
3574
3575         iommu = domain_get_iommu(domain);
3576         size = aligned_nrpages(paddr, size);
3577
3578         iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
3579         if (!iova_pfn)
3580                 goto error;
3581
3582         /*
3583          * Check if DMAR supports zero-length reads on write-only
3584          * mappings.
3585          */
3586         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3587                         !cap_zlr(iommu->cap))
3588                 prot |= DMA_PTE_READ;
3589         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3590                 prot |= DMA_PTE_WRITE;
3591         /*
3592          * paddr to (paddr + size) might span a partial page, so we should map
3593          * the whole page.  Note: if two parts of one page are mapped
3594          * separately, we might have two guest addresses mapping to the same
3595          * host paddr, but this is not a big problem.
3596          */
3597         ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
3598                                  mm_to_dma_pfn(paddr_pfn), size, prot);
3599         if (ret)
3600                 goto error;
3601
3602         /* It's a non-present to present mapping. Only flush if in caching mode */
3603         if (cap_caching_mode(iommu->cap))
3604                 iommu_flush_iotlb_psi(iommu, domain,
3605                                       mm_to_dma_pfn(iova_pfn),
3606                                       size, 0, 1);
3607         else
3608                 iommu_flush_write_buffer(iommu);
3609
3610         start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
3611         start_paddr += paddr & ~PAGE_MASK;
3612         return start_paddr;
3613
3614 error:
3615         if (iova_pfn)
3616                 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3617         pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
3618                 dev_name(dev), size, (unsigned long long)paddr, dir);
3619         return 0;
3620 }
3621
3622 static dma_addr_t intel_map_page(struct device *dev, struct page *page,
3623                                  unsigned long offset, size_t size,
3624                                  enum dma_data_direction dir,
3625                                  unsigned long attrs)
3626 {
3627         return __intel_map_single(dev, page_to_phys(page) + offset, size,
3628                                   dir, *dev->dma_mask);
3629 }
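
/*
 * Editor's sketch (not part of the original source): with intel_dma_ops
 * installed, drivers never call the functions above directly; a streaming
 * mapping made through the generic DMA API ends up in intel_map_page() and
 * intel_unmap_page().  The helper below is hypothetical.
 */
#if 0
static int example_stream_mapping(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, handle))
		return -ENOMEM;

	/* ... program the device with 'handle' and run the DMA ... */

	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
#endif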
3630
3631 static void flush_unmaps(struct deferred_flush_data *flush_data)
3632 {
3633         int i, j;
3634
3635         flush_data->timer_on = 0;
3636
3637         /* just flush them all */
3638         for (i = 0; i < g_num_of_iommus; i++) {
3639                 struct intel_iommu *iommu = g_iommus[i];
3640                 struct deferred_flush_table *flush_table =
3641                                 &flush_data->tables[i];
3642                 if (!iommu)
3643                         continue;
3644
3645                 if (!flush_table->next)
3646                         continue;
3647
3648                 /* In caching mode, global flushes make emulation expensive */
3649                 if (!cap_caching_mode(iommu->cap))
3650                         iommu->flush.flush_iotlb(iommu, 0, 0, 0,
3651                                          DMA_TLB_GLOBAL_FLUSH);
3652                 for (j = 0; j < flush_table->next; j++) {
3653                         unsigned long mask;
3654                         struct deferred_flush_entry *entry =
3655                                                 &flush_table->entries[j];
3656                         unsigned long iova_pfn = entry->iova_pfn;
3657                         unsigned long nrpages = entry->nrpages;
3658                         struct dmar_domain *domain = entry->domain;
3659                         struct page *freelist = entry->freelist;
3660
3661                         /* On real hardware multiple invalidations are expensive */
3662                         if (cap_caching_mode(iommu->cap))
3663                                 iommu_flush_iotlb_psi(iommu, domain,
3664                                         mm_to_dma_pfn(iova_pfn),
3665                                         nrpages, !freelist, 0);
3666                         else {
3667                                 mask = ilog2(nrpages);
3668                                 iommu_flush_dev_iotlb(domain,
3669                                                 (uint64_t)iova_pfn << PAGE_SHIFT, mask);
3670                         }
3671                         free_iova_fast(&domain->iovad, iova_pfn, nrpages);
3672                         if (freelist)
3673                                 dma_free_pagelist(freelist);
3674                 }
3675                 flush_table->next = 0;
3676         }
3677
3678         flush_data->size = 0;
3679 }
3680
3681 static void flush_unmaps_timeout(unsigned long cpuid)
3682 {
3683         struct deferred_flush_data *flush_data = per_cpu_ptr(&deferred_flush, cpuid);
3684         unsigned long flags;
3685
3686         spin_lock_irqsave(&flush_data->lock, flags);
3687         flush_unmaps(flush_data);
3688         spin_unlock_irqrestore(&flush_data->lock, flags);
3689 }
3690
3691 static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
3692                       unsigned long nrpages, struct page *freelist)
3693 {
3694         unsigned long flags;
3695         int entry_id, iommu_id;
3696         struct intel_iommu *iommu;
3697         struct deferred_flush_entry *entry;
3698         struct deferred_flush_data *flush_data;
3699
3700         flush_data = raw_cpu_ptr(&deferred_flush);
3701
3702         /* Flush all CPUs' entries to avoid deferring too much.  If
3703          * this becomes a bottleneck, we can just flush this CPU's entries
3704          * and rely on the flush timer for the rest.
3705          */
3706         if (flush_data->size == HIGH_WATER_MARK) {
3707                 int cpu;
3708
3709                 for_each_online_cpu(cpu)
3710                         flush_unmaps_timeout(cpu);
3711         }
3712
3713         spin_lock_irqsave(&flush_data->lock, flags);
3714
3715         iommu = domain_get_iommu(dom);
3716         iommu_id = iommu->seq_id;
3717
3718         entry_id = flush_data->tables[iommu_id].next;
3719         ++(flush_data->tables[iommu_id].next);
3720
3721         entry = &flush_data->tables[iommu_id].entries[entry_id];
3722         entry->domain = dom;
3723         entry->iova_pfn = iova_pfn;
3724         entry->nrpages = nrpages;
3725         entry->freelist = freelist;
3726
3727         if (!flush_data->timer_on) {
3728                 mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10));
3729                 flush_data->timer_on = 1;
3730         }
3731         flush_data->size++;
3732         spin_unlock_irqrestore(&flush_data->lock, flags);
3733 }
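
/*
 * Editor's note: together with flush_unmaps()/flush_unmaps_timeout() above,
 * add_unmap() implements the lazy-unmap path: entries are batched per CPU
 * and drained either when the 10ms timer fires or when a CPU reaches
 * HIGH_WATER_MARK; only then is the IOTLB flushed, the IOVA returned and
 * the freelist of page-table pages released.
 */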
3734
3735 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
3736 {
3737         struct dmar_domain *domain;
3738         unsigned long start_pfn, last_pfn;
3739         unsigned long nrpages;
3740         unsigned long iova_pfn;
3741         struct intel_iommu *iommu;
3742         struct page *freelist;
3743
3744         if (iommu_no_mapping(dev))
3745                 return;
3746
3747         domain = find_domain(dev);
3748         BUG_ON(!domain);
3749
3750         iommu = domain_get_iommu(domain);
3751
3752         iova_pfn = IOVA_PFN(dev_addr);
3753
3754         nrpages = aligned_nrpages(dev_addr, size);
3755         start_pfn = mm_to_dma_pfn(iova_pfn);
3756         last_pfn = start_pfn + nrpages - 1;
3757
3758         pr_debug("Device %s unmapping: pfn %lx-%lx\n",
3759                  dev_name(dev), start_pfn, last_pfn);
3760
3761         freelist = domain_unmap(domain, start_pfn, last_pfn);
3762
3763         if (intel_iommu_strict) {
3764                 iommu_flush_iotlb_psi(iommu, domain, start_pfn,
3765                                       nrpages, !freelist, 0);
3766                 /* free iova */
3767                 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
3768                 dma_free_pagelist(freelist);
3769         } else {
3770                 add_unmap(domain, iova_pfn, nrpages, freelist);
3771                 /*
3772                  * Queue up the release of the unmap to save the 1/6th of the
3773                  * CPU time used up by the IOTLB flush operation...
3774                  */
3775         }
3776 }
3777
3778 static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
3779                              size_t size, enum dma_data_direction dir,
3780                              unsigned long attrs)
3781 {
3782         intel_unmap(dev, dev_addr, size);
3783 }
3784
3785 static void *intel_alloc_coherent(struct device *dev, size_t size,
3786                                   dma_addr_t *dma_handle, gfp_t flags,
3787                                   unsigned long attrs)
3788 {
3789         struct page *page = NULL;
3790         int order;
3791
3792         size = PAGE_ALIGN(size);
3793         order = get_order(size);
3794
3795         if (!iommu_no_mapping(dev))
3796                 flags &= ~(GFP_DMA | GFP_DMA32);
3797         else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
3798                 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
3799                         flags |= GFP_DMA;
3800                 else
3801                         flags |= GFP_DMA32;
3802         }
3803
3804         if (gfpflags_allow_blocking(flags)) {
3805                 unsigned int count = size >> PAGE_SHIFT;
3806
3807                 page = dma_alloc_from_contiguous(dev, count, order);
3808                 if (page && iommu_no_mapping(dev) &&
3809                     page_to_phys(page) + size > dev->coherent_dma_mask) {
3810                         dma_release_from_contiguous(dev, page, count);
3811                         page = NULL;
3812                 }
3813         }
3814
3815         if (!page)
3816                 page = alloc_pages(flags, order);
3817         if (!page)
3818                 return NULL;
3819         memset(page_address(page), 0, size);
3820
3821         *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
3822                                          DMA_BIDIRECTIONAL,
3823                                          dev->coherent_dma_mask);
3824         if (*dma_handle)
3825                 return page_address(page);
3826         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3827                 __free_pages(page, order);
3828
3829         return NULL;
3830 }
3831
3832 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
3833                                 dma_addr_t dma_handle, unsigned long attrs)
3834 {
3835         int order;
3836         struct page *page = virt_to_page(vaddr);
3837
3838         size = PAGE_ALIGN(size);
3839         order = get_order(size);
3840
3841         intel_unmap(dev, dma_handle, size);
3842         if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
3843                 __free_pages(page, order);
3844 }
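
/*
 * Editor's sketch (not part of the original source): coherent buffers reach
 * intel_alloc_coherent()/intel_free_coherent() through the generic DMA API
 * in the same way.  The helper below is hypothetical.
 */
#if 0
static void *example_coherent_alloc(struct pci_dev *pdev, size_t len,
				    dma_addr_t *handle)
{
	/*
	 * Returns a buffer already mapped at *handle (zeroed by
	 * intel_alloc_coherent() above); freed with dma_free_coherent().
	 */
	return dma_alloc_coherent(&pdev->dev, len, handle, GFP_KERNEL);
}
#endif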
3845
3846 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
3847                            int nelems, enum dma_data_direction dir,
3848                            unsigned long attrs)
3849 {
3850         dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
3851         unsigned long nrpages = 0;
3852         struct scatterlist *sg;
3853         int i;
3854
3855         for_each_sg(sglist, sg, nelems, i) {
3856                 nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
3857         }
3858
3859         intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
3860 }
3861
3862 static int intel_nontranslate_map_sg(struct device *hddev,
3863         struct scatterlist *sglist, int nelems, int dir)
3864 {
3865         int i;
3866         struct scatterlist *sg;
3867
3868         for_each_sg(sglist, sg, nelems, i) {
3869                 BUG_ON(!sg_page(sg));
3870                 sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
3871                 sg->dma_length = sg->length;
3872         }
3873         return nelems;
3874 }
3875
3876 static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
3877                         enum dma_data_direction dir, unsigned long attrs)
3878 {
3879         int i;
3880         struct dmar_domain *domain;
3881         size_t size = 0;
3882         int prot = 0;
3883         unsigned long iova_pfn;
3884         int ret;
3885         struct scatterlist *sg;
3886         unsigned long start_vpfn;
3887         struct intel_iommu *iommu;
3888
3889         BUG_ON(dir == DMA_NONE);
3890         if (iommu_no_mapping(dev))
3891                 return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
3892
3893         domain = get_valid_domain_for_dev(dev);
3894         if (!domain)
3895                 return 0;
3896
3897         iommu = domain_get_iommu(domain);
3898
3899         for_each_sg(sglist, sg, nelems, i)
3900                 size += aligned_nrpages(sg->offset, sg->length);
3901
3902         iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
3903                                 *dev->dma_mask);
3904         if (!iova_pfn) {
3905                 sglist->dma_length = 0;
3906                 return 0;
3907         }
3908
3909         /*
3910          * Check if DMAR supports zero-length reads on write-only
3911          * mappings.
3912          */
3913         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL ||
3914                         !cap_zlr(iommu->cap))
3915                 prot |= DMA_PTE_READ;
3916         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
3917                 prot |= DMA_PTE_WRITE;
3918
3919         start_vpfn = mm_to_dma_pfn(iova_pfn);
3920
3921         ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
3922         if (unlikely(ret)) {
3923                 dma_pte_free_pagetable(domain, start_vpfn,
3924                                        start_vpfn + size - 1);
3925                 free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
3926                 return 0;
3927         }
3928
3929         /* It's a non-present to present mapping. Only flush if in caching mode */
3930         if (cap_caching_mode(iommu->cap))
3931                 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1);
3932         else
3933                 iommu_flush_write_buffer(iommu);
3934
3935         return nelems;
3936 }
3937
3938 static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
3939 {
3940         return !dma_addr;
3941 }
3942
3943 struct dma_map_ops intel_dma_ops = {
3944         .alloc = intel_alloc_coherent,
3945         .free = intel_free_coherent,
3946         .map_sg = intel_map_sg,
3947         .unmap_sg = intel_unmap_sg,
3948         .map_page = intel_map_page,
3949         .unmap_page = intel_unmap_page,
3950         .mapping_error = intel_mapping_error,
3951 };
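
/*
 * Editor's note: intel_iommu_init() (later in this file) installs this
 * structure as the global dma_ops once hardware setup succeeds, so the
 * generic dma_map_*() helpers are routed through the functions above for
 * every device that is not bypassed by iommu_no_mapping().
 */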
3952
3953 static inline int iommu_domain_cache_init(void)
3954 {
3955         int ret = 0;
3956
3957         iommu_domain_cache = kmem_cache_create("iommu_domain",
3958                                          sizeof(struct dmar_domain),
3959                                          0,
3960                                          SLAB_HWCACHE_ALIGN,
3961                                          NULL);
3963         if (!iommu_domain_cache) {
3964                 pr_err("Couldn't create iommu_domain cache\n");
3965                 ret = -ENOMEM;
3966         }
3967
3968         return ret;
3969 }
3970
3971 static inline int iommu_devinfo_cache_init(void)
3972 {
3973         int ret = 0;
3974
3975         iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
3976                                          sizeof(struct device_domain_info),
3977                                          0,
3978                                          SLAB_HWCACHE_ALIGN,
3979                                          NULL);
3980         if (!iommu_devinfo_cache) {
3981                 pr_err("Couldn't create devinfo cache\n");
3982                 ret = -ENOMEM;
3983         }
3984
3985         return ret;
3986 }
3987
3988 static int __init iommu_init_mempool(void)
3989 {
3990         int ret;
3991         ret = iova_cache_get();
3992         if (ret)
3993                 return ret;
3994
3995         ret = iommu_domain_cache_init();
3996         if (ret)
3997                 goto domain_error;
3998
3999         ret = iommu_devinfo_cache_init();
4000         if (!ret)
4001                 return ret;
4002
4003         kmem_cache_destroy(iommu_domain_cache);
4004 domain_error:
4005         iova_cache_put();
4006
4007         return -ENOMEM;
4008 }
4009
4010 static void __init iommu_exit_mempool(void)
4011 {
4012         kmem_cache_destroy(iommu_devinfo_cache);
4013         kmem_cache_destroy(iommu_domain_cache);
4014         iova_cache_put();
4015 }
4016
4017 static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
4018 {
4019         struct dmar_drhd_unit *drhd;
4020         u32 vtbar;
4021         int rc;
4022
4023         /* We know that this device on this chipset has its own IOMMU.
4024          * If we find it under a different IOMMU, then the BIOS is lying
4025          * to us. Hope that the IOMMU for this device is actually
4026          * disabled, and it needs no translation...
4027          */
4028         rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
4029         if (rc) {
4030                 /* "can't" happen */
4031                 dev_info(&pdev->dev, "failed to run vt-d quirk\n");
4032                 return;
4033         }
4034         vtbar &= 0xffff0000;
4035
4036         /* we know that this IOMMU should be at offset 0xa000 from vtbar */
4037         drhd = dmar_find_matched_drhd_unit(pdev);
4038         if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
4039                             TAINT_FIRMWARE_WORKAROUND,
4040                             "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
4041                 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4042 }
4043 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
4044
4045 static void __init init_no_remapping_devices(void)
4046 {
4047         struct dmar_drhd_unit *drhd;
4048         struct device *dev;
4049         int i;
4050
4051         for_each_drhd_unit(drhd) {
4052                 if (!drhd->include_all) {
4053                         for_each_active_dev_scope(drhd->devices,
4054                                                   drhd->devices_cnt, i, dev)
4055                                 break;
4056                         /* ignore DMAR unit if no devices exist */
4057                         if (i == drhd->devices_cnt)
4058                                 drhd->ignored = 1;
4059                 }
4060         }
4061
4062         for_each_active_drhd_unit(drhd) {
4063                 if (drhd->include_all)
4064                         continue;
4065
4066                 for_each_active_dev_scope(drhd->devices,
4067                                           drhd->devices_cnt, i, dev)
4068                         if (!dev_is_pci(dev) || !IS_GFX_DEVICE(to_pci_dev(dev)))
4069                                 break;
4070                 if (i < drhd->devices_cnt)
4071                         continue;
4072
4073                 /* This IOMMU has *only* gfx devices. Either bypass it or
4074                    set the gfx_mapped flag, as appropriate */
4075                 if (dmar_map_gfx) {
4076                         intel_iommu_gfx_mapped = 1;
4077                 } else {
4078                         drhd->ignored = 1;
4079                         for_each_active_dev_scope(drhd->devices,
4080                                                   drhd->devices_cnt, i, dev)
4081                                 dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
4082                 }
4083         }
4084 }
4085
4086 #ifdef CONFIG_SUSPEND
4087 static int init_iommu_hw(void)
4088 {
4089         struct dmar_drhd_unit *drhd;
4090         struct intel_iommu *iommu = NULL;
4091
4092         for_each_active_iommu(iommu, drhd)
4093                 if (iommu->qi)
4094                         dmar_reenable_qi(iommu);
4095
4096         for_each_iommu(iommu, drhd) {
4097                 if (drhd->ignored) {
4098                         /*
4099                          * we always have to disable PMRs or DMA may fail on
4100                          * this device
4101                          */
4102                         if (force_on)
4103                                 iommu_disable_protect_mem_regions(iommu);
4104                         continue;
4105                 }
4106
4107                 iommu_flush_write_buffer(iommu);
4108
4109                 iommu_set_root_entry(iommu);
4110
4111                 iommu->flush.flush_context(iommu, 0, 0, 0,
4112                                            DMA_CCMD_GLOBAL_INVL);
4113                 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4114                 iommu_enable_translation(iommu);
4115                 iommu_disable_protect_mem_regions(iommu);
4116         }
4117
4118         return 0;
4119 }
4120
4121 static void iommu_flush_all(void)
4122 {
4123         struct dmar_drhd_unit *drhd;
4124         struct intel_iommu *iommu;
4125
4126         for_each_active_iommu(iommu, drhd) {
4127                 iommu->flush.flush_context(iommu, 0, 0, 0,
4128                                            DMA_CCMD_GLOBAL_INVL);
4129                 iommu->flush.flush_iotlb(iommu, 0, 0, 0,
4130                                          DMA_TLB_GLOBAL_FLUSH);
4131         }
4132 }
4133
4134 static int iommu_suspend(void)
4135 {
4136         struct dmar_drhd_unit *drhd;
4137         struct intel_iommu *iommu = NULL;
4138         unsigned long flag;
4139
4140         for_each_active_iommu(iommu, drhd) {
4141                 iommu->iommu_state = kzalloc(sizeof(u32) * MAX_SR_DMAR_REGS,
4142                                                  GFP_ATOMIC);
4143                 if (!iommu->iommu_state)
4144                         goto nomem;
4145         }
4146
4147         iommu_flush_all();
4148
4149         for_each_active_iommu(iommu, drhd) {
4150                 iommu_disable_translation(iommu);
4151
4152                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4153
4154                 iommu->iommu_state[SR_DMAR_FECTL_REG] =
4155                         readl(iommu->reg + DMAR_FECTL_REG);
4156                 iommu->iommu_state[SR_DMAR_FEDATA_REG] =
4157                         readl(iommu->reg + DMAR_FEDATA_REG);
4158                 iommu->iommu_state[SR_DMAR_FEADDR_REG] =
4159                         readl(iommu->reg + DMAR_FEADDR_REG);
4160                 iommu->iommu_state[SR_DMAR_FEUADDR_REG] =
4161                         readl(iommu->reg + DMAR_FEUADDR_REG);
4162
4163                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4164         }
4165         return 0;
4166
4167 nomem:
4168         for_each_active_iommu(iommu, drhd)
4169                 kfree(iommu->iommu_state);
4170
4171         return -ENOMEM;
4172 }
4173
4174 static void iommu_resume(void)
4175 {
4176         struct dmar_drhd_unit *drhd;
4177         struct intel_iommu *iommu = NULL;
4178         unsigned long flag;
4179
4180         if (init_iommu_hw()) {
4181                 if (force_on)
4182                         panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
4183                 else
4184                         WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
4185                 return;
4186         }
4187
4188         for_each_active_iommu(iommu, drhd) {
4189
4190                 raw_spin_lock_irqsave(&iommu->register_lock, flag);
4191
4192                 writel(iommu->iommu_state[SR_DMAR_FECTL_REG],
4193                         iommu->reg + DMAR_FECTL_REG);
4194                 writel(iommu->iommu_state[SR_DMAR_FEDATA_REG],
4195                         iommu->reg + DMAR_FEDATA_REG);
4196                 writel(iommu->iommu_state[SR_DMAR_FEADDR_REG],
4197                         iommu->reg + DMAR_FEADDR_REG);
4198                 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG],
4199                         iommu->reg + DMAR_FEUADDR_REG);
4200
4201                 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
4202         }
4203
4204         for_each_active_iommu(iommu, drhd)
4205                 kfree(iommu->iommu_state);
4206 }
4207
4208 static struct syscore_ops iommu_syscore_ops = {
4209         .resume         = iommu_resume,
4210         .suspend        = iommu_suspend,
4211 };
4212
4213 static void __init init_iommu_pm_ops(void)
4214 {
4215         register_syscore_ops(&iommu_syscore_ops);
4216 }
4217
4218 #else
4219 static inline void init_iommu_pm_ops(void) {}
4220 #endif  /* CONFIG_SUSPEND */
4221
4222
4223 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
4224 {
4225         struct acpi_dmar_reserved_memory *rmrr;
4226         struct dmar_rmrr_unit *rmrru;
4227
4228         rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
4229         if (!rmrru)
4230                 return -ENOMEM;
4231
4232         rmrru->hdr = header;
4233         rmrr = (struct acpi_dmar_reserved_memory *)header;
4234         rmrru->base_address = rmrr->base_address;
4235         rmrru->end_address = rmrr->end_address;
4236         rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
4237                                 ((void *)rmrr) + rmrr->header.length,
4238                                 &rmrru->devices_cnt);
4239         if (rmrru->devices_cnt && rmrru->devices == NULL) {
4240                 kfree(rmrru);
4241                 return -ENOMEM;
4242         }
4243
4244         list_add(&rmrru->list, &dmar_rmrr_units);
4245
4246         return 0;
4247 }
4248
4249 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
4250 {
4251         struct dmar_atsr_unit *atsru;
4252         struct acpi_dmar_atsr *tmp;
4253
4254         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4255                 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
4256                 if (atsr->segment != tmp->segment)
4257                         continue;
4258                 if (atsr->header.length != tmp->header.length)
4259                         continue;
4260                 if (memcmp(atsr, tmp, atsr->header.length) == 0)
4261                         return atsru;
4262         }
4263
4264         return NULL;
4265 }
4266
4267 int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4268 {
4269         struct acpi_dmar_atsr *atsr;
4270         struct dmar_atsr_unit *atsru;
4271
4272         if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
4273                 return 0;
4274
4275         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4276         atsru = dmar_find_atsr(atsr);
4277         if (atsru)
4278                 return 0;
4279
4280         atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
4281         if (!atsru)
4282                 return -ENOMEM;
4283
4284         /*
4285          * If memory is allocated from slab by the ACPI _DSM method, we need to
4286          * copy the memory content because the memory buffer will be freed
4287          * on return.
4288          */
4289         atsru->hdr = (void *)(atsru + 1);
4290         memcpy(atsru->hdr, hdr, hdr->length);
4291         atsru->include_all = atsr->flags & 0x1;
4292         if (!atsru->include_all) {
4293                 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
4294                                 (void *)atsr + atsr->header.length,
4295                                 &atsru->devices_cnt);
4296                 if (atsru->devices_cnt && atsru->devices == NULL) {
4297                         kfree(atsru);
4298                         return -ENOMEM;
4299                 }
4300         }
4301
4302         list_add_rcu(&atsru->list, &dmar_atsr_units);
4303
4304         return 0;
4305 }
4306
4307 static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
4308 {
4309         dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
4310         kfree(atsru);
4311 }
4312
4313 int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4314 {
4315         struct acpi_dmar_atsr *atsr;
4316         struct dmar_atsr_unit *atsru;
4317
4318         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4319         atsru = dmar_find_atsr(atsr);
4320         if (atsru) {
4321                 list_del_rcu(&atsru->list);
4322                 synchronize_rcu();
4323                 intel_iommu_free_atsr(atsru);
4324         }
4325
4326         return 0;
4327 }
4328
4329 int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
4330 {
4331         int i;
4332         struct device *dev;
4333         struct acpi_dmar_atsr *atsr;
4334         struct dmar_atsr_unit *atsru;
4335
4336         atsr = container_of(hdr, struct acpi_dmar_atsr, header);
4337         atsru = dmar_find_atsr(atsr);
4338         if (!atsru)
4339                 return 0;
4340
4341         if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
4342                 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
4343                                           i, dev)
4344                         return -EBUSY;
4345         }
4346
4347         return 0;
4348 }
4349
4350 static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
4351 {
4352         int sp, ret = 0;
4353         struct intel_iommu *iommu = dmaru->iommu;
4354
4355         if (g_iommus[iommu->seq_id])
4356                 return 0;
4357
4358         if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
4359                 pr_warn("%s: Doesn't support hardware pass through.\n",
4360                         iommu->name);
4361                 return -ENXIO;
4362         }
4363         if (!ecap_sc_support(iommu->ecap) &&
4364             domain_update_iommu_snooping(iommu)) {
4365                 pr_warn("%s: Doesn't support snooping.\n",
4366                         iommu->name);
4367                 return -ENXIO;
4368         }
4369         sp = domain_update_iommu_superpage(iommu) - 1;
4370         if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
4371                 pr_warn("%s: Doesn't support large page.\n",
4372                         iommu->name);
4373                 return -ENXIO;
4374         }
4375
4376         /*
4377          * Disable translation if already enabled prior to OS handover.
4378          */
4379         if (iommu->gcmd & DMA_GCMD_TE)
4380                 iommu_disable_translation(iommu);
4381
4382         g_iommus[iommu->seq_id] = iommu;
4383         ret = iommu_init_domains(iommu);
4384         if (ret == 0)
4385                 ret = iommu_alloc_root_entry(iommu);
4386         if (ret)
4387                 goto out;
4388
4389 #ifdef CONFIG_INTEL_IOMMU_SVM
4390         if (pasid_enabled(iommu))
4391                 intel_svm_alloc_pasid_tables(iommu);
4392 #endif
4393
4394         if (dmaru->ignored) {
4395                 /*
4396                  * we always have to disable PMRs or DMA may fail on this device
4397                  */
4398                 if (force_on)
4399                         iommu_disable_protect_mem_regions(iommu);
4400                 return 0;
4401         }
4402
4403         intel_iommu_init_qi(iommu);
4404         iommu_flush_write_buffer(iommu);
4405
4406 #ifdef CONFIG_INTEL_IOMMU_SVM
4407         if (pasid_enabled(iommu) && ecap_prs(iommu->ecap)) {
4408                 ret = intel_svm_enable_prq(iommu);
4409                 if (ret)
4410                         goto disable_iommu;
4411         }
4412 #endif
4413         ret = dmar_set_interrupt(iommu);
4414         if (ret)
4415                 goto disable_iommu;
4416
4417         iommu_set_root_entry(iommu);
4418         iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
4419         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
4420         iommu_enable_translation(iommu);
4421
4422         iommu_disable_protect_mem_regions(iommu);
4423         return 0;
4424
4425 disable_iommu:
4426         disable_dmar_iommu(iommu);
4427 out:
4428         free_dmar_iommu(iommu);
4429         return ret;
4430 }
4431
4432 int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
4433 {
4434         int ret = 0;
4435         struct intel_iommu *iommu = dmaru->iommu;
4436
4437         if (!intel_iommu_enabled)
4438                 return 0;
4439         if (iommu == NULL)
4440                 return -EINVAL;
4441
4442         if (insert) {
4443                 ret = intel_iommu_add(dmaru);
4444         } else {
4445                 disable_dmar_iommu(iommu);
4446                 free_dmar_iommu(iommu);
4447         }
4448
4449         return ret;
4450 }
4451
4452 static void intel_iommu_free_dmars(void)
4453 {
4454         struct dmar_rmrr_unit *rmrru, *rmrr_n;
4455         struct dmar_atsr_unit *atsru, *atsr_n;
4456
4457         list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
4458                 list_del(&rmrru->list);
4459                 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
4460                 kfree(rmrru);
4461         }
4462
4463         list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
4464                 list_del(&atsru->list);
4465                 intel_iommu_free_atsr(atsru);
4466         }
4467 }
4468
4469 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
4470 {
4471         int i, ret = 1;
4472         struct pci_bus *bus;
4473         struct pci_dev *bridge = NULL;
4474         struct device *tmp;
4475         struct acpi_dmar_atsr *atsr;
4476         struct dmar_atsr_unit *atsru;
4477
4478         dev = pci_physfn(dev);
4479         for (bus = dev->bus; bus; bus = bus->parent) {
4480                 bridge = bus->self;
4481                 /* If it's an integrated device, allow ATS */
4482                 if (!bridge)
4483                         return 1;
4484                 /* Connected via non-PCIe: no ATS */
4485                 if (!pci_is_pcie(bridge) ||
4486                     pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE)
4487                         return 0;
4488                 /* If we found the root port, look it up in the ATSR */
4489                 if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
4490                         break;
4491         }
4492
4493         rcu_read_lock();
4494         list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
4495                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4496                 if (atsr->segment != pci_domain_nr(dev->bus))
4497                         continue;
4498
4499                 for_each_dev_scope(atsru->devices, atsru->devices_cnt, i, tmp)
4500                         if (tmp == &bridge->dev)
4501                                 goto out;
4502
4503                 if (atsru->include_all)
4504                         goto out;
4505         }
4506         ret = 0;
4507 out:
4508         rcu_read_unlock();
4509
4510         return ret;
4511 }
4512
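/*
 * PCI hotplug notification: keep the device-scope lists of the cached RMRR
 * and ATSR units in sync as devices are added to or removed from the system.
 */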
4513 int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4514 {
4515         int ret = 0;
4516         struct dmar_rmrr_unit *rmrru;
4517         struct dmar_atsr_unit *atsru;
4518         struct acpi_dmar_atsr *atsr;
4519         struct acpi_dmar_reserved_memory *rmrr;
4520
4521         if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
4522                 return 0;
4523
4524         list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
4525                 rmrr = container_of(rmrru->hdr,
4526                                     struct acpi_dmar_reserved_memory, header);
4527                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4528                         ret = dmar_insert_dev_scope(info, (void *)(rmrr + 1),
4529                                 ((void *)rmrr) + rmrr->header.length,
4530                                 rmrr->segment, rmrru->devices,
4531                                 rmrru->devices_cnt);
4532                         if (ret < 0)
4533                                 return ret;
4534                 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4535                         dmar_remove_dev_scope(info, rmrr->segment,
4536                                 rmrru->devices, rmrru->devices_cnt);
4537                 }
4538         }
4539
4540         list_for_each_entry(atsru, &dmar_atsr_units, list) {
4541                 if (atsru->include_all)
4542                         continue;
4543
4544                 atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
4545                 if (info->event == BUS_NOTIFY_ADD_DEVICE) {
4546                         ret = dmar_insert_dev_scope(info, (void *)(atsr + 1),
4547                                         (void *)atsr + atsr->header.length,
4548                                         atsr->segment, atsru->devices,
4549                                         atsru->devices_cnt);
4550                         if (ret > 0)
4551                                 break;
4552                         else if (ret < 0)
4553                                 return ret;
4554                 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4555                         if (dmar_remove_dev_scope(info, atsr->segment,
4556                                         atsru->devices, atsru->devices_cnt))
4557                                 break;
4558                 }
4559         }
4560
4561         return 0;
4562 }
4563
4564 /*
4565  * Here we only respond to the action of a device being unbound from its driver.
4566  *
4567  * A newly added device is not attached to its DMAR domain here yet; that will
4568  * happen when the device is first mapped to an IOVA.
4569  */
4570 static int device_notifier(struct notifier_block *nb,
4571                                   unsigned long action, void *data)
4572 {
4573         struct device *dev = data;
4574         struct dmar_domain *domain;
4575
4576         if (iommu_dummy(dev))
4577                 return 0;
4578
4579         if (action != BUS_NOTIFY_REMOVED_DEVICE)
4580                 return 0;
4581
4582         domain = find_domain(dev);
4583         if (!domain)
4584                 return 0;
4585
4586         dmar_remove_one_dev_info(domain, dev);
4587         if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices))
4588                 domain_exit(domain);
4589
4590         return 0;
4591 }
4592
4593 static struct notifier_block device_nb = {
4594         .notifier_call = device_notifier,
4595 };
4596
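/*
 * Memory hotplug callback for the static identity (si) domain: extend the
 * identity map when memory goes online, and unmap the corresponding IOVA
 * range (flushing the IOTLBs) when it goes offline again.
 */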
4597 static int intel_iommu_memory_notifier(struct notifier_block *nb,
4598                                        unsigned long val, void *v)
4599 {
4600         struct memory_notify *mhp = v;
4601         unsigned long long start, end;
4602         unsigned long start_vpfn, last_vpfn;
4603
4604         switch (val) {
4605         case MEM_GOING_ONLINE:
4606                 start = (unsigned long long)mhp->start_pfn << PAGE_SHIFT;
4607                 end = ((unsigned long long)(mhp->start_pfn + mhp->nr_pages) << PAGE_SHIFT) - 1;
4608                 if (iommu_domain_identity_map(si_domain, start, end)) {
4609                         pr_warn("Failed to build identity map for [%llx-%llx]\n",
4610                                 start, end);
4611                         return NOTIFY_BAD;
4612                 }
4613                 break;
4614
4615         case MEM_OFFLINE:
4616         case MEM_CANCEL_ONLINE:
4617                 start_vpfn = mm_to_dma_pfn(mhp->start_pfn);
4618                 last_vpfn = mm_to_dma_pfn(mhp->start_pfn + mhp->nr_pages - 1);
4619                 while (start_vpfn <= last_vpfn) {
4620                         struct iova *iova;
4621                         struct dmar_drhd_unit *drhd;
4622                         struct intel_iommu *iommu;
4623                         struct page *freelist;
4624
4625                         iova = find_iova(&si_domain->iovad, start_vpfn);
4626                         if (iova == NULL) {
4627                                 pr_debug("Failed to get IOVA for PFN %lx\n",
4628                                          start_vpfn);
4629                                 break;
4630                         }
4631
4632                         iova = split_and_remove_iova(&si_domain->iovad, iova,
4633                                                      start_vpfn, last_vpfn);
4634                         if (iova == NULL) {
4635                                 pr_warn("Failed to split IOVA PFN [%lx-%lx]\n",
4636                                         start_vpfn, last_vpfn);
4637                                 return NOTIFY_BAD;
4638                         }
4639
4640                         freelist = domain_unmap(si_domain, iova->pfn_lo,
4641                                                iova->pfn_hi);
4642
4643                         rcu_read_lock();
4644                         for_each_active_iommu(iommu, drhd)
4645                                 iommu_flush_iotlb_psi(iommu, si_domain,
4646                                         iova->pfn_lo, iova_size(iova),
4647                                         !freelist, 0);
4648                         rcu_read_unlock();
4649                         dma_free_pagelist(freelist);
4650
4651                         start_vpfn = iova->pfn_hi + 1;
4652                         free_iova_mem(iova);
4653                 }
4654                 break;
4655         }
4656
4657         return NOTIFY_OK;
4658 }
4659
4660 static struct notifier_block intel_iommu_memory_nb = {
4661         .notifier_call = intel_iommu_memory_notifier,
4662         .priority = 0
4663 };
4664
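/*
 * Release the per-CPU IOVA caches of every domain on every IOMMU back to
 * the IOVA allocator.  Called from the CPU notifier below when a CPU dies
 * so its cached ranges are not stranded.
 */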
4665 static void free_all_cpu_cached_iovas(unsigned int cpu)
4666 {
4667         int i;
4668
4669         for (i = 0; i < g_num_of_iommus; i++) {
4670                 struct intel_iommu *iommu = g_iommus[i];
4671                 struct dmar_domain *domain;
4672                 int did;
4673
4674                 if (!iommu)
4675                         continue;
4676
4677                 for (did = 0; did < cap_ndoms(iommu->cap); did++) {
4678                         domain = get_iommu_domain(iommu, (u16)did);
4679
4680                         if (!domain)
4681                                 continue;
4682                         free_cpu_cached_iovas(cpu, &domain->iovad);
4683                 }
4684         }
4685 }
4686
4687 static int intel_iommu_cpu_notifier(struct notifier_block *nfb,
4688                                     unsigned long action, void *v)
4689 {
4690         unsigned int cpu = (unsigned long)v;
4691
4692         switch (action) {
4693         case CPU_DEAD:
4694         case CPU_DEAD_FROZEN:
4695                 free_all_cpu_cached_iovas(cpu);
4696                 flush_unmaps_timeout(cpu);
4697                 break;
4698         }
4699         return NOTIFY_OK;
4700 }
4701
4702 static struct notifier_block intel_iommu_cpu_nb = {
4703         .notifier_call = intel_iommu_cpu_notifier,
4704 };
4705
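/*
 * Read-only sysfs attributes exported for each DMAR unit via
 * intel_iommu_groups (typically visible under
 * /sys/class/iommu/dmar<N>/intel-iommu/).
 */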
4706 static ssize_t intel_iommu_show_version(struct device *dev,
4707                                         struct device_attribute *attr,
4708                                         char *buf)
4709 {
4710         struct intel_iommu *iommu = dev_get_drvdata(dev);
4711         u32 ver = readl(iommu->reg + DMAR_VER_REG);
4712         return sprintf(buf, "%d:%d\n",
4713                        DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver));
4714 }
4715 static DEVICE_ATTR(version, S_IRUGO, intel_iommu_show_version, NULL);
4716
4717 static ssize_t intel_iommu_show_address(struct device *dev,
4718                                         struct device_attribute *attr,
4719                                         char *buf)
4720 {
4721         struct intel_iommu *iommu = dev_get_drvdata(dev);
4722         return sprintf(buf, "%llx\n", iommu->reg_phys);
4723 }
4724 static DEVICE_ATTR(address, S_IRUGO, intel_iommu_show_address, NULL);
4725
4726 static ssize_t intel_iommu_show_cap(struct device *dev,
4727                                     struct device_attribute *attr,
4728                                     char *buf)
4729 {
4730         struct intel_iommu *iommu = dev_get_drvdata(dev);
4731         return sprintf(buf, "%llx\n", iommu->cap);
4732 }
4733 static DEVICE_ATTR(cap, S_IRUGO, intel_iommu_show_cap, NULL);
4734
4735 static ssize_t intel_iommu_show_ecap(struct device *dev,
4736                                     struct device_attribute *attr,
4737                                     char *buf)
4738 {
4739         struct intel_iommu *iommu = dev_get_drvdata(dev);
4740         return sprintf(buf, "%llx\n", iommu->ecap);
4741 }
4742 static DEVICE_ATTR(ecap, S_IRUGO, intel_iommu_show_ecap, NULL);
4743
4744 static ssize_t intel_iommu_show_ndoms(struct device *dev,
4745                                       struct device_attribute *attr,
4746                                       char *buf)
4747 {
4748         struct intel_iommu *iommu = dev_get_drvdata(dev);
4749         return sprintf(buf, "%ld\n", cap_ndoms(iommu->cap));
4750 }
4751 static DEVICE_ATTR(domains_supported, S_IRUGO, intel_iommu_show_ndoms, NULL);
4752
4753 static ssize_t intel_iommu_show_ndoms_used(struct device *dev,
4754                                            struct device_attribute *attr,
4755                                            char *buf)
4756 {
4757         struct intel_iommu *iommu = dev_get_drvdata(dev);
4758         return sprintf(buf, "%d\n", bitmap_weight(iommu->domain_ids,
4759                                                   cap_ndoms(iommu->cap)));
4760 }
4761 static DEVICE_ATTR(domains_used, S_IRUGO, intel_iommu_show_ndoms_used, NULL);
4762
4763 static struct attribute *intel_iommu_attrs[] = {
4764         &dev_attr_version.attr,
4765         &dev_attr_address.attr,
4766         &dev_attr_cap.attr,
4767         &dev_attr_ecap.attr,
4768         &dev_attr_domains_supported.attr,
4769         &dev_attr_domains_used.attr,
4770         NULL,
4771 };
4772
4773 static struct attribute_group intel_iommu_group = {
4774         .name = "intel-iommu",
4775         .attrs = intel_iommu_attrs,
4776 };
4777
4778 const struct attribute_group *intel_iommu_groups[] = {
4779         &intel_iommu_group,
4780         NULL,
4781 };
4782
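/*
 * Late initialization of the whole VT-d subsystem: parse the DMAR table,
 * set up every IOMMU and the DMA domains, switch the DMA API over to
 * intel_dma_ops and register the IOMMU ops, notifiers and sysfs devices.
 */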
4783 int __init intel_iommu_init(void)
4784 {
4785         int ret = -ENODEV;
4786         struct dmar_drhd_unit *drhd;
4787         struct intel_iommu *iommu;
4788
4789         /* VT-d is required for a TXT/tboot launch, so enforce that */
4790         force_on = tboot_force_iommu();
4791
4792         if (iommu_init_mempool()) {
4793                 if (force_on)
4794                         panic("tboot: Failed to initialize iommu memory\n");
4795                 return -ENOMEM;
4796         }
4797
4798         down_write(&dmar_global_lock);
4799         if (dmar_table_init()) {
4800                 if (force_on)
4801                         panic("tboot: Failed to initialize DMAR table\n");
4802                 goto out_free_dmar;
4803         }
4804
4805         if (dmar_dev_scope_init() < 0) {
4806                 if (force_on)
4807                         panic("tboot: Failed to initialize DMAR device scope\n");
4808                 goto out_free_dmar;
4809         }
4810
4811         if (no_iommu || dmar_disabled)
4812                 goto out_free_dmar;
4813
4814         if (list_empty(&dmar_rmrr_units))
4815                 pr_info("No RMRR found\n");
4816
4817         if (list_empty(&dmar_atsr_units))
4818                 pr_info("No ATSR found\n");
4819
4820         if (dmar_init_reserved_ranges()) {
4821                 if (force_on)
4822                         panic("tboot: Failed to reserve iommu ranges\n");
4823                 goto out_free_reserved_range;
4824         }
4825
4826         init_no_remapping_devices();
4827
4828         ret = init_dmars();
4829         if (ret) {
4830                 if (force_on)
4831                         panic("tboot: Failed to initialize DMARs\n");
4832                 pr_err("Initialization failed\n");
4833                 goto out_free_reserved_range;
4834         }
4835         up_write(&dmar_global_lock);
4836         pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
4837
4838 #ifdef CONFIG_SWIOTLB
4839         swiotlb = 0;
4840 #endif
4841         dma_ops = &intel_dma_ops;
4842
4843         init_iommu_pm_ops();
4844
4845         for_each_active_iommu(iommu, drhd)
4846                 iommu->iommu_dev = iommu_device_create(NULL, iommu,
4847                                                        intel_iommu_groups,
4848                                                        "%s", iommu->name);
4849
4850         bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
4851         bus_register_notifier(&pci_bus_type, &device_nb);
4852         if (si_domain && !hw_pass_through)
4853                 register_memory_notifier(&intel_iommu_memory_nb);
4854         register_hotcpu_notifier(&intel_iommu_cpu_nb);
4855
4856         intel_iommu_enabled = 1;
4857
4858         return 0;
4859
4860 out_free_reserved_range:
4861         put_iova_domain(&reserved_iova_list);
4862 out_free_dmar:
4863         intel_iommu_free_dmars();
4864         up_write(&dmar_global_lock);
4865         iommu_exit_mempool();
4866         return ret;
4867 }
4868
4869 static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
4870 {
4871         struct intel_iommu *iommu = opaque;
4872
4873         domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
4874         return 0;
4875 }
4876
4877 /*
4878  * NB - intel-iommu lacks any sort of reference counting for the users of
4879  * dependent devices.  If multiple endpoints have intersecting dependent
4880  * devices, unbinding the driver from any one of them will possibly leave
4881  * the others unable to operate.
4882  */
4883 static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
4884 {
4885         if (!iommu || !dev || !dev_is_pci(dev))
4886                 return;
4887
4888         pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
4889 }
4890
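/*
 * Detach one device from its domain: disable its device-IOTLB, clear its
 * context entries, unlink the tracking info and drop the domain's reference
 * on this IOMMU.  Caller must hold device_domain_lock.
 */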
4891 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
4892 {
4893         struct intel_iommu *iommu;
4894         unsigned long flags;
4895
4896         assert_spin_locked(&device_domain_lock);
4897
4898         if (WARN_ON(!info))
4899                 return;
4900
4901         iommu = info->iommu;
4902
4903         if (info->dev) {
4904                 iommu_disable_dev_iotlb(info);
4905                 domain_context_clear(iommu, info->dev);
4906         }
4907
4908         unlink_domain_info(info);
4909
4910         spin_lock_irqsave(&iommu->lock, flags);
4911         domain_detach_iommu(info->domain, iommu);
4912         spin_unlock_irqrestore(&iommu->lock, flags);
4913
4914         free_devinfo_mem(info);
4915 }
4916
4917 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4918                                      struct device *dev)
4919 {
4920         struct device_domain_info *info;
4921         unsigned long flags;
4922
4923         spin_lock_irqsave(&device_domain_lock, flags);
4924         info = dev->archdata.iommu;
4925         __dmar_remove_one_dev_info(info);
4926         spin_unlock_irqrestore(&device_domain_lock, flags);
4927 }
4928
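/*
 * Minimal setup for a domain allocated through the IOMMU API: initialize
 * its IOVA allocator, address-width bookkeeping and top-level page table.
 */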
4929 static int md_domain_init(struct dmar_domain *domain, int guest_width)
4930 {
4931         int adjust_width;
4932
4933         init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
4934                         DMA_32BIT_PFN);
4935         domain_reserve_special_ranges(domain);
4936
4937         /* calculate AGAW */
4938         domain->gaw = guest_width;
4939         adjust_width = guestwidth_to_adjustwidth(guest_width);
4940         domain->agaw = width_to_agaw(adjust_width);
4941
4942         domain->iommu_coherency = 0;
4943         domain->iommu_snooping = 0;
4944         domain->iommu_superpage = 0;
4945         domain->max_addr = 0;
4946
4947         /* always allocate the top pgd */
4948         domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
4949         if (!domain->pgd)
4950                 return -ENOMEM;
4951         domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
4952         return 0;
4953 }
4954
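/*
 * domain_alloc callback: only IOMMU_DOMAIN_UNMANAGED is supported; it is
 * backed by a "virtual machine" dmar_domain using the default address width.
 */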
4955 static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
4956 {
4957         struct dmar_domain *dmar_domain;
4958         struct iommu_domain *domain;
4959
4960         if (type != IOMMU_DOMAIN_UNMANAGED)
4961                 return NULL;
4962
4963         dmar_domain = alloc_domain(DOMAIN_FLAG_VIRTUAL_MACHINE);
4964         if (!dmar_domain) {
4965                 pr_err("Can't allocate dmar_domain\n");
4966                 return NULL;
4967         }
4968         if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
4969                 pr_err("Domain initialization failed\n");
4970                 domain_exit(dmar_domain);
4971                 return NULL;
4972         }
4973         domain_update_iommu_cap(dmar_domain);
4974
4975         domain = &dmar_domain->domain;
4976         domain->geometry.aperture_start = 0;
4977         domain->geometry.aperture_end   = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
4978         domain->geometry.force_aperture = true;
4979
4980         return domain;
4981 }
4982
4983 static void intel_iommu_domain_free(struct iommu_domain *domain)
4984 {
4985         domain_exit(to_dmar_domain(domain));
4986 }
4987
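/*
 * attach_dev callback: refuse devices whose RMRR regions must stay mapped,
 * detach any previous domain, and trim the domain's page-table depth to
 * what this IOMMU's AGAW can walk before adding the device.
 */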
4988 static int intel_iommu_attach_device(struct iommu_domain *domain,
4989                                      struct device *dev)
4990 {
4991         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
4992         struct intel_iommu *iommu;
4993         int addr_width;
4994         u8 bus, devfn;
4995
4996         if (device_is_rmrr_locked(dev)) {
4997                 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement.  Contact your platform vendor.\n");
4998                 return -EPERM;
4999         }
5000
5001         /* normally dev is not mapped */
5002         if (unlikely(domain_context_mapped(dev))) {
5003                 struct dmar_domain *old_domain;
5004
5005                 old_domain = find_domain(dev);
5006                 if (old_domain) {
5007                         rcu_read_lock();
5008                         dmar_remove_one_dev_info(old_domain, dev);
5009                         rcu_read_unlock();
5010
5011                         if (!domain_type_is_vm_or_si(old_domain) &&
5012                              list_empty(&old_domain->devices))
5013                                 domain_exit(old_domain);
5014                 }
5015         }
5016
5017         iommu = device_to_iommu(dev, &bus, &devfn);
5018         if (!iommu)
5019                 return -ENODEV;
5020
5021         /* check if this iommu agaw is sufficient for max mapped address */
5022         addr_width = agaw_to_width(iommu->agaw);
5023         if (addr_width > cap_mgaw(iommu->cap))
5024                 addr_width = cap_mgaw(iommu->cap);
5025
5026         if (dmar_domain->max_addr > (1LL << addr_width)) {
5027                 pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
5028                        __func__, addr_width,
5029                        dmar_domain->max_addr);
5030                 return -EFAULT;
5031         }
5032         dmar_domain->gaw = addr_width;
5033
5034         /*
5035          * Knock out extra levels of page tables if necessary
5036          */
5037         while (iommu->agaw < dmar_domain->agaw) {
5038                 struct dma_pte *pte;
5039
5040                 pte = dmar_domain->pgd;
5041                 if (dma_pte_present(pte)) {
5042                         dmar_domain->pgd = (struct dma_pte *)
5043                                 phys_to_virt(dma_pte_addr(pte));
5044                         free_pgtable_page(pte);
5045                 }
5046                 dmar_domain->agaw--;
5047         }
5048
5049         return domain_add_dev_info(dmar_domain, dev);
5050 }
5051
5052 static void intel_iommu_detach_device(struct iommu_domain *domain,
5053                                       struct device *dev)
5054 {
5055         dmar_remove_one_dev_info(to_dmar_domain(domain), dev);
5056 }
5057
5058 static int intel_iommu_map(struct iommu_domain *domain,
5059                            unsigned long iova, phys_addr_t hpa,
5060                            size_t size, int iommu_prot)
5061 {
5062         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5063         u64 max_addr;
5064         int prot = 0;
5065         int ret;
5066
5067         if (iommu_prot & IOMMU_READ)
5068                 prot |= DMA_PTE_READ;
5069         if (iommu_prot & IOMMU_WRITE)
5070                 prot |= DMA_PTE_WRITE;
5071         if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
5072                 prot |= DMA_PTE_SNP;
5073
5074         max_addr = iova + size;
5075         if (dmar_domain->max_addr < max_addr) {
5076                 u64 end;
5077
5078                 /* check if minimum agaw is sufficient for mapped address */
5079                 end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
5080                 if (end < max_addr) {
5081                         pr_err("%s: iommu width (%d) is not sufficient for the mapped address (%llx)\n",
5082                                __func__, dmar_domain->gaw,
5083                                max_addr);
5084                         return -EFAULT;
5085                 }
5086                 dmar_domain->max_addr = max_addr;
5087         }
5088         /* Round up size to the next multiple of VTD_PAGE_SIZE, if it and
5089            the low bits of hpa would take us onto the next page */
5090         size = aligned_nrpages(hpa, size);
5091         ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
5092                                  hpa >> VTD_PAGE_SHIFT, size, prot);
5093         return ret;
5094 }
5095
5096 static size_t intel_iommu_unmap(struct iommu_domain *domain,
5097                                 unsigned long iova, size_t size)
5098 {
5099         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5100         struct page *freelist = NULL;
5101         struct intel_iommu *iommu;
5102         unsigned long start_pfn, last_pfn;
5103         unsigned int npages;
5104         int iommu_id, level = 0;
5105
5106         /* Cope with horrid API which requires us to unmap more than the
5107            size argument if it happens to be a large-page mapping. */
5108         BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
5109
5110         if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
5111                 size = VTD_PAGE_SIZE << level_to_offset_bits(level);
5112
5113         start_pfn = iova >> VTD_PAGE_SHIFT;
5114         last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
5115
5116         freelist = domain_unmap(dmar_domain, start_pfn, last_pfn);
5117
5118         npages = last_pfn - start_pfn + 1;
5119
5120         for_each_domain_iommu(iommu_id, dmar_domain) {
5121                 iommu = g_iommus[iommu_id];
5122
5123                 iommu_flush_iotlb_psi(iommu, dmar_domain,
5124                                       start_pfn, npages, !freelist, 0);
5125         }
5126
5127         dma_free_pagelist(freelist);
5128
5129         if (dmar_domain->max_addr == iova + size)
5130                 dmar_domain->max_addr = iova;
5131
5132         return size;
5133 }
5134
5135 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
5136                                             dma_addr_t iova)
5137 {
5138         struct dmar_domain *dmar_domain = to_dmar_domain(domain);
5139         struct dma_pte *pte;
5140         int level = 0;
5141         u64 phys = 0;
5142
5143         pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
5144         if (pte)
5145                 phys = dma_pte_addr(pte);
5146
5147         return phys;
5148 }
5149
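/*
 * Report whether snooped (cache-coherent) mappings and interrupt remapping
 * are available on this platform.
 */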
5150 static bool intel_iommu_capable(enum iommu_cap cap)
5151 {
5152         if (cap == IOMMU_CAP_CACHE_COHERENCY)
5153                 return domain_update_iommu_snooping(NULL) == 1;
5154         if (cap == IOMMU_CAP_INTR_REMAP)
5155                 return irq_remapping_enabled == 1;
5156
5157         return false;
5158 }
5159
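/*
 * add_device/remove_device callbacks: link the device to its IOMMU's sysfs
 * node and place it into (or remove it from) an IOMMU group.
 */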
5160 static int intel_iommu_add_device(struct device *dev)
5161 {
5162         struct intel_iommu *iommu;
5163         struct iommu_group *group;
5164         u8 bus, devfn;
5165
5166         iommu = device_to_iommu(dev, &bus, &devfn);
5167         if (!iommu)
5168                 return -ENODEV;
5169
5170         iommu_device_link(iommu->iommu_dev, dev);
5171
5172         group = iommu_group_get_for_dev(dev);
5173
5174         if (IS_ERR(group))
5175                 return PTR_ERR(group);
5176
5177         iommu_group_put(group);
5178         return 0;
5179 }
5180
5181 static void intel_iommu_remove_device(struct device *dev)
5182 {
5183         struct intel_iommu *iommu;
5184         u8 bus, devfn;
5185
5186         iommu = device_to_iommu(dev, &bus, &devfn);
5187         if (!iommu)
5188                 return;
5189
5190         iommu_group_remove_device(dev);
5191
5192         iommu_device_unlink(iommu->iommu_dev, dev);
5193 }
5194
5195 #ifdef CONFIG_INTEL_IOMMU_SVM
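/*
 * Enable PASID support in @sdev's context entry: point it at the PASID
 * (state) tables, convert pass-through entries to a PASID-aware translation
 * type and flush the context cache.  Also records the source-id and
 * device-IOTLB invalidation parameters the SVM code needs later.
 */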
5196 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
5197 {
5198         struct device_domain_info *info;
5199         struct context_entry *context;
5200         struct dmar_domain *domain;
5201         unsigned long flags;
5202         u64 ctx_lo;
5203         int ret;
5204
5205         domain = get_valid_domain_for_dev(sdev->dev);
5206         if (!domain)
5207                 return -EINVAL;
5208
5209         spin_lock_irqsave(&device_domain_lock, flags);
5210         spin_lock(&iommu->lock);
5211
5212         ret = -EINVAL;
5213         info = sdev->dev->archdata.iommu;
5214         if (!info || !info->pasid_supported)
5215                 goto out;
5216
5217         context = iommu_context_addr(iommu, info->bus, info->devfn, 0);
5218         if (WARN_ON(!context))
5219                 goto out;
5220
5221         ctx_lo = context[0].lo;
5222
5223         sdev->did = domain->iommu_did[iommu->seq_id];
5224         sdev->sid = PCI_DEVID(info->bus, info->devfn);
5225
5226         if (!(ctx_lo & CONTEXT_PASIDE)) {
5227                 context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
5228                 context[1].lo = (u64)virt_to_phys(iommu->pasid_table) | ecap_pss(iommu->ecap);
5229                 wmb();
5230                 /* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
5231                  * extended to permit requests-with-PASID if the PASIDE bit
5232                  * is set, which makes sense. For CONTEXT_TT_PASS_THROUGH,
5233                  * however, the PASIDE bit is ignored and requests-with-PASID
5234                  * are unconditionally blocked, which makes less sense.
5235                  * So convert from CONTEXT_TT_PASS_THROUGH to one of the new
5236                  * "guest mode" translation types depending on whether ATS
5237                  * is available or not. Annoyingly, we can't use the new
5238                  * modes *unless* PASIDE is set. */
5239                 if ((ctx_lo & CONTEXT_TT_MASK) == (CONTEXT_TT_PASS_THROUGH << 2)) {
5240                         ctx_lo &= ~CONTEXT_TT_MASK;
5241                         if (info->ats_supported)
5242                                 ctx_lo |= CONTEXT_TT_PT_PASID_DEV_IOTLB << 2;
5243                         else
5244                                 ctx_lo |= CONTEXT_TT_PT_PASID << 2;
5245                 }
5246                 ctx_lo |= CONTEXT_PASIDE;
5247                 if (iommu->pasid_state_table)
5248                         ctx_lo |= CONTEXT_DINVE;
5249                 if (info->pri_supported)
5250                         ctx_lo |= CONTEXT_PRS;
5251                 context[0].lo = ctx_lo;
5252                 wmb();
5253                 iommu->flush.flush_context(iommu, sdev->did, sdev->sid,
5254                                            DMA_CCMD_MASK_NOBIT,
5255                                            DMA_CCMD_DEVICE_INVL);
5256         }
5257
5258         /* Enable PASID support in the device, if it wasn't already */
5259         if (!info->pasid_enabled)
5260                 iommu_enable_dev_iotlb(info);
5261
5262         if (info->ats_enabled) {
5263                 sdev->dev_iotlb = 1;
5264                 sdev->qdep = info->ats_qdep;
5265                 if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
5266                         sdev->qdep = 0;
5267         }
5268         ret = 0;
5269
5270  out:
5271         spin_unlock(&iommu->lock);
5272         spin_unlock_irqrestore(&device_domain_lock, flags);
5273
5274         return ret;
5275 }
5276
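/*
 * Look up the IOMMU behind @dev for the SVM code, verifying that the unit
 * actually has PASID tables allocated.
 */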
5277 struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
5278 {
5279         struct intel_iommu *iommu;
5280         u8 bus, devfn;
5281
5282         if (iommu_dummy(dev)) {
5283                 dev_warn(dev,
5284                          "No IOMMU translation for device; cannot enable SVM\n");
5285                 return NULL;
5286         }
5287
5288         iommu = device_to_iommu(dev, &bus, &devfn);
5289         if (!iommu) {
5290                 dev_err(dev, "No IOMMU for device; cannot enable SVM\n");
5291                 return NULL;
5292         }
5293
5294         if (!iommu->pasid_table) {
5295                 dev_err(dev, "PASID not enabled on IOMMU; cannot enable SVM\n");
5296                 return NULL;
5297         }
5298
5299         return iommu;
5300 }
5301 #endif /* CONFIG_INTEL_IOMMU_SVM */
5302
5303 static const struct iommu_ops intel_iommu_ops = {
5304         .capable        = intel_iommu_capable,
5305         .domain_alloc   = intel_iommu_domain_alloc,
5306         .domain_free    = intel_iommu_domain_free,
5307         .attach_dev     = intel_iommu_attach_device,
5308         .detach_dev     = intel_iommu_detach_device,
5309         .map            = intel_iommu_map,
5310         .unmap          = intel_iommu_unmap,
5311         .map_sg         = default_iommu_map_sg,
5312         .iova_to_phys   = intel_iommu_iova_to_phys,
5313         .add_device     = intel_iommu_add_device,
5314         .remove_device  = intel_iommu_remove_device,
5315         .device_group   = pci_device_group,
5316         .pgsize_bitmap  = INTEL_IOMMU_PGSIZES,
5317 };
5318
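/*
 * Illustrative sketch (not part of this driver): roughly how the callbacks
 * above are reached through the generic IOMMU API.  "pdev" stands for some
 * already-probed PCI device and is hypothetical; error handling is omitted.
 *
 *      struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
 *
 *      iommu_attach_device(dom, &pdev->dev);            ->attach_dev
 *      iommu_map(dom, iova, phys, SZ_4K,
 *                IOMMU_READ | IOMMU_WRITE);             ->map
 *      phys = iommu_iova_to_phys(dom, iova);            ->iova_to_phys
 *      iommu_unmap(dom, iova, SZ_4K);                   ->unmap
 *      iommu_detach_device(dom, &pdev->dev);            ->detach_dev
 *      iommu_domain_free(dom);                          ->domain_free
 */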
5319 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
5320 {
5321         /* G4x/GM45 integrated gfx dmar support is totally busted. */
5322         pr_info("Disabling IOMMU for graphics on this chipset\n");
5323         dmar_map_gfx = 0;
5324 }
5325
5326 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_g4x_gfx);
5327 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_g4x_gfx);
5328 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_g4x_gfx);
5329 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_g4x_gfx);
5330 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_g4x_gfx);
5331 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_g4x_gfx);
5332 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_g4x_gfx);
5333
5334 static void quirk_iommu_rwbf(struct pci_dev *dev)
5335 {
5336         /*
5337          * Mobile 4 Series Chipset neglects to set RWBF capability,
5338          * but needs it. Same seems to hold for the desktop versions.
5339          */
5340         pr_info("Forcing write-buffer flush capability\n");
5341         rwbf_quirk = 1;
5342 }
5343
5344 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
5345 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
5346 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
5347 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
5348 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
5349 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
5350 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
5351
5352 #define GGC 0x52
5353 #define GGC_MEMORY_SIZE_MASK    (0xf << 8)
5354 #define GGC_MEMORY_SIZE_NONE    (0x0 << 8)
5355 #define GGC_MEMORY_SIZE_1M      (0x1 << 8)
5356 #define GGC_MEMORY_SIZE_2M      (0x3 << 8)
5357 #define GGC_MEMORY_VT_ENABLED   (0x8 << 8)
5358 #define GGC_MEMORY_SIZE_2M_VT   (0x9 << 8)
5359 #define GGC_MEMORY_SIZE_3M_VT   (0xa << 8)
5360 #define GGC_MEMORY_SIZE_4M_VT   (0xb << 8)
5361
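/*
 * Ironlake/Calpella graphics: if the BIOS allocated no stolen memory for a
 * shadow GTT (GGC_MEMORY_VT_ENABLED clear), the IGD cannot sit behind the
 * IOMMU, so translation for graphics is disabled; otherwise strict
 * (unbatched) IOTLB flushing is forced, since the GPU must be idle when its
 * mappings are flushed.
 */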
5362 static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
5363 {
5364         unsigned short ggc;
5365
5366         if (pci_read_config_word(dev, GGC, &ggc))
5367                 return;
5368
5369         if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
5370                 pr_info("BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
5371                 dmar_map_gfx = 0;
5372         } else if (dmar_map_gfx) {
5373                 /* we have to ensure the gfx device is idle before we flush */
5374                 pr_info("Disabling batched IOTLB flush on Ironlake\n");
5375                 intel_iommu_strict = 1;
5376         }
5377 }
5378 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
5379 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt);
5380 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt);
5381 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt);
5382
5383 /* On Tylersburg chipsets, some BIOSes have been known to enable the
5384    ISOCH DMAR unit for the Azalia sound device, but not give it any
5385    TLB entries, which causes it to deadlock. Check for that.  We do
5386    this in a function called from init_dmars(), instead of in a PCI
5387    quirk, because we don't want to print the obnoxious "BIOS broken"
5388    message if VT-d is actually disabled.
5389 */
5390 static void __init check_tylersburg_isoch(void)
5391 {
5392         struct pci_dev *pdev;
5393         uint32_t vtisochctrl;
5394
5395         /* If there's no Azalia in the system anyway, forget it. */
5396         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
5397         if (!pdev)
5398                 return;
5399         pci_dev_put(pdev);
5400
5401         /* System Management Registers. Might be hidden, in which case
5402            we can't do the sanity check. But that's OK, because the
5403            known-broken BIOSes _don't_ actually hide it, so far. */
5404         pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x342e, NULL);
5405         if (!pdev)
5406                 return;
5407
5408         if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
5409                 pci_dev_put(pdev);
5410                 return;
5411         }
5412
5413         pci_dev_put(pdev);
5414
5415         /* If Azalia DMA is routed to the non-isoch DMAR unit, fine. */
5416         if (vtisochctrl & 1)
5417                 return;
5418
5419         /* Drop all bits other than the number of TLB entries */
5420         vtisochctrl &= 0x1c;
5421
5422         /* If we have the recommended number of TLB entries (16), fine. */
5423         if (vtisochctrl == 0x10)
5424                 return;
5425
5426         /* Zero TLB entries? You get to ride the short bus to school. */
5427         if (!vtisochctrl) {
5428                 WARN(1, "Your BIOS is broken; DMA routed to ISOCH DMAR unit but no TLB space.\n"
5429                      "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
5430                      dmi_get_system_info(DMI_BIOS_VENDOR),
5431                      dmi_get_system_info(DMI_BIOS_VERSION),
5432                      dmi_get_system_info(DMI_PRODUCT_VERSION));
5433                 iommu_identity_mapping |= IDENTMAP_AZALIA;
5434                 return;
5435         }
5436
5437         pr_warn("Recommended TLB entries for ISOCH unit is 16; your BIOS set %d\n",
5438                vtisochctrl);
5439 }