2 * PCIe host controller driver for TEGRA SOCs
4 * Copyright (c) 2010, CompuLab, Ltd.
5 * Author: Mike Rapoport <mike@compulab.co.il>
7 * Based on NVIDIA PCIe driver
8 * Copyright (c) 2008-2014, NVIDIA Corporation. All rights reserved.
10 * Bits taken from arch/arm/mach-dove/pcie.c
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
27 #include <linux/kernel.h>
28 #include <linux/pci.h>
29 #include <linux/interrupt.h>
30 #include <linux/irq.h>
31 #include <linux/clk.h>
32 #include <linux/delay.h>
33 #include <linux/export.h>
34 #include <linux/clk/tegra.h>
35 #include <linux/msi.h>
36 #include <linux/slab.h>
37 #include <linux/platform_device.h>
38 #include <linux/regulator/consumer.h>
39 #include <linux/workqueue.h>
40 #include <linux/gpio.h>
41 #include <linux/clk.h>
42 #include <linux/clk/tegra.h>
43 #include <linux/async.h>
44 #include <linux/vmalloc.h>
45 #include <linux/pm_runtime.h>
46 #include <linux/tegra-powergate.h>
47 #include <linux/tegra-soc.h>
48 #include <linux/pci-tegra.h>
49 #include <linux/of_device.h>
50 #include <linux/of_gpio.h>
51 #include <linux/tegra_pm_domains.h>
53 #include <asm/sizes.h>
54 #include <asm/mach/pci.h>
57 #include <mach/tegra_usb_pad_ctrl.h>
58 #include <mach/io_dpd.h>
59 #include <mach/pinmux.h>
60 #include <mach/pinmux-t12.h>
62 /* register definitions */
63 #define AFI_OFFSET 0x3800
64 #define PADS_OFFSET 0x3000
65 #define RP_OFFSET 0x1000
67 #define AFI_AXI_BAR0_SZ 0x00
68 #define AFI_AXI_BAR1_SZ 0x04
69 #define AFI_AXI_BAR2_SZ 0x08
70 #define AFI_AXI_BAR3_SZ 0x0c
71 #define AFI_AXI_BAR4_SZ 0x10
72 #define AFI_AXI_BAR5_SZ 0x14
74 #define AFI_AXI_BAR0_START 0x18
75 #define AFI_AXI_BAR1_START 0x1c
76 #define AFI_AXI_BAR2_START 0x20
77 #define AFI_AXI_BAR3_START 0x24
78 #define AFI_AXI_BAR4_START 0x28
79 #define AFI_AXI_BAR5_START 0x2c
81 #define AFI_FPCI_BAR0 0x30
82 #define AFI_FPCI_BAR1 0x34
83 #define AFI_FPCI_BAR2 0x38
84 #define AFI_FPCI_BAR3 0x3c
85 #define AFI_FPCI_BAR4 0x40
86 #define AFI_FPCI_BAR5 0x44
88 #define AFI_CACHE_BAR0_SZ 0x48
89 #define AFI_CACHE_BAR0_ST 0x4c
90 #define AFI_CACHE_BAR1_SZ 0x50
91 #define AFI_CACHE_BAR1_ST 0x54
93 #define AFI_MSI_BAR_SZ 0x60
94 #define AFI_MSI_FPCI_BAR_ST 0x64
95 #define AFI_MSI_AXI_BAR_ST 0x68
97 #define AFI_MSI_VEC0_0 0x6c
98 #define AFI_MSI_VEC1_0 0x70
99 #define AFI_MSI_VEC2_0 0x74
100 #define AFI_MSI_VEC3_0 0x78
101 #define AFI_MSI_VEC4_0 0x7c
102 #define AFI_MSI_VEC5_0 0x80
103 #define AFI_MSI_VEC6_0 0x84
104 #define AFI_MSI_VEC7_0 0x88
106 #define AFI_MSI_EN_VEC0_0 0x8c
107 #define AFI_MSI_EN_VEC1_0 0x90
108 #define AFI_MSI_EN_VEC2_0 0x94
109 #define AFI_MSI_EN_VEC3_0 0x98
110 #define AFI_MSI_EN_VEC4_0 0x9c
111 #define AFI_MSI_EN_VEC5_0 0xa0
112 #define AFI_MSI_EN_VEC6_0 0xa4
113 #define AFI_MSI_EN_VEC7_0 0xa8
115 #define AFI_CONFIGURATION 0xac
116 #define AFI_CONFIGURATION_EN_FPCI (1 << 0)
118 #define AFI_FPCI_ERROR_MASKS 0xb0
120 #define AFI_INTR_MASK 0xb4
121 #define AFI_INTR_MASK_INT_MASK (1 << 0)
122 #define AFI_INTR_MASK_MSI_MASK (1 << 8)
124 #define AFI_INTR_CODE 0xb8
125 #define AFI_INTR_CODE_MASK 0x1f
126 #define AFI_INTR_MASTER_ABORT 4
127 #define AFI_INTR_LEGACY 6
128 #define AFI_INTR_PRSNT_SENSE 10
130 #define AFI_INTR_SIGNATURE 0xbc
131 #define AFI_SM_INTR_ENABLE 0xc4
133 #define AFI_AFI_INTR_ENABLE 0xc8
134 #define AFI_INTR_EN_INI_SLVERR (1 << 0)
135 #define AFI_INTR_EN_INI_DECERR (1 << 1)
136 #define AFI_INTR_EN_TGT_SLVERR (1 << 2)
137 #define AFI_INTR_EN_TGT_DECERR (1 << 3)
138 #define AFI_INTR_EN_TGT_WRERR (1 << 4)
139 #define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
140 #define AFI_INTR_EN_AXI_DECERR (1 << 6)
141 #define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
142 #define AFI_INTR_EN_PRSNT_SENSE (1 << 8)
144 #define AFI_PCIE_PME 0x0f0
145 #define AFI_PCIE_PME_TURN_OFF 0x101
146 #define AFI_PCIE_PME_ACK 0x420
148 #define AFI_PCIE_CONFIG 0x0f8
149 #define AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE (1 << 1)
150 #define AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE (1 << 2)
151 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
152 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20)
153 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20)
155 #define AFI_FUSE 0x104
156 #define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
158 #define AFI_PEX0_CTRL 0x110
159 #define AFI_PEX1_CTRL 0x118
160 #define AFI_PEX_CTRL_RST (1 << 0)
161 #define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
162 #define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
163 #define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4)
165 #define AFI_PLLE_CONTROL 0x160
166 #define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
167 #define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
169 #define AFI_PEXBIAS_CTRL_0 0x168
170 #define AFI_WR_SCRATCH_0 0x120
171 #define AFI_WR_SCRATCH_0_RESET_VAL 0x00202020
172 #define AFI_WR_SCRATCH_0_DEFAULT_VAL 0x00000000
174 #define AFI_MSG_0 0x190
175 #define AFI_MSG_PM_PME_MASK 0x00100010
176 #define AFI_MSG_INTX_MASK 0x1f001f00
177 #define AFI_MSG_PM_PME0 (1 << 4)
179 #define RP_VEND_XP 0x00000F00
180 #define RP_VEND_XP_DL_UP (1 << 30)
182 #define RP_LINK_CONTROL_STATUS 0x00000090
184 #define PADS_REFCLK_CFG0 0x000000C8
185 #define PADS_REFCLK_CFG1 0x000000CC
186 #define PADS_REFCLK_BIAS 0x000000D0
188 #define NV_PCIE2_RP_RSR 0x000000A0
189 #define NV_PCIE2_RP_RSR_PMESTAT (1 << 16)
191 #define NV_PCIE2_RP_INTR_BCR 0x0000003C
192 #define NV_PCIE2_RP_INTR_BCR_INTR_LINE (0xFF << 0)
194 #define NV_PCIE2_RP_PRIV_MISC 0x00000FE0
195 #define PCIE2_RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0)
196 #define PCIE2_RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0)
197 #define PCIE2_RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD (0xF << 16)
198 #define PCIE2_RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE (1 << 23)
199 #define PCIE2_RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD (0xF << 24)
200 #define PCIE2_RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE (1 << 31)
202 #define NV_PCIE2_RP_VEND_XP1 0x00000F04
203 #define NV_PCIE2_RP_VEND_XP1_LINK_PVT_CTL_L1_ASPM_SUPPORT (1 << 21)
205 #define NV_PCIE2_RP_VEND_CTL1 0x00000F48
206 #define PCIE2_RP_VEND_CTL1_ERPT (1 << 13)
208 #define NV_PCIE2_RP_VEND_XP_BIST 0x00000F4C
209 #define PCIE2_RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE (1 << 28)
211 #define NV_PCIE2_RP_ECTL_1_R2 0x00000FD8
212 #define PCIE2_RP_ECTL_1_R2_TX_CMADJ_1C (0xD << 8)
213 #define PCIE2_RP_ECTL_1_R2_TX_DRV_CNTL_1C (0x3 << 28)
216 #define TEGRA_PCIE_MSELECT_CLK_204 204000000
217 #define TEGRA_PCIE_MSELECT_CLK_408 408000000
218 #define TEGRA_PCIE_XCLK_500 500000000
219 #define TEGRA_PCIE_XCLK_250 250000000
222 * AXI address map for the PCIe aperture , defines 1GB in the AXI
223 * address map for PCIe.
225 * That address space is split into different regions, with sizes and
226 * offsets as follows. Except for the Register space, SW is free to slice the
227 * regions as it chooses.
229 * The split below seems to work fine for now.
231 * 0x0100_0000 to 0x01ff_ffff - Register space 16MB.
232 * 0x0200_0000 to 0x11ff_ffff - Config space 256MB.
233 * 0x1200_0000 to 0x1200_ffff - Downstream IO space
234 * ... Will be filled with other BARS like MSI/upstream IO etc.
235 * 0x1210_0000 to 0x320f_ffff - Prefetchable memory aperture
236 * 0x3210_0000 to 0x3fff_ffff - non-prefetchable memory aperture
238 #define TEGRA_PCIE_BASE 0x01000000
240 #define PCIE_REGS_SZ SZ_16M
241 #define PCIE_CFG_OFF (TEGRA_PCIE_BASE + PCIE_REGS_SZ)
242 #define PCIE_CFG_SZ SZ_256M
243 /* During the boot only registers/config and extended config apertures are
244 * mapped. Rest are mapped on demand by the PCI device drivers.
246 #define MMIO_BASE (PCIE_CFG_OFF + PCIE_CFG_SZ)
247 #define MMIO_SIZE SZ_64K
248 #define PREFETCH_MEM_BASE_0 (MMIO_BASE + SZ_1M)
249 #define PREFETCH_MEM_SIZE_0 SZ_512M
250 #define MEM_BASE_0 (PREFETCH_MEM_BASE_0 + PREFETCH_MEM_SIZE_0)
251 #define MEM_SIZE_0 (SZ_1G - MEM_BASE_0)
256 #define PR_FUNC_LINE pr_info("PCIE: %s(%d)\n", __func__, __LINE__)
258 #define PR_FUNC_LINE do {} while (0)
/* Per-root-port state (members not visible in this view). */
261 struct tegra_pcie_port {
/*
 * Driver-wide state: per-port array, power bookkeeping, hotplug work,
 * regulators, clocks, platform data and the list of per-bus config-space
 * mappings.  NOTE(review): several members are missing from this view.
 */
268 struct tegra_pcie_info {
269 struct tegra_pcie_port port[MAX_PCIE_SUPPORTED_PORTS];
272 int power_rails_enabled;
273 int pcie_power_enabled;
274 struct work_struct hotplug_detect;
276 struct regulator *regulator_hvdd;
277 struct regulator *regulator_pexio;
278 struct regulator *regulator_avdd_plle;
279 struct clk *pcie_xclk;
280 struct clk *pcie_mselect;
282 struct tegra_pci_platform_data *plat_data;
283 struct list_head busses;
/* One 1 MiB virtual window of extended config space per bus (see
 * tegra_pcie_bus_alloc()). */
286 struct tegra_pcie_bus {
287 struct vm_struct *area;
288 struct list_head list;
/* Apertures registered with the PCI core in tegra_pcie_preinit(). */
292 static struct resource pcie_mem_space;
293 static struct resource pcie_prefetch_mem_space;
295 /* this flag enables features required either after boot or resume */
296 static bool resume_path;
297 /* used to avoid successive hotplug disconnect or connect */
298 static bool hotplug_event;
299 /* pcie mselect & xclk rate */
300 static unsigned long tegra_pcie_mselect_rate = TEGRA_PCIE_MSELECT_CLK_204;
301 static unsigned long tegra_pcie_xclk_rate = TEGRA_PCIE_XCLK_250;
303 static inline void afi_writel(u32 value, unsigned long offset)
305 writel(value, offset + AFI_OFFSET + tegra_pcie.regs);
308 static inline u32 afi_readl(unsigned long offset)
310 return readl(offset + AFI_OFFSET + tegra_pcie.regs);
313 /* Array of PCIe Controller Register offsets */
/* NOTE(review): the initializer entries are not visible in this view;
 * presumably AFI_PEX0_CTRL and AFI_PEX1_CTRL — confirm against the full
 * file before relying on ARRAY_SIZE() users below. */
314 static u32 pex_controller_registers[] = {
319 static inline void pads_writel(u32 value, unsigned long offset)
321 writel(value, offset + PADS_OFFSET + tegra_pcie.regs);
324 static inline u32 pads_readl(unsigned long offset)
326 return readl(offset + PADS_OFFSET + tegra_pcie.regs);
329 static inline void rp_writel(u32 value, unsigned long offset, int rp)
331 BUG_ON(rp != 0 && rp != 1 && rp != 2);
332 offset += rp * (0x1UL << (rp - 1)) * RP_OFFSET;
333 writel(value, offset + tegra_pcie.regs);
336 static inline unsigned int rp_readl(unsigned long offset, int rp)
338 BUG_ON(rp != 0 && rp != 1 && rp != 2);
339 offset += rp * (0x1UL << (rp - 1)) * RP_OFFSET;
340 return readl(offset + tegra_pcie.regs);
343 static struct tegra_pcie_port *bus_to_port(int bus)
347 for (i = tegra_pcie.num_ports - 1; i >= 0; i--) {
348 int rbus = tegra_pcie.port[i].root_bus_nr;
349 if (rbus != -1 && rbus == bus)
353 return i >= 0 ? tegra_pcie.port + i : NULL;
357 * The configuration space mapping on Tegra is somewhat similar to the ECAM
358 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
359 * register accesses are mapped:
361 * [27:24] extended register number
363 * [15:11] device number
364 * [10: 8] function number
365 * [ 7: 0] register number
367 * Mapping the whole extended configuration space would require 256 MiB of
368 * virtual address space, only a small part of which will actually be used.
369 * To work around this, a 1 MiB of virtual addresses are allocated per bus
370 * when the bus is first accessed. When the physical range is mapped,
371 * the bus number bits are hidden so that the extended register number bits
372 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
374 * [19:16] extended register number
375 * [15:11] device number
376 * [10: 8] function number
377 * [ 7: 0] register number
379 * This is achieved by stitching together 16 chunks of 64 KiB of physical
380 * address space via the MMU.
/*
 * Translate (devfn, where) into the offset used inside a per-bus 1 MiB
 * config window: the extended register bits [11:8] of @where land at
 * [19:16], followed by slot/function/register in ECAM-like positions
 * (see the mapping comment above).
 */
static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
	unsigned long off;

	off  = (unsigned long)(where & 0xf00) << 8;
	off |= PCI_SLOT(devfn) << 11;
	off |= PCI_FUNC(devfn) << 8;
	off |= where & 0xfc;

	return off;
}
/*
 * Allocate a 1 MiB virtual window for bus @busnr and stitch 16 chunks of
 * 64 KiB of physical extended config space into it with the MMU.
 * NOTE(review): several lines (error paths, #ifdef arms of the pgprot
 * selection, loop offset terms) are missing from this view; code kept
 * byte-identical.
 */
389 static struct tegra_pcie_bus *tegra_pcie_bus_alloc(unsigned int busnr)
391 phys_addr_t cs = (phys_addr_t)PCIE_CFG_OFF;
392 struct tegra_pcie_bus *bus;
/* Device-memory page protection; the two variants below are presumably
 * ARM vs ARM64 #ifdef branches — confirm in the full file. */
396 pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
397 L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
399 pgprot_t prot = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY | PTE_XN |
400 PTE_SHARED | PTE_TYPE_PAGE;
401 (void)pgprot_dmacoherent(prot); /* L_PTE_MT_DEV_SHARED */
405 bus = kzalloc(sizeof(*bus), GFP_KERNEL);
407 return ERR_PTR(-ENOMEM);
409 INIT_LIST_HEAD(&bus->list);
412 /* allocate 1 MiB of virtual addresses */
413 bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
419 /* map each of the 16 chunks of 64 KiB each.
421 * Note that each chunk still needs to increment by 16 MiB in
424 for (i = 0; i < 16; i++) {
425 unsigned long virt = (unsigned long)bus->area->addr +
427 phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;
429 err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
431 dev_err(tegra_pcie.dev, "ioremap_page_range() failed: %d\n",
/* unwind on failure: drop whatever was mapped so far */
439 vunmap(bus->area->addr);
446 * Look up a virtual address mapping for the specified bus number.
447 * If no such mapping exists, try to create one.
449 static void __iomem *tegra_pcie_bus_map(unsigned int busnr)
451 struct tegra_pcie_bus *bus;
453 list_for_each_entry(bus, &tegra_pcie.busses, list)
454 if (bus->nr == busnr)
455 return bus->area->addr;
457 bus = tegra_pcie_bus_alloc(busnr);
461 list_add_tail(&bus->list, &tegra_pcie.busses);
463 return bus->area->addr;
/*
 * pci_ops .read: config-space read.  Bus 0 accesses go through the root
 * port's own register window; downstream buses go through the per-bus
 * mapping from tegra_pcie_bus_map().  NOTE(review): the branch structure
 * and the 32-bit read are missing from this view; code kept byte-identical.
 */
466 int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
467 int where, int size, u32 *val)
469 struct tegra_pcie_port *pp = bus_to_port(bus->number);
475 return PCIBIOS_DEVICE_NOT_FOUND;
477 addr = pp->base + (where & ~0x3);
479 addr = tegra_pcie_bus_map(bus->number);
481 dev_err(tegra_pcie.dev,
482 "failed to map cfg. space for bus %u\n",
485 return PCIBIOS_DEVICE_NOT_FOUND;
487 addr += tegra_pcie_conf_offset(devfn, where);
/* narrow the aligned 32-bit read down to the requested byte/word */
493 *val = (*val >> (8 * (where & 3))) & 0xff;
495 *val = (*val >> (8 * (where & 3))) & 0xffff;
497 return PCIBIOS_SUCCESSFUL;
499 EXPORT_SYMBOL(tegra_pcie_read_conf);
/*
 * pci_ops .write: config-space write, with read-modify-write for sub-word
 * sizes.  Forces IO/MEM/MASTER/SERR on in any PCI_COMMAND write (the
 * hardware is expected to have them set).  NOTE(review): branch keywords
 * and the 32-bit write are missing from this view; code kept byte-identical.
 */
501 static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
502 int where, int size, u32 val)
504 struct tegra_pcie_port *pp = bus_to_port(bus->number);
510 /* pcie core is supposed to enable bus mastering and io/mem responses
511 * if its not setting then enable corresponding bits in pci_command
513 if (where == PCI_COMMAND) {
514 if (!(val & PCI_COMMAND_IO))
515 val |= PCI_COMMAND_IO;
516 if (!(val & PCI_COMMAND_MEMORY))
517 val |= PCI_COMMAND_MEMORY;
518 if (!(val & PCI_COMMAND_MASTER))
519 val |= PCI_COMMAND_MASTER;
520 if (!(val & PCI_COMMAND_SERR))
521 val |= PCI_COMMAND_SERR;
526 return PCIBIOS_DEVICE_NOT_FOUND;
528 addr = pp->base + (where & ~0x3);
530 addr = tegra_pcie_bus_map(bus->number);
532 dev_err(tegra_pcie.dev,
533 "failed to map cfg. space for bus %u\n",
535 return PCIBIOS_DEVICE_NOT_FOUND;
537 addr += tegra_pcie_conf_offset(devfn, where);
542 return PCIBIOS_SUCCESSFUL;
/* build the byte-lane mask for a 16- or 8-bit partial write */
546 mask = ~(0xffff << ((where & 0x3) * 8));
548 mask = ~(0xff << ((where & 0x3) * 8));
550 return PCIBIOS_BAD_REGISTER_NUMBER;
552 tmp = readl(addr) & mask;
553 tmp |= val << ((where & 0x3) * 8);
556 return PCIBIOS_SUCCESSFUL;
/* Config-space accessors handed to the PCI core. */
559 static struct pci_ops tegra_pcie_ops = {
560 .read = tegra_pcie_read_conf,
561 .write = tegra_pcie_write_conf,
564 static void tegra_pcie_fixup_bridge(struct pci_dev *dev)
568 if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
569 pci_read_config_word(dev, PCI_COMMAND, ®);
570 reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
571 PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
572 pci_write_config_word(dev, PCI_COMMAND, reg);
575 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);
577 /* Tegra PCIE root complex wrongly reports device class */
578 static void tegra_pcie_fixup_class(struct pci_dev *dev)
580 dev->class = PCI_CLASS_BRIDGE_PCI << 8;
583 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
584 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
586 /* Tegra PCIE requires relaxed ordering */
587 static void tegra_pcie_relax_enable(struct pci_dev *dev)
589 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
591 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
593 static void tegra_pcie_preinit(void)
596 pcie_mem_space.name = "PCIe MEM Space";
597 pcie_mem_space.start = MEM_BASE_0;
598 pcie_mem_space.end = MEM_BASE_0 + MEM_SIZE_0 - 1;
599 pcie_mem_space.flags = IORESOURCE_MEM;
600 if (request_resource(&iomem_resource, &pcie_mem_space))
601 panic("can't allocate PCIe MEM space");
603 pcie_prefetch_mem_space.name = "PCIe PREFETCH MEM Space";
604 pcie_prefetch_mem_space.start = PREFETCH_MEM_BASE_0;
605 pcie_prefetch_mem_space.end = (PREFETCH_MEM_BASE_0
606 + PREFETCH_MEM_SIZE_0 - 1);
607 pcie_prefetch_mem_space.flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
608 if (request_resource(&iomem_resource, &pcie_prefetch_mem_space))
609 panic("can't allocate PCIe PREFETCH MEM space");
/*
 * hw_pci .setup: record the root bus number for port @nr, map its IO
 * window and hand the memory apertures to the PCI core.  NOTE(review):
 * the return statements are missing from this view; code kept
 * byte-identical.
 */
613 static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
615 struct tegra_pcie_port *pp;
618 if (nr >= tegra_pcie.num_ports)
621 pp = tegra_pcie.port + nr;
622 pp->root_bus_nr = sys->busnr;
624 pci_ioremap_io(nr * MMIO_SIZE, MMIO_BASE);
625 pci_add_resource_offset(
626 &sys->resources, &pcie_mem_space, sys->mem_offset);
627 pci_add_resource_offset(
628 &sys->resources, &pcie_prefetch_mem_space, sys->mem_offset);
633 static int tegra_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
635 return INT_PCIE_INTR;
/*
 * hw_pci .scan: scan the root bus for port @nr with tegra_pcie_ops.
 * NOTE(review): the out-of-range return and the final argument of
 * pci_scan_root_bus() are missing from this view; code kept byte-identical.
 */
638 static struct pci_bus *__init tegra_pcie_scan_bus(int nr,
639 struct pci_sys_data *sys)
641 struct tegra_pcie_port *pp;
644 if (nr >= tegra_pcie.num_ports)
647 pp = tegra_pcie.port + nr;
648 pp->root_bus_nr = sys->busnr;
650 return pci_scan_root_bus(NULL, sys->busnr, &tegra_pcie_ops, sys,
/* ARM PCI bios glue: controller count and callbacks for bring-up. */
654 static struct hw_pci __initdata tegra_pcie_hw = {
655 .nr_controllers = MAX_PCIE_SUPPORTED_PORTS,
656 .preinit = tegra_pcie_preinit,
657 .setup = tegra_pcie_setup,
658 .scan = tegra_pcie_scan_bus,
659 .map_irq = tegra_pcie_map_irq,
662 #ifdef HOTPLUG_ON_SYSTEM_BOOT
663 /* It enumerates the devices when dock is connected after system boot */
664 /* this is similar to pcibios_init_hw in bios32.c */
/*
 * Re-runs the hw_pci bring-up sequence by hand for each controller.
 * NOTE(review): early-return, loop-variable declarations and error
 * branches are missing from this view; code kept byte-identical.
 */
665 static void __init tegra_pcie_hotplug_init(void)
667 struct pci_sys_data *sys = NULL;
670 if (is_dock_conn_at_boot)
674 tegra_pcie_preinit();
675 for (nr = 0; nr < tegra_pcie_hw.nr_controllers; nr++) {
676 sys = kzalloc(sizeof(struct pci_sys_data), GFP_KERNEL);
678 panic("PCI: unable to allocate sys data!");
680 #ifdef CONFIG_PCI_DOMAINS
681 sys->domain = tegra_pcie_hw.domain;
684 sys->swizzle = tegra_pcie_hw.swizzle;
685 sys->map_irq = tegra_pcie_hw.map_irq;
686 INIT_LIST_HEAD(&sys->resources);
688 ret = tegra_pcie_setup(nr, sys);
/* fall back to the global resources if setup added none */
690 if (list_empty(&sys->resources)) {
691 pci_add_resource_offset(&sys->resources,
692 &ioport_resource, sys->io_offset);
693 pci_add_resource_offset(&sys->resources,
694 &iomem_resource, sys->mem_offset);
696 pci_create_root_bus(NULL, nr, &tegra_pcie_ops,
697 sys, &sys->resources);
700 is_dock_conn_at_boot = true;
704 static void tegra_pcie_enable_aer(int index, bool enable)
709 data = rp_readl(NV_PCIE2_RP_VEND_CTL1, index);
711 data |= PCIE2_RP_VEND_CTL1_ERPT;
713 data &= ~PCIE2_RP_VEND_CTL1_ERPT;
714 rp_writel(data, NV_PCIE2_RP_VEND_CTL1, index);
/*
 * Hotplug connect path: rescan all buses, unhide AER on port 0 and clear
 * the hotplug-in-progress flag.  NOTE(review): the rescan call inside the
 * loop and the return are missing from this view; code kept byte-identical.
 */
717 static int tegra_pcie_attach(void)
719 struct pci_bus *bus = NULL;
725 /* rescan and recreate all pcie data structures */
726 while ((bus = pci_find_next_bus(bus)) != NULL)
728 /* unhide AER capability */
729 tegra_pcie_enable_aer(0, true);
731 hotplug_event = false;
/*
 * Hotplug disconnect path: set the in-progress flag, hide AER to stop
 * log spew, then tear down every PCI device.  NOTE(review): the return
 * is missing from this view; code kept byte-identical.
 */
735 static int tegra_pcie_detach(void)
737 struct pci_dev *pdev = NULL;
742 hotplug_event = true;
744 /* hide AER capability to avoid log spew */
745 tegra_pcie_enable_aer(0, false);
746 /* remove all pcie data structures */
747 for_each_pci_dev(pdev) {
748 pci_stop_and_remove_bus_device(pdev);
754 static void tegra_pcie_prsnt_map_override(int index, bool prsnt)
759 /* currently only hotplug on root port 0 supported */
760 data = rp_readl(NV_PCIE2_RP_PRIV_MISC, index);
761 data &= ~PCIE2_RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
763 data |= PCIE2_RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
765 data |= PCIE2_RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
766 rp_writel(data, NV_PCIE2_RP_PRIV_MISC, index);
/*
 * Workqueue handler for the hotplug GPIO: read the pin and log/handle
 * connect vs disconnect.  NOTE(review): the attach/detach calls and the
 * val declaration are missing from this view; code kept byte-identical.
 */
769 static void work_hotplug_handler(struct work_struct *work)
771 struct tegra_pcie_info *pcie_driver =
772 container_of(work, struct tegra_pcie_info, hotplug_detect);
776 if (pcie_driver->plat_data->gpio_hot_plug == -1)
778 val = gpio_get_value(pcie_driver->plat_data->gpio_hot_plug);
780 pr_info("PCIE Hotplug: Connected\n");
783 pr_info("PCIE Hotplug: DisConnected\n");
788 static irqreturn_t gpio_pcie_detect_isr(int irq, void *arg)
791 schedule_work(&tegra_pcie.hotplug_detect);
795 static void notify_device_isr(u32 mesg)
797 pr_debug(KERN_INFO "Legacy INTx interrupt occurred %x\n", mesg);
798 /* TODO: Need to call pcie device isr instead of ignoring interrupt */
799 /* same comment applies to below handler also */
802 static void handle_sb_intr(void)
807 mesg = afi_readl(AFI_MSG_0);
809 if (mesg & AFI_MSG_INTX_MASK)
810 /* notify device isr for INTx messages from pcie devices */
811 notify_device_isr(mesg);
812 else if (mesg & AFI_MSG_PM_PME_MASK) {
814 /* handle PME messages */
815 idx = (mesg & AFI_MSG_PM_PME0) ? 0 : 1;
816 mesg = rp_readl(NV_PCIE2_RP_RSR, idx);
817 mesg |= NV_PCIE2_RP_RSR_PMESTAT;
818 rp_writel(mesg, NV_PCIE2_RP_RSR, idx);
820 afi_writel(mesg, AFI_MSG_0);
/*
 * Main controller interrupt handler: decode AFI_INTR_CODE, dispatch
 * legacy interrupts to handle_sb_intr() and log AFI errors (master
 * aborts demoted to pr_debug since they flood during enumeration).
 * NOTE(review): several err_msg entries, declarations and returns are
 * missing from this view; code kept byte-identical — including the
 * "Transcation" typo, which is a runtime string and must not be edited
 * here.
 */
823 static irqreturn_t tegra_pcie_isr(int irq, void *arg)
825 const char *err_msg[] = {
833 "Response decoding error",
834 "AXI response decoding error",
835 "Transcation timeout",
837 "Slot Clock request change",
838 "TMS Clock clamp change",
840 "Peer to Peer error",
846 if (!tegra_pcie.regs) {
847 pr_info("PCIE: PCI/AFI registers are unmapped\n");
850 code = afi_readl(AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
851 signature = afi_readl(AFI_INTR_SIGNATURE);
853 if (code == AFI_INTR_LEGACY)
/* ack the interrupt after reading code/signature */
856 afi_writel(0, AFI_INTR_CODE);
858 if (code >= ARRAY_SIZE(err_msg))
862 * do not pollute kernel log with master abort reports since they
863 * happen a lot during enumeration
865 if (code == AFI_INTR_MASTER_ABORT)
866 pr_debug("PCIE: %s, signature: %08x\n",
867 err_msg[code], signature);
868 else if ((code != AFI_INTR_LEGACY) && (code != AFI_INTR_PRSNT_SENSE))
869 pr_err("PCIE: %s, signature: %08x\n", err_msg[code], signature);
875 * PCIe support functions
/*
 * Program the AFI AXI-to-FPCI BAR translations: BAR0 = extended config,
 * BAR1 = downstream IO, BAR2 = prefetchable memory, BAR3 = non-prefetch
 * memory; BAR4/5 and MSI BARs are zeroed.  NOTE(review): the size
 * assignments for BAR0/1/3 and the variable declarations are missing from
 * this view; code kept byte-identical.
 */
877 static void tegra_pcie_setup_translations(void)
884 /* Bar 0: type 1 extended configuration space */
885 fpci_bar = 0xfe100000;
887 axi_address = PCIE_CFG_OFF;
888 afi_writel(axi_address, AFI_AXI_BAR0_START);
889 afi_writel(size >> 12, AFI_AXI_BAR0_SZ);
890 afi_writel(fpci_bar, AFI_FPCI_BAR0);
892 /* Bar 1: downstream IO bar */
893 fpci_bar = 0xfdfc0000;
895 axi_address = MMIO_BASE;
896 afi_writel(axi_address, AFI_AXI_BAR1_START);
897 afi_writel(size >> 12, AFI_AXI_BAR1_SZ);
898 afi_writel(fpci_bar, AFI_FPCI_BAR1);
900 /* Bar 2: prefetchable memory BAR */
901 fpci_bar = (((PREFETCH_MEM_BASE_0 >> 12) & 0xfffff) << 4) | 0x1;
902 size = PREFETCH_MEM_SIZE_0;
903 axi_address = PREFETCH_MEM_BASE_0;
904 afi_writel(axi_address, AFI_AXI_BAR2_START);
905 afi_writel(size >> 12, AFI_AXI_BAR2_SZ);
906 afi_writel(fpci_bar, AFI_FPCI_BAR2);
908 /* Bar 3: non prefetchable memory BAR */
909 fpci_bar = (((MEM_BASE_0 >> 12) & 0xfffff) << 4) | 0x1;
911 axi_address = MEM_BASE_0;
912 afi_writel(axi_address, AFI_AXI_BAR3_START);
913 afi_writel(size >> 12, AFI_AXI_BAR3_SZ);
914 afi_writel(fpci_bar, AFI_FPCI_BAR3);
916 /* NULL out the remaining BAR as it is not used */
917 afi_writel(0, AFI_AXI_BAR4_START);
918 afi_writel(0, AFI_AXI_BAR4_SZ);
919 afi_writel(0, AFI_FPCI_BAR4);
921 afi_writel(0, AFI_AXI_BAR5_START);
922 afi_writel(0, AFI_AXI_BAR5_SZ);
923 afi_writel(0, AFI_FPCI_BAR5);
925 /* map all upstream transactions as uncached */
926 afi_writel(PHYS_OFFSET, AFI_CACHE_BAR0_ST);
927 afi_writel(0, AFI_CACHE_BAR0_SZ);
928 afi_writel(0, AFI_CACHE_BAR1_ST);
929 afi_writel(0, AFI_CACHE_BAR1_SZ);
/* clear MSI translation; AFI_MSI_BAR_SZ is written twice around the
 * FPCI/AXI start registers — looks intentional, confirm against TRM */
932 afi_writel(0, AFI_MSI_FPCI_BAR_ST);
933 afi_writel(0, AFI_MSI_BAR_SZ);
934 afi_writel(0, AFI_MSI_AXI_BAR_ST);
935 afi_writel(0, AFI_MSI_BAR_SZ);
/*
 * Enable/disable the PCIe analog pads.  On silicon (non-FPGA) this
 * applies the T124 eye-diagram workaround values and defers pad control
 * to the XUSB_PADCTL block.  NOTE(review): declarations, the error check
 * and the return are missing from this view; code kept byte-identical.
 */
938 static int tegra_pcie_enable_pads(bool enable)
943 if (!tegra_platform_is_fpga()) {
944 /* WAR for Eye diagram failure on lanes for T124 platforms */
945 pads_writel(0x44ac44ac, PADS_REFCLK_CFG0);
946 pads_writel(0x00000028, PADS_REFCLK_BIAS);
947 /* T124 PCIe pad programming is moved to XUSB_PADCTL space */
948 err = pcie_phy_pad_enable(enable,
949 tegra_get_lane_owner_info() >> 1);
951 pr_err("%s unable to initalize pads\n", __func__);
/*
 * Bring the AFI/PCIe controller up: enable slot clocks with reset held,
 * configure PLLE power-down, program the lane crossbar from the lane
 * owner info, enable Gen2, FPCI, AFI interrupts, then release resets.
 * NOTE(review): declarations, closing braces and the return are missing
 * from this view; code kept byte-identical.
 */
956 static int tegra_pcie_enable_controller(void)
959 int i, ret = 0, lane_owner;
962 /* Enable slot clock and ensure reset signal is assert */
963 for (i = 0; i < ARRAY_SIZE(pex_controller_registers); i++) {
964 reg = pex_controller_registers[i];
965 val = afi_readl(reg) | AFI_PEX_CTRL_REFCLK_EN |
966 AFI_PEX_CTRL_CLKREQ_EN;
967 /* Since CLKREQ# pinmux pins may float in some platfoms */
968 /* resulting in disappear of refclk specially at higher temp */
969 /* overrided CLKREQ to always drive refclk */
970 if (!tegra_pcie.plat_data->has_clkreq)
971 val |= AFI_PEX_CTRL_OVERRIDE_EN;
972 val &= ~AFI_PEX_CTRL_RST;
973 afi_writel(val, reg);
976 /* Enable PLL power down */
977 val = afi_readl(AFI_PLLE_CONTROL);
978 val &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
979 val |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
980 afi_writel(val, AFI_PLLE_CONTROL);
982 afi_writel(0, AFI_PEXBIAS_CTRL_0);
985 /* Enable all PCIE controller and */
986 /* system management configuration of PCIE crossbar */
987 val = afi_readl(AFI_PCIE_CONFIG);
988 val &= ~(AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE |
989 AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK);
990 if (tegra_platform_is_fpga()) {
991 /* FPGA supports only x2_x1 bar config */
992 val |= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
994 /* Extract 2 upper bits from odmdata[28:30] and configure */
995 /* T124 pcie lanes in X2_X1/X4_X1 config based on them */
996 lane_owner = tegra_get_lane_owner_info() >> 1;
997 if (lane_owner == PCIE_LANES_X2_X1) {
998 val |= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
999 if (tegra_pcie.plat_data->port_status[1])
1000 val &= ~AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE;
1002 val |= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1003 if ((tegra_pcie.plat_data->port_status[1]) &&
1004 (lane_owner == PCIE_LANES_X4_X1))
1005 val &= ~AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE;
1008 afi_writel(val, AFI_PCIE_CONFIG);
1010 /* Enable Gen 2 capability of PCIE */
1011 val = afi_readl(AFI_FUSE) & ~AFI_FUSE_PCIE_T0_GEN2_DIS;
1012 afi_writel(val, AFI_FUSE);
1014 /* Finally enable PCIe */
1015 val = afi_readl(AFI_CONFIGURATION);
1016 val |= AFI_CONFIGURATION_EN_FPCI;
1017 afi_writel(val, AFI_CONFIGURATION);
1019 val = (AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
1020 AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
1021 AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR |
1022 AFI_INTR_EN_AXI_DECERR | AFI_INTR_EN_PRSNT_SENSE);
1023 afi_writel(val, AFI_AFI_INTR_ENABLE);
1024 afi_writel(0xffffffff, AFI_SM_INTR_ENABLE);
1026 /* FIXME: No MSI for now, only INT */
1027 afi_writel(AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);
1029 /* Disable all execptions */
1030 afi_writel(0, AFI_FPCI_ERROR_MASKS);
1031 /* Take the PCIe interface module out of reset */
1032 tegra_periph_reset_deassert(tegra_pcie.pcie_xclk);
1034 /* deassert PEX reset signal */
1035 for (i = 0; i < ARRAY_SIZE(pex_controller_registers); i++) {
1036 val = afi_readl(pex_controller_registers[i]);
1037 val |= AFI_PEX_CTRL_RST;
1038 afi_writel(val, pex_controller_registers[i]);
1043 #ifdef USE_REGULATORS
/*
 * Acquire (once) and enable the hvdd_pex, avdd_pex_pll and avdd_plle
 * supplies.  Failed regulator_get() results are squashed to 0 (NULL) so
 * later enables are skipped.  NOTE(review): returns, closing braces and
 * the second regulator_get() argument for avdd_plle are missing from
 * this view; code kept byte-identical.
 */
1044 static int tegra_pcie_enable_regulators(void)
1047 if (tegra_pcie.power_rails_enabled) {
1048 pr_debug("PCIE: Already power rails enabled");
1051 tegra_pcie.power_rails_enabled = 1;
1053 if (tegra_pcie.regulator_hvdd == NULL) {
1054 pr_info("PCIE.C: %s : regulator hvdd_pex\n", __func__);
1055 tegra_pcie.regulator_hvdd =
1056 regulator_get(tegra_pcie.dev, "hvdd_pex");
1057 if (IS_ERR(tegra_pcie.regulator_hvdd)) {
1058 pr_err("%s: unable to get hvdd_pex regulator\n",
1060 tegra_pcie.regulator_hvdd = 0;
1064 if (tegra_pcie.regulator_pexio == NULL) {
1065 pr_info("PCIE.C: %s : regulator pexio\n", __func__);
1066 tegra_pcie.regulator_pexio =
1067 regulator_get(tegra_pcie.dev, "avdd_pex_pll");
1068 if (IS_ERR(tegra_pcie.regulator_pexio)) {
1069 pr_err("%s: unable to get pexio regulator\n", __func__);
1070 tegra_pcie.regulator_pexio = 0;
1074 /*SATA and PCIE use same PLLE, In default configuration,
1075 * and we set default AVDD_PLLE with SATA.
1076 * So if use default board, you have to turn on (LDO2) AVDD_PLLE.
1078 if (tegra_pcie.regulator_avdd_plle == NULL) {
1079 pr_info("PCIE.C: %s : regulator avdd_plle\n", __func__);
1080 tegra_pcie.regulator_avdd_plle = regulator_get(tegra_pcie.dev,
1082 if (IS_ERR(tegra_pcie.regulator_avdd_plle)) {
1083 pr_err("%s: unable to get avdd_plle regulator\n",
1085 tegra_pcie.regulator_avdd_plle = 0;
/* NOTE(review): regulator_enable() return values are ignored here */
1088 if (tegra_pcie.regulator_hvdd)
1089 regulator_enable(tegra_pcie.regulator_hvdd);
1090 if (tegra_pcie.regulator_pexio)
1091 regulator_enable(tegra_pcie.regulator_pexio);
1092 if (tegra_pcie.regulator_avdd_plle)
1093 regulator_enable(tegra_pcie.regulator_avdd_plle);
/* Mirror of the enable path: disable whichever supplies were acquired. */
1098 static int tegra_pcie_disable_regulators(void)
1103 if (tegra_pcie.power_rails_enabled == 0) {
1104 pr_debug("PCIE: Already power rails disabled");
1107 if (tegra_pcie.regulator_hvdd)
1108 err = regulator_disable(tegra_pcie.regulator_hvdd);
1111 if (tegra_pcie.regulator_pexio)
1112 err = regulator_disable(tegra_pcie.regulator_pexio);
1115 if (tegra_pcie.regulator_avdd_plle)
1116 err = regulator_disable(tegra_pcie.regulator_avdd_plle);
1117 tegra_pcie.power_rails_enabled = 0;
/*
 * Unpowergate the PCIe partition and enable its clocks (reset asserted
 * across the sequence).  NOTE(review): error returns are missing from
 * this view; also mselect uses clk_prepare_enable() while xclk uses bare
 * clk_enable() — presumably xclk is prepared elsewhere, confirm.
 */
1123 static int tegra_pcie_power_ungate(void)
1128 err = tegra_unpowergate_partition_with_clk_on(TEGRA_POWERGATE_PCIE);
1130 pr_err("PCIE: powerup sequence failed: %d\n", err);
1134 tegra_periph_reset_assert(tegra_pcie.pcie_xclk);
1135 err = clk_prepare_enable(tegra_pcie.pcie_mselect);
1137 pr_err("PCIE: mselect clk enable failed: %d\n", err);
1140 err = clk_enable(tegra_pcie.pcie_xclk);
1142 pr_err("PCIE: pciex clk enable failed: %d\n", err);
/*
 * Map the 16 MiB register/config aperture at TEGRA_PCIE_BASE.  The two
 * mapping calls are presumably ARM64 (__ioremap with device-GRE
 * attributes) vs ARM (ioremap_nocache) #ifdef branches — confirm in the
 * full file.  NOTE(review): returns are missing from this view; code
 * kept byte-identical.
 */
1149 static int tegra_pcie_map_resources(void)
1152 /* Allocate config space virtual memory */
1154 #define PROT_DEVICE_GRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_GRE))
1155 tegra_pcie.regs = __ioremap(TEGRA_PCIE_BASE, PCIE_REGS_SZ,
1156 __pgprot(PROT_DEVICE_GRE));
1158 tegra_pcie.regs = ioremap_nocache(TEGRA_PCIE_BASE, PCIE_REGS_SZ);
1160 if (tegra_pcie.regs == NULL) {
1161 pr_err("PCIE: Failed to map PCI/AFI registers\n");
1167 void tegra_pcie_unmap_resources(void)
1170 if (tegra_pcie.regs) {
1171 iounmap(tegra_pcie.regs);
1172 tegra_pcie.regs = 0;
1176 static bool tegra_pcie_is_fpga_pcie(void)
1178 #define CLK_RST_BOND_OUT_REG 0x60006078
1179 #define CLK_RST_BOND_OUT_REG_PCIE (1 << 6)
1184 val = readl(ioremap(CLK_RST_BOND_OUT_REG, 4));
1185 /* return if current netlist does not contain PCIE */
1186 if (val & CLK_RST_BOND_OUT_REG_PCIE)
/*
 * FPGA-only PHY bring-up: pulse the scratch register to reset the phy,
 * then enable gen2 speed support via the root-port BIST register.
 * NOTE(review): the delays between the scratch writes and the return are
 * missing from this view; code kept byte-identical.
 */
1191 static int tegra_pcie_fpga_phy_init(void)
1193 #define FPGA_GEN2_SPEED_SUPPORT 0x90000001
1196 if (!tegra_pcie_is_fpga_pcie())
1199 /* Do reset for FPGA pcie phy */
1200 afi_writel(AFI_WR_SCRATCH_0_RESET_VAL, AFI_WR_SCRATCH_0);
1202 afi_writel(AFI_WR_SCRATCH_0_DEFAULT_VAL, AFI_WR_SCRATCH_0);
1204 afi_writel(AFI_WR_SCRATCH_0_RESET_VAL, AFI_WR_SCRATCH_0);
1206 /* required for gen2 speed support on FPGA */
1207 rp_writel(FPGA_GEN2_SPEED_SUPPORT, NV_PCIE2_RP_VEND_XP_BIST, 0);
/*
 * tegra_pcie_pme_turnoff() - broadcast PME_Turn_Off before powering down.
 *
 * Sets AFI_PCIE_PME_TURN_OFF and polls AFI_PCIE_PME until the ACK bit is
 * seen, then sets the PLLE bypass bit needed for PLL power-down.
 * NOTE(review): the ACK poll loop has no visible timeout — if the link
 * partner never acks this can spin forever; confirm in the full file.
 */
1212 static void tegra_pcie_pme_turnoff(void)
/* Skip entirely when running on an FPGA netlist without PCIe. */
1217 if (tegra_platform_is_fpga() && !tegra_pcie_is_fpga_pcie())
1219 data = afi_readl(AFI_PCIE_PME);
1220 data |= AFI_PCIE_PME_TURN_OFF;
1221 afi_writel(data, AFI_PCIE_PME);
1223 data = afi_readl(AFI_PCIE_PME);
1224 } while (!(data & AFI_PCIE_PME_ACK));
1226 /* Required for PLL power down */
1227 data = afi_readl(AFI_PLLE_CONTROL);
1228 data |= AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
1229 afi_writel(data, AFI_PLLE_CONTROL);
/*
 * IO deep-power-down (DPD) descriptors for the PEX bias and the two PEX
 * clock pads.  These are toggled in tegra_pcie_power_on()/_power_off()
 * to drop pad power when the controller is off (non-FPGA only).
 * NOTE(review): the .name/.io_dpd_bit_pos initializers appear to be
 * elided in this excerpt.
 */
1232 static struct tegra_io_dpd pexbias_io = {
1234 .io_dpd_reg_index = 0,
1237 static struct tegra_io_dpd pexclk1_io = {
1239 .io_dpd_reg_index = 0,
1242 static struct tegra_io_dpd pexclk2_io = {
1244 .io_dpd_reg_index = 0,
/*
 * tegra_pcie_power_on() - bring the PCIe partition fully up.
 *
 * Idempotent: returns early if already powered (pcie_power_enabled set).
 * Sequence: take a runtime-PM reference, lift the PEX pads out of DPD
 * (silicon only), un-powergate the partition, map the register aperture,
 * and on FPGA additionally run the PHY init.  Error paths (elided here)
 * log and drop the runtime-PM reference at the bottom.
 */
1247 static int tegra_pcie_power_on(void)
1252 if (tegra_pcie.pcie_power_enabled) {
1253 pr_debug("PCIE: Already powered on");
/* Mark powered before the sequence so re-entry short-circuits. */
1256 tegra_pcie.pcie_power_enabled = 1;
1257 pm_runtime_get_sync(tegra_pcie.dev);
1259 if (!tegra_platform_is_fpga()) {
1260 /* disable PEX IOs DPD mode to turn on pcie */
1261 tegra_io_dpd_disable(&pexbias_io);
1262 tegra_io_dpd_disable(&pexclk1_io);
1263 tegra_io_dpd_disable(&pexclk2_io);
1265 err = tegra_pcie_power_ungate();
1267 pr_err("PCIE: Failed to power ungate\n");
1270 err = tegra_pcie_map_resources();
1272 pr_err("PCIE: Failed to map resources\n");
1275 if (tegra_platform_is_fpga()) {
1276 err = tegra_pcie_fpga_phy_init();
1278 pr_err("PCIE: Failed to initialize FPGA Phy\n");
/* Error exit: release the runtime-PM reference taken above. */
1283 pm_runtime_put(tegra_pcie.dev);
/*
 * tegra_pcie_power_off() - reverse of tegra_pcie_power_on().
 *
 * @all: presumably selects a full teardown (PME turn-off, pad disable)
 *       versus a partial one — the guarding branch is elided in this
 *       excerpt; TODO confirm against the full file.
 *
 * Idempotent: returns early when already off.  Sends PME_Turn_Off,
 * disables the pads, unmaps registers, gates clocks and the power
 * partition, re-enters PEX pad DPD (silicon only), and drops the
 * runtime-PM reference taken at power-on.
 */
1287 static int tegra_pcie_power_off(bool all)
1292 if (tegra_pcie.pcie_power_enabled == 0) {
1293 pr_debug("PCIE: Already powered off");
1297 tegra_pcie_prsnt_map_override(0, false);
1298 tegra_pcie_pme_turnoff();
1299 tegra_pcie_enable_pads(false);
1301 tegra_pcie_unmap_resources();
1302 if (tegra_pcie.pcie_mselect)
1303 clk_disable(tegra_pcie.pcie_mselect);
1304 if (tegra_pcie.pcie_xclk)
1305 clk_disable(tegra_pcie.pcie_xclk);
1306 err = tegra_powergate_partition_with_clk_off(TEGRA_POWERGATE_PCIE);
1310 if (!tegra_platform_is_fpga()) {
1311 /* put PEX pads into DPD mode to save additional power */
1312 tegra_io_dpd_enable(&pexbias_io);
1313 tegra_io_dpd_enable(&pexclk1_io);
1314 tegra_io_dpd_enable(&pexclk2_io);
1316 pm_runtime_put(tegra_pcie.dev);
1318 tegra_pcie.pcie_power_enabled = 0;
/*
 * tegra_pcie_clocks_get() - look up the clocks the controller needs.
 *
 * Fetches the "pciex" and "mselect" clocks for the tegra_pcie device via
 * clk_get_sys() and stores them in the global tegra_pcie state.  Each
 * lookup is checked with IS_ERR_OR_NULL and logged on failure.
 */
1323 static int tegra_pcie_clocks_get(void)
1326 /* get the PCIEXCLK */
1327 tegra_pcie.pcie_xclk = clk_get_sys("tegra_pcie", "pciex");
1328 if (IS_ERR_OR_NULL(tegra_pcie.pcie_xclk)) {
1329 pr_err("%s: unable to get PCIE Xclock\n", __func__);
1332 tegra_pcie.pcie_mselect = clk_get_sys("tegra_pcie", "mselect");
1333 if (IS_ERR_OR_NULL(tegra_pcie.pcie_mselect)) {
1334 pr_err("%s: unable to get PCIE mselect clock\n", __func__);
/*
 * tegra_pcie_clocks_put() - release clocks taken by tegra_pcie_clocks_get().
 *
 * NOTE(review): the guards test only for non-NULL; if a failed
 * clk_get_sys() left an ERR_PTR in these fields, clk_put() would be
 * called on it — confirm the error path in clocks_get() clears them.
 */
1340 static void tegra_pcie_clocks_put(void)
1343 if (tegra_pcie.pcie_xclk)
1344 clk_put(tegra_pcie.pcie_xclk);
1345 if (tegra_pcie.pcie_mselect)
1346 clk_put(tegra_pcie.pcie_mselect);
/*
 * tegra_pcie_get_resources() - acquire clocks, power and the main IRQ.
 *
 * Order: get clocks -> power on the partition -> set mselect and xclk
 * rates -> request the shared controller interrupt (INT_PCIE_INTR).
 * The error unwind at the bottom powers the partition back off and
 * releases the clocks.  Error-check branches between steps are elided
 * in this excerpt.
 */
1349 static int tegra_pcie_get_resources(void)
1354 tegra_pcie.power_rails_enabled = 0;
1355 tegra_pcie.pcie_power_enabled = 0;
1357 err = tegra_pcie_clocks_get();
1359 pr_err("PCIE: failed to get clocks: %d\n", err);
1362 err = tegra_pcie_power_on();
1364 pr_err("PCIE: Failed to power on: %d\n", err);
1367 err = clk_set_rate(tegra_pcie.pcie_mselect, tegra_pcie_mselect_rate);
1371 err = clk_set_rate(tegra_pcie.pcie_xclk, tegra_pcie_xclk_rate);
1374 err = request_irq(INT_PCIE_INTR, tegra_pcie_isr,
1375 IRQF_SHARED, "PCIE", &tegra_pcie);
1377 pr_err("PCIE: Failed to register IRQ: %d\n", err);
1380 set_irq_flags(INT_PCIE_INTR, IRQF_VALID);
/* Error unwind: tear down in reverse acquisition order. */
1384 tegra_pcie_power_off(false);
1386 tegra_pcie_clocks_put();
1391 * FIXME: If there are no PCIe cards attached, then calling this function
1392 * can result in the increase of the bootup time as there are big timeout
/* 200 polls; the "1.2 seconds" figure implies ~6 ms per iteration. */
1395 #define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */
/*
 * tegra_pcie_check_link() - wait for link training on one root port.
 *
 * Two polling phases: first wait for data-link-up in RP_VEND_XP, then
 * wait for link training to complete in RP_LINK_CONTROL_STATUS (bit 29,
 * a magic mask — presumably "LTSSM link up"; TODO confirm against TRM).
 * On a stuck link the port is retried after pulsing PEX reset via the
 * AFI reset register.  Returns true when the link comes up.
 */
1396 static bool tegra_pcie_check_link(struct tegra_pcie_port *pp, int idx,
1405 timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
1407 reg = readl(pp->base + RP_VEND_XP);
1409 if (reg & RP_VEND_XP_DL_UP)
1417 pr_err("PCIE: port %d: link down, retrying\n", idx);
1421 timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
1423 reg = readl(pp->base + RP_LINK_CONTROL_STATUS);
1425 if (reg & 0x20000000)
1434 /* Pulse the PEX reset */
1435 reg = afi_readl(reset_reg) & ~AFI_PEX_CTRL_RST;
1436 afi_writel(reg, reset_reg);
1437 reg = afi_readl(reset_reg) | AFI_PEX_CTRL_RST;
1438 afi_writel(reg, reset_reg);
/*
 * tegra_pcie_apply_sw_war() - apply software workarounds for one port.
 *
 * @index:     root-port index for the rp_readl/rp_writel accessors
 * @enum_done: true after enumeration — the branch structure selecting
 *             which WARs run in each phase is elided in this excerpt.
 *
 * Post-enumeration: clears msi_enabled on every root port to keep the
 * port driver from using MSI (avoids a panic, per the comment).
 * Pre-enumeration: programs TX driver/compensation overrides (eye
 * diagram WAR on T124) and forces a valid interrupt-line value so
 * enumeration does not warn.
 */
1446 static void tegra_pcie_apply_sw_war(int index, bool enum_done)
1449 struct pci_dev *pdev = NULL;
1453 /* disable msi for port driver to avoid panic */
1454 for_each_pci_dev(pdev)
1455 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
1456 pdev->msi_enabled = 0;
1458 /* WAR for Eye diagram failure on lanes for T124 platforms */
1459 data = rp_readl(NV_PCIE2_RP_ECTL_1_R2, index);
1460 data |= PCIE2_RP_ECTL_1_R2_TX_CMADJ_1C;
1461 data |= PCIE2_RP_ECTL_1_R2_TX_DRV_CNTL_1C;
1462 rp_writel(data, NV_PCIE2_RP_ECTL_1_R2, index);
1463 /* Avoid warning during enumeration for invalid IRQ of RP */
1464 data = rp_readl(NV_PCIE2_RP_INTR_BCR, index);
1465 data |= NV_PCIE2_RP_INTR_BCR_INTR_LINE;
1466 rp_writel(data, NV_PCIE2_RP_INTR_BCR, index);
1470 /* Enable various features of root port */
/*
 * tegra_pcie_enable_rp_features() - per-port power/ASPM/AER setup.
 *
 * Read-modify-write of root-port private registers: clock clamping and
 * card detect, ASPM L1 support, DLLP-completion gating before L1/L2
 * entry, AER unhide, then the pre-enumeration SW workarounds.
 */
1471 static void tegra_pcie_enable_rp_features(int index)
1476 /* Power mangagement settings */
1477 /* Enable clock clamping by default and enable card detect */
1478 data = rp_readl(NV_PCIE2_RP_PRIV_MISC, index);
1479 data |= PCIE2_RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
1480 PCIE2_RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE |
1481 PCIE2_RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD |
1482 PCIE2_RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;
1483 rp_writel(data, NV_PCIE2_RP_PRIV_MISC, index);
1485 /* Enable ASPM - L1 state support by default */
1486 data = rp_readl(NV_PCIE2_RP_VEND_XP1, index);
1487 data |= NV_PCIE2_RP_VEND_XP1_LINK_PVT_CTL_L1_ASPM_SUPPORT;
1488 rp_writel(data, NV_PCIE2_RP_VEND_XP1, index);
1490 /* LTSSM wait for DLLP to finish before entering L1 or L2/L3 */
1491 /* to avoid truncating of PM mesgs resulting in reciever errors */
1492 data = rp_readl(NV_PCIE2_RP_VEND_XP_BIST, index);
1493 data |= PCIE2_RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
1494 rp_writel(data, NV_PCIE2_RP_VEND_XP_BIST, index);
1496 /* unhide AER capability */
1497 tegra_pcie_enable_aer(index, true);
/* false = pre-enumeration workarounds only */
1499 tegra_pcie_apply_sw_war(index, false);
/*
 * tegra_pcie_disable_ctlr() - disable an unused PCIe controller.
 *
 * Sets the per-controller DISABLE_DEVICE bit in AFI_PCIE_CONFIG; the
 * branch choosing controller 0 vs 1 by @index is elided in this
 * excerpt.  Called when a port's link never comes up.
 */
1502 static void tegra_pcie_disable_ctlr(int index)
1507 data = afi_readl(AFI_PCIE_CONFIG);
1509 data |= AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE;
1511 data |= AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE;
1512 afi_writel(data, AFI_PCIE_CONFIG);
/*
 * tegra_pcie_add_port() - probe one root port and register it if up.
 *
 * @index:     logical port number (used for PRSNT override, logging)
 * @offset:    root-port register offset within the mapped aperture
 * @reset_reg: AFI PEX control register used to pulse reset during
 *             link-up retries
 *
 * Checks link state via tegra_pcie_check_link(); a dead link disables
 * the controller to save power, a live one gets the RP feature setup
 * and bumps tegra_pcie.num_ports.
 */
1515 static void tegra_pcie_add_port(int index, u32 offset, u32 reset_reg)
1517 struct tegra_pcie_port *pp;
1520 tegra_pcie_prsnt_map_override(index, true);
1522 pp = tegra_pcie.port + tegra_pcie.num_ports;
1524 pp->base = tegra_pcie.regs + offset;
1525 pp->link_up = tegra_pcie_check_link(pp, index, reset_reg);
1529 pr_info("PCIE: port %d: link down, ignoring\n", index);
1530 tegra_pcie_disable_ctlr(index);
1533 tegra_pcie_enable_rp_features(index);
1535 tegra_pcie.num_ports++;
1537 /* initialize root bus in boot path only */
1539 pp->root_bus_nr = -1;
/*
 * tegra_pcie_check_ports() - enumerate all enabled root ports.
 *
 * Resets num_ports and adds each port whose platform-data status flag
 * is set.  NOTE(review): the offset arithmetic is suspicious —
 * `ctrl_offset += (port * 8)` accumulates across iterations and
 * `rp_offset = (rp_offset + RP_OFFSET) * port` multiplies the running
 * value by the port number.  It happens to produce usable values for
 * ports 0 and 1 but would not generalize; confirm intent if
 * MAX_PCIE_SUPPORTED_PORTS ever exceeds 2.
 */
1542 void tegra_pcie_check_ports(void)
1544 int port, rp_offset = 0;
1545 int ctrl_offset = AFI_PEX0_CTRL;
1548 /* reset number of ports */
1549 tegra_pcie.num_ports = 0;
1551 for (port = 0; port < MAX_PCIE_SUPPORTED_PORTS; port++) {
1552 ctrl_offset += (port * 8);
1553 rp_offset = (rp_offset + RP_OFFSET) * port;
1554 if (tegra_pcie.plat_data->port_status[port])
1555 tegra_pcie_add_port(port, rp_offset, ctrl_offset);
1558 EXPORT_SYMBOL(tegra_pcie_check_ports);
/*
 * tegra_pcie_get_test_info() - expose internals for test code.
 *
 * Hands out the mapped register base through @regs and returns the
 * number of ports whose link came up.
 */
1560 int tegra_pcie_get_test_info(void __iomem **regs)
1562 *regs = tegra_pcie.regs;
1563 return tegra_pcie.num_ports;
1565 EXPORT_SYMBOL(tegra_pcie_get_test_info);
/*
 * tegra_pcie_conf_gpios() - claim and configure board-specific GPIOs.
 *
 * Three optional GPIOs from platform data, each guarded by
 * gpio_is_valid():
 *  - gpio_hot_plug: input + edge-triggered IRQ -> gpio_pcie_detect_isr
 *  - gpio_x1_slot:  output driven high to power the x1 slot
 *  - gpio_wake:     input, later used as a PE_WAKE wake source in
 *                   suspend/resume
 * All requests use devm_* so cleanup is automatic on driver detach.
 */
1567 static int tegra_pcie_conf_gpios(void)
1572 if (gpio_is_valid(tegra_pcie.plat_data->gpio_hot_plug)) {
1573 /* configure gpio for hotplug detection */
1574 dev_info(tegra_pcie.dev, "acquiring hotplug_detect = %d\n",
1575 tegra_pcie.plat_data->gpio_hot_plug);
1576 err = devm_gpio_request(tegra_pcie.dev,
1577 tegra_pcie.plat_data->gpio_hot_plug,
1578 "pcie_hotplug_detect");
1580 dev_err(tegra_pcie.dev, "%s: gpio_request failed %d\n",
1584 err = gpio_direction_input(
1585 tegra_pcie.plat_data->gpio_hot_plug);
1587 dev_err(tegra_pcie.dev,
1588 "%s: gpio_direction_input failed %d\n",
1592 irq = gpio_to_irq(tegra_pcie.plat_data->gpio_hot_plug);
1594 dev_err(tegra_pcie.dev,
1595 "Unable to get irq for hotplug_detect\n");
/* Fire on both insertion and removal edges. */
1598 err = devm_request_irq(tegra_pcie.dev, (unsigned int)irq,
1599 gpio_pcie_detect_isr,
1600 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
1601 "pcie_hotplug_detect",
1602 (void *)tegra_pcie.plat_data);
1604 dev_err(tegra_pcie.dev,
1605 "Unable to claim irq for hotplug_detect\n");
/* x1 slot power enable: drive high. */
1609 if (gpio_is_valid(tegra_pcie.plat_data->gpio_x1_slot)) {
1610 err = devm_gpio_request(tegra_pcie.dev,
1611 tegra_pcie.plat_data->gpio_x1_slot, "pcie_x1_slot");
1613 dev_err(tegra_pcie.dev,
1614 "%s: pcie_x1_slot gpio_request failed %d\n",
1618 err = gpio_direction_output(
1619 tegra_pcie.plat_data->gpio_x1_slot, 1);
1621 dev_err(tegra_pcie.dev,
1622 "%s: pcie_x1_slot gpio_direction_output failed %d\n",
1626 gpio_set_value_cansleep(
1627 tegra_pcie.plat_data->gpio_x1_slot, 1);
/* PE_WAKE input; armed as wake source in suspend_noirq(). */
1629 if (gpio_is_valid(tegra_pcie.plat_data->gpio_wake)) {
1630 err = devm_gpio_request(tegra_pcie.dev,
1631 tegra_pcie.plat_data->gpio_wake, "pcie_wake");
1633 dev_err(tegra_pcie.dev,
1634 "%s: pcie_wake gpio_request failed %d\n",
1638 err = gpio_direction_input(
1639 tegra_pcie.plat_data->gpio_wake);
1641 dev_err(tegra_pcie.dev,
1642 "%s: pcie_wake gpio_direction_input failed %d\n",
/*
 * tegra_pcie_scale_voltage() - pick clock rates for Gen1 vs Gen2.
 *
 * @isGen2: selects the Gen2 (500 MHz xclk / 408 MHz mselect) or Gen1
 *          (250 MHz / 204 MHz) rate pair — the branch itself is elided
 *          in this excerpt.
 *
 * Short-circuits when the globals already hold the target rates, then
 * applies them with clk_set_rate().  Despite the name, only clock
 * rates are set here; any regulator change is not visible in this
 * excerpt.
 */
1650 static int tegra_pcie_scale_voltage(bool isGen2)
1656 if (tegra_pcie_xclk_rate == TEGRA_PCIE_XCLK_500 &&
1657 tegra_pcie_mselect_rate == TEGRA_PCIE_MSELECT_CLK_408)
1659 /* Scale up voltage for Gen2 speed */
1660 tegra_pcie_xclk_rate = TEGRA_PCIE_XCLK_500;
1661 tegra_pcie_mselect_rate = TEGRA_PCIE_MSELECT_CLK_408;
1663 if (tegra_pcie_xclk_rate == TEGRA_PCIE_XCLK_250 &&
1664 tegra_pcie_mselect_rate == TEGRA_PCIE_MSELECT_CLK_204)
1666 /* Scale down voltage for Gen1 speed */
1667 tegra_pcie_xclk_rate = TEGRA_PCIE_XCLK_250;
1668 tegra_pcie_mselect_rate = TEGRA_PCIE_MSELECT_CLK_204;
1670 err = clk_set_rate(tegra_pcie.pcie_xclk, tegra_pcie_xclk_rate);
1673 err = clk_set_rate(tegra_pcie.pcie_mselect, tegra_pcie_mselect_rate);
/*
 * tegra_pcie_change_link_speed() - retrain one link to Gen1 or Gen2.
 *
 * @pdev:   endpoint (upstream side of the link)
 * @isGen2: target speed — the branches keyed on it are partially elided
 *          in this excerpt.
 *
 * Skips non-PCIe devices and root/downstream ports (the change is
 * driven from the endpoint's parent).  Reads current speed (LNKSTA)
 * and maximum supported speed (LNKCAP) on both ends, bails when the
 * transition is impossible or already done, scales clocks via
 * tegra_pcie_scale_voltage(), programs the target in the downstream
 * port's LNKCTL2, and sets the Retrain Link bit in LNKCTL.
 * Returns whether a speed change was initiated (per caller's usage).
 */
1679 static bool tegra_pcie_change_link_speed(struct pci_dev *pdev, bool isGen2)
1681 u16 val, link_sts_up_spd, link_sts_dn_spd;
1682 u16 link_cap_up_spd, link_cap_dn_spd;
1683 struct pci_dev *up_dev, *dn_dev;
1686 /* skip if current device is not PCI express capable */
1687 /* or is either a root port or downstream port */
1688 if (!pci_is_pcie(pdev))
1690 if ((pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM) ||
1691 (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT))
1694 /* initialize upstream/endpoint and downstream/root port device ptr */
1696 dn_dev = pdev->bus->self;
1698 /* read link status register to find current speed */
1699 pcie_capability_read_word(up_dev, PCI_EXP_LNKSTA, &link_sts_up_spd);
1700 link_sts_up_spd &= PCI_EXP_LNKSTA_CLS;
1701 pcie_capability_read_word(dn_dev, PCI_EXP_LNKSTA, &link_sts_dn_spd);
1702 link_sts_dn_spd &= PCI_EXP_LNKSTA_CLS;
1703 /* read link capability register to find max speed supported */
1704 pcie_capability_read_word(up_dev, PCI_EXP_LNKCAP, &link_cap_up_spd);
1705 link_cap_up_spd &= PCI_EXP_LNKCAP_SLS;
1706 pcie_capability_read_word(dn_dev, PCI_EXP_LNKCAP, &link_cap_dn_spd);
1707 link_cap_dn_spd &= PCI_EXP_LNKCAP_SLS;
1708 /* skip if both devices across the link are already trained to gen2 */
1710 if (((link_cap_up_spd >= PCI_EXP_LNKSTA_CLS_5_0GB) &&
1711 (link_cap_dn_spd >= PCI_EXP_LNKSTA_CLS_5_0GB)) &&
1712 ((link_sts_up_spd != PCI_EXP_LNKSTA_CLS_5_0GB) ||
1713 (link_sts_dn_spd != PCI_EXP_LNKSTA_CLS_5_0GB)))
1718 /* gen1 should be supported by default by all pcie cards */
1719 if ((link_sts_up_spd != PCI_EXP_LNKSTA_CLS_2_5GB) ||
1720 (link_sts_dn_spd != PCI_EXP_LNKSTA_CLS_2_5GB))
/* Clock rates must change before a Gen2 transition. */
1727 if (tegra_pcie_scale_voltage(isGen2))
1729 /* Set Link Speed */
1730 pcie_capability_read_word(dn_dev, PCI_EXP_LNKCTL2, &val);
1731 val &= ~PCI_EXP_LNKSTA_CLS;
1733 val |= PCI_EXP_LNKSTA_CLS_5_0GB;
1735 val |= PCI_EXP_LNKSTA_CLS_2_5GB;
1736 pcie_capability_write_word(dn_dev, PCI_EXP_LNKCTL2, val);
1738 /* Retrain the link */
1739 pcie_capability_read_word(dn_dev, PCI_EXP_LNKCTL, &val);
1740 val |= PCI_EXP_LNKCTL_RL;
1741 pcie_capability_write_word(dn_dev, PCI_EXP_LNKCTL, val);
/*
 * tegra_pcie_link_speed() - retrain every link to the requested speed.
 *
 * Walks all PCI devices and attempts a speed change on each; returns
 * whether any link change happened (per the caller in
 * tegra_pcie_enable_features()).
 */
1748 bool tegra_pcie_link_speed(bool isGen2)
1750 struct pci_dev *pdev = NULL;
1754 /* Voltage scaling should happen before any device transition */
1755 /* to Gen2 or after all devices has transitioned to Gen1 */
1756 for_each_pci_dev(pdev) {
1757 if (tegra_pcie_change_link_speed(pdev, isGen2))
1762 EXPORT_SYMBOL(tegra_pcie_link_speed);
1764 /* support PLL power down in L1 dynamically based on platform */
/*
 * tegra_pcie_pll_pdn() - set CLKREQ# pad pulls based on device support.
 *
 * For each endpoint directly below a root port: if its LNKCAP
 * advertises Clock Power Management, pull both CLKREQ# pads up
 * (allowing PLL power-down in L1); otherwise pull them down.
 * NOTE(review): both L0 and L1 pads are programmed regardless of which
 * port the device sits on — presumably intentional for this board;
 * confirm.
 */
1765 static void tegra_pcie_pll_pdn(void)
1767 struct pci_dev *pdev = NULL;
1770 /* CLKREQ# to PD if device connected to RP doesn't have CLKREQ# */
1771 /* capability(no PLL power down in L1 here) and PU if they have */
1772 for_each_pci_dev(pdev) {
1773 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
1776 if ((pci_pcie_type(pdev->bus->self) ==
1777 PCI_EXP_TYPE_ROOT_PORT)) {
1780 pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &val);
1781 if (val & PCI_EXP_LNKCAP_CLKPM) {
1782 tegra_pinmux_set_pullupdown(
1783 TEGRA_PINGROUP_PEX_L0_CLKREQ_N,
1784 TEGRA_PUPD_PULL_UP);
1785 tegra_pinmux_set_pullupdown(
1786 TEGRA_PINGROUP_PEX_L1_CLKREQ_N,
1787 TEGRA_PUPD_PULL_UP);
1789 tegra_pinmux_set_pullupdown(
1790 TEGRA_PINGROUP_PEX_L0_CLKREQ_N,
1791 TEGRA_PUPD_PULL_DOWN);
1792 tegra_pinmux_set_pullupdown(
1793 TEGRA_PINGROUP_PEX_L1_CLKREQ_N,
1794 TEGRA_PUPD_PULL_DOWN);
1801 /* Enable ASPM support of all devices based on it's capability */
/*
 * tegra_pcie_enable_aspm() - program LNKCTL ASPM bits per capability.
 *
 * No-op (with a log) when the kernel's ASPM policy is disabled.  For
 * every device, reads the supported ASPM states from LNKCAP and writes
 * them into LNKCTL; on Tegra12x SoCs L0s is then explicitly cleared
 * (presumably an errata workaround — TODO confirm).
 */
1802 static void tegra_pcie_enable_aspm(void)
1804 struct pci_dev *pdev = NULL;
1805 u16 val = 0, aspm = 0;
1808 if (!pcie_aspm_support_enabled()) {
1809 pr_info("PCIE: ASPM not enabled\n");
1812 for_each_pci_dev(pdev) {
1813 /* Find ASPM capability */
1814 pcie_capability_read_word(pdev, PCI_EXP_LNKCAP, &aspm);
1815 aspm &= PCI_EXP_LNKCAP_ASPMS;
1817 /* Enable ASPM support as per capability */
1818 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &val);
1820 pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, val);
1821 #if defined CONFIG_ARCH_TEGRA_12x_SOC
1822 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
1823 PCI_EXP_LNKCTL_ASPM_L0S)
/*
 * tegra_pcie_enable_features() - post-enumeration feature enable.
 *
 * Runs after the bus is scanned (boot and resume paths): tries Gen2 on
 * all links, sets CLKREQ# pulls for PLL power-down, enables ASPM, and
 * applies the post-enumeration software workarounds.
 */
1828 static void tegra_pcie_enable_features(void)
1832 /* configure all links to gen2 speed by default */
1833 if (!tegra_pcie_link_speed(true))
1834 pr_info("PCIE: No Link speed change happened\n");
1836 tegra_pcie_pll_pdn();
1837 tegra_pcie_enable_aspm();
/* true = enumeration done; applies the MSI-disable WAR. */
1838 tegra_pcie_apply_sw_war(0, true);
/*
 * tegra_pcie_init() - one-shot controller bring-up (called from probe).
 *
 * Sequence: set PCI resource minimums, init bus list and hotplug work,
 * acquire resources (clocks/power/IRQ), enable pads and the controller,
 * configure board GPIOs, program AFI address translations, probe ports,
 * and — if any link is up — run the common PCI scan and enable the
 * optional features.  The error unwind at the bottom powers everything
 * off.  Intervening error-check branches are elided in this excerpt.
 */
1841 static int __init tegra_pcie_init(void)
1845 pcibios_min_mem = 0x03000000ul;
1846 pcibios_min_io = 0x1000ul;
1849 INIT_LIST_HEAD(&tegra_pcie.busses);
1850 INIT_WORK(&tegra_pcie.hotplug_detect, work_hotplug_handler);
1851 err = tegra_pcie_get_resources();
1853 pr_err("PCIE: get resources failed\n");
1856 err = tegra_pcie_enable_pads(true);
1858 pr_err("PCIE: enable pads failed\n");
1859 tegra_pcie_power_off(false);
1862 err = tegra_pcie_enable_controller();
1864 pr_err("PCIE: enable controller failed\n");
1867 err = tegra_pcie_conf_gpios();
1869 pr_err("PCIE: configuring gpios failed\n");
1872 /* setup the AFI address translations */
1873 tegra_pcie_setup_translations();
1874 tegra_pcie_check_ports();
1876 if (tegra_pcie.num_ports)
1877 pci_common_init(&tegra_pcie_hw);
1879 pr_err("PCIE: no ports detected\n");
1882 tegra_pcie_enable_features();
1883 /* register pcie device as wakeup source */
1884 device_init_wakeup(tegra_pcie.dev, true);
/* Error unwind: full power-down. */
1888 tegra_pcie_power_off(true);
/*
 * tegra_pcie_read_plat_data() - fill platform data from the device tree.
 *
 * Reads per-port enable flags, the hot-plug / wake / x1-slot GPIOs, and
 * the has_clkreq flag from the controller's DT node into
 * tegra_pcie.plat_data.  of_property_read_* leaves fields untouched
 * when a property is absent.
 */
1892 static void tegra_pcie_read_plat_data(void)
1894 struct device_node *node = tegra_pcie.dev->of_node;
1897 of_property_read_u32(node, "nvidia,port0_status",
1898 &tegra_pcie.plat_data->port_status[0]);
1899 of_property_read_u32(node, "nvidia,port1_status",
1900 &tegra_pcie.plat_data->port_status[1]);
1901 tegra_pcie.plat_data->gpio_hot_plug =
1902 of_get_named_gpio(node, "nvidia,hot-plug-gpio", 0);
1903 tegra_pcie.plat_data->gpio_wake =
1904 of_get_named_gpio(node, "nvidia,wake-gpio", 0);
1905 tegra_pcie.plat_data->gpio_x1_slot =
1906 of_get_named_gpio(node, "nvidia,x1-slot-gpio", 0);
1907 tegra_pcie.plat_data->has_clkreq =
1908 of_property_read_bool(node, "has_clkreq");
/* Device-tree match table: binds this driver to tegra124-pcie nodes. */
1911 static struct of_device_id tegra_pcie_of_match[] = {
1912 { .compatible = "nvidia,tegra124-pcie", },
/*
 * tegra_pcie_probe() - platform-driver probe entry point.
 *
 * Stores the device in the global state, allocates platform data and
 * fills it from DT when an of_node is present, enables runtime PM, and
 * hands off to tegra_pcie_init() for the actual bring-up.  On init
 * failure the device is removed from its PM domain.
 */
1916 static int __init tegra_pcie_probe(struct platform_device *pdev)
1921 tegra_pcie.dev = &pdev->dev;
1922 if (tegra_pcie.dev->of_node) {
1923 /* use DT way to init platform data */
1924 tegra_pcie.plat_data = devm_kzalloc(tegra_pcie.dev,
1925 sizeof(*tegra_pcie.plat_data), GFP_KERNEL);
1926 if (!tegra_pcie.plat_data) {
1927 dev_err(tegra_pcie.dev, "memory alloc failed\n");
1930 tegra_pcie_read_plat_data();
1932 dev_dbg(tegra_pcie.dev, "PCIE.C: %s : _port_status[0] %d\n",
1933 __func__, tegra_pcie.plat_data->port_status[0]);
1934 dev_dbg(tegra_pcie.dev, "PCIE.C: %s : _port_status[1] %d\n",
1935 __func__, tegra_pcie.plat_data->port_status[1]);
1937 /* Enable Runtime PM for PCIe, TODO: Need to add PCIe host device */
1938 pm_runtime_enable(tegra_pcie.dev);
1940 ret = tegra_pcie_init();
1942 tegra_pd_remove_device(tegra_pcie.dev);
/*
 * tegra_pcie_suspend_noirq() - late suspend hook.
 *
 * Arms the PE_WAKE GPIO interrupt as a wake source when the device may
 * wake the system, then fully powers the controller down.
 */
1948 static int tegra_pcie_suspend_noirq(struct device *dev)
1953 /* configure PE_WAKE signal as wake sources */
1954 if (gpio_is_valid(tegra_pcie.plat_data->gpio_wake) &&
1955 device_may_wakeup(dev)) {
1956 ret = enable_irq_wake(gpio_to_irq(
1957 tegra_pcie.plat_data->gpio_wake));
1960 "ID wake-up event failed with error %d\n", ret);
1964 return tegra_pcie_power_off(true);
/* Forward declaration; defined below with the MSI support code. */
1967 static bool tegra_pcie_enable_msi(bool);
/*
 * tegra_pcie_resume_noirq() - early resume hook.
 *
 * Mirror of suspend_noirq: disarm the PE_WAKE wake IRQ, power the
 * controller back on, re-run pad/controller/translation setup, restore
 * MSI registers, and re-probe the ports.  If no link comes back up the
 * controller is powered off again.
 */
1969 static int tegra_pcie_resume_noirq(struct device *dev)
1976 if (gpio_is_valid(tegra_pcie.plat_data->gpio_wake) &&
1977 device_may_wakeup(dev)) {
1978 ret = disable_irq_wake(gpio_to_irq(
1979 tegra_pcie.plat_data->gpio_wake));
1982 "ID wake-up event failed with error %d\n", ret);
1986 /* give 100ms for 1.05v to come up */
1988 ret = tegra_pcie_power_on();
1990 pr_err("PCIE: Failed to power on: %d\n", ret);
1993 tegra_pcie_enable_pads(true);
1994 tegra_pcie_enable_controller();
1995 tegra_pcie_setup_translations();
1996 /* Set up MSI registers, if MSI have been enabled */
1997 tegra_pcie_enable_msi(true);
1999 tegra_pcie_check_ports();
2000 if (!tegra_pcie.num_ports) {
2001 tegra_pcie_power_off(true);
2004 resume_path = false;
/*
 * tegra_pcie_resume() - normal (post-noirq) resume hook; re-enables
 * Gen2/ASPM/CLKREQ features once the bus is operational again.
 */
2010 static int tegra_pcie_resume(struct device *dev)
2013 tegra_pcie_enable_features();
/*
 * tegra_pcie_remove() - platform-driver remove hook.
 *
 * Unmaps each cached bus config-space mapping (vunmap of the vm_struct
 * areas tracked on tegra_pcie.busses), detaches the controller, and
 * removes the device from its PM domain.  NOTE(review): entries are
 * vunmap'd but the list-entry frees are not visible in this excerpt.
 */
2018 static int tegra_pcie_remove(struct platform_device *pdev)
2020 struct tegra_pcie_bus *bus;
2023 list_for_each_entry(bus, &tegra_pcie.busses, list) {
2024 vunmap(bus->area->addr);
2027 tegra_pcie_detach();
2028 tegra_pd_remove_device(tegra_pcie.dev);
/* PM callbacks: power off late in suspend, restore early in resume,
 * then re-enable features in the normal resume phase. */
2034 static const struct dev_pm_ops tegra_pcie_pm_ops = {
2035 .suspend_noirq = tegra_pcie_suspend_noirq,
2036 .resume_noirq = tegra_pcie_resume_noirq,
2037 .resume = tegra_pcie_resume,
2041 /* driver data is accessed after init, so use __refdata instead of __initdata */
/* Platform driver glue: matched by name or by the DT table above. */
2042 static struct platform_driver __refdata tegra_pcie_driver = {
2043 .probe = tegra_pcie_probe,
2044 .remove = tegra_pcie_remove,
2046 .name = "tegra-pcie",
2047 .owner = THIS_MODULE,
2049 .pm = &tegra_pcie_pm_ops,
2051 .of_match_table = tegra_pcie_of_match,
/* Module init: skip registration on simulator/QT platforms where no
 * real PCIe hardware exists. */
2055 static int __init tegra_pcie_init_driver(void)
2057 if (tegra_platform_is_linsim() || tegra_platform_is_qt())
2059 return platform_driver_register(&tegra_pcie_driver);
/* Module exit: mirror of init — no-op on simulator/QT. */
2062 static void __exit_refok tegra_pcie_exit_driver(void)
2064 if (tegra_platform_is_linsim() || tegra_platform_is_qt())
2066 platform_driver_unregister(&tegra_pcie_driver);
2069 module_init(tegra_pcie_init_driver);
2070 module_exit(tegra_pcie_exit_driver);
/* irq_chip for the software-demultiplexed MSI vectors: all four ops
 * delegate to the generic MSI mask/unmask helpers. */
2072 static struct irq_chip tegra_irq_chip_msi_pcie = {
2074 .irq_mask = mask_msi_irq,
2075 .irq_unmask = unmask_msi_irq,
2076 .irq_enable = unmask_msi_irq,
2077 .irq_disable = mask_msi_irq,
2080 /* 1:1 matching of these to the MSI vectors, 1 per bit */
2081 /* and each mapping matches one of the available interrupts */
2082 /* irq should equal INT_PCI_MSI_BASE + index */
/* One slot per hardware MSI vector: holds the Linux irq number, the
 * vector index, and an in-use flag (fields elided in this excerpt). */
2083 struct msi_map_entry {
2089 /* hardware supports 256 max*/
2090 #if (INT_PCI_MSI_NR > 256)
2091 #error "INT_PCI_MSI_NR too big"
2094 #define MSI_MAP_SIZE (INT_PCI_MSI_NR)
/* Global vector allocation table, indexed by MSI vector number. */
2095 static struct msi_map_entry msi_map[MSI_MAP_SIZE];
/* Reset the MSI vector table: every slot free, index == position. */
2097 static void msi_map_init(void)
2101 for (i = 0; i < MSI_MAP_SIZE; i++) {
2102 msi_map[i].used = false;
2103 msi_map[i].index = i;
2108 /* returns an index into the map*/
/*
 * msi_map_get() - allocate the first free MSI vector slot.
 *
 * Returns the claimed entry (irq = INT_PCI_MSI_BASE + index) or NULL
 * when all vectors are in use.  NOTE(review): no locking is visible in
 * this excerpt — concurrent arch_setup_msi_irq() calls could race on
 * the used flag; confirm the caller serializes.
 */
2109 static struct msi_map_entry *msi_map_get(void)
2111 struct msi_map_entry *retval = NULL;
2114 for (i = 0; i < MSI_MAP_SIZE; i++) {
2115 if (!msi_map[i].used) {
2116 retval = msi_map + i;
2117 retval->irq = INT_PCI_MSI_BASE + i;
2118 retval->used = true;
/* Return a vector slot to the free pool (inverse of msi_map_get()). */
2126 void msi_map_release(struct msi_map_entry *entry)
2129 entry->used = false;
/*
 * tegra_pcie_msi_isr() - demultiplex the single hardware MSI interrupt.
 *
 * The AFI latches pending MSIs in eight 32-bit vector registers.  For
 * each register: find a set bit, write-1-to-clear it, and dispatch the
 * mapped Linux irq via generic_handle_irq(); re-read until the register
 * drains.  Bits with no mapping are logged as unexpected.
 */
2134 static irqreturn_t tegra_pcie_msi_isr(int irq, void *arg)
2136 int i, offset, index;
2140 /* suppress print spews in debug mode */
2146 for (i = 0; i < 8; i++) {
2147 reg = afi_readl(AFI_MSI_VEC0_0 + i * 4);
2148 while (reg != 0x00000000) {
2149 offset = find_first_bit((unsigned long int *)&reg, 32);
2150 index = i * 32 + offset;
2151 /* clear the interrupt */
2152 afi_writel(1ul << index, AFI_MSI_VEC0_0 + i * 4);
2153 if (index < MSI_MAP_SIZE) {
2154 if (msi_map[index].used)
2155 generic_handle_irq(msi_map[index].irq);
2157 pr_info("unexpected MSI (1)\n");
2159 /* that's weird who triggered this?*/
2161 pr_info("unexpected MSI (2)\n");
2163 /* see if there's any more pending in this vector */
2164 reg = afi_readl(AFI_MSI_VEC0_0 + i * 4);
/*
 * tegra_pcie_enable_msi() - set up the AFI MSI target window.
 *
 * @no_init: when true (resume path), only reprogram hardware if MSI was
 *           already initialized — the guarding branch is elided in this
 *           excerpt.
 *
 * Registers the shared MSI demux interrupt, allocates an 8-page buffer
 * to serve as the MSI write target, programs its FPCI/AXI base and the
 * 4K-granule window size into the AFI, enables all 256 vectors, and
 * unmasks the MSI summary interrupt.  Returns success as bool.
 * NOTE(review): __get_free_pages(order 3) is flagged by the in-code
 * FIXME — the window size should be derived from PAGE_SIZE.
 */
2171 static bool tegra_pcie_enable_msi(bool no_init)
/* static: survives calls so resume can re-use the same buffer */
2174 static uintptr_t msi_base;
2178 /* if not already initialized and no_init, nothing to do */
2184 /* enables MSI interrupts. */
2185 if (request_irq(INT_PCIE_MSI, tegra_pcie_msi_isr,
2186 IRQF_SHARED, "PCIe-MSI", tegra_pcie_msi_isr)) {
2187 pr_err("%s: Cannot register IRQ %u\n",
2188 __func__, INT_PCIE_MSI);
2191 /* setup AFI/FPCI range */
2192 /* FIXME do this better! should be based on PAGE_SIZE */
2193 msi_base = __get_free_pages(GFP_KERNEL, 3);
2195 pr_err("PCIE: Insufficient memory\n");
2198 msi_base = virt_to_phys((void *)msi_base);
2201 afi_writel(msi_base>>8, AFI_MSI_FPCI_BAR_ST);
2202 afi_writel(msi_base, AFI_MSI_AXI_BAR_ST);
2203 /* this register is in 4K increments */
2204 afi_writel(1, AFI_MSI_BAR_SZ);
2206 /* enable all MSI vectors */
2207 afi_writel(0xffffffff, AFI_MSI_EN_VEC0_0);
2208 afi_writel(0xffffffff, AFI_MSI_EN_VEC1_0);
2209 afi_writel(0xffffffff, AFI_MSI_EN_VEC2_0);
2210 afi_writel(0xffffffff, AFI_MSI_EN_VEC3_0);
2211 afi_writel(0xffffffff, AFI_MSI_EN_VEC4_0);
2212 afi_writel(0xffffffff, AFI_MSI_EN_VEC5_0);
2213 afi_writel(0xffffffff, AFI_MSI_EN_VEC6_0);
2214 afi_writel(0xffffffff, AFI_MSI_EN_VEC7_0);
2216 /* and unmask the MSI interrupt */
2218 reg |= (AFI_INTR_MASK_INT_MASK | AFI_INTR_MASK_MSI_MASK);
2219 afi_writel(reg, AFI_INTR_MASK);
2221 set_irq_flags(INT_PCIE_MSI, IRQF_VALID);
2227 /* called by arch_setup_msi_irqs in drivers/pci/msi.c */
/*
 * arch_setup_msi_irq() - arch hook wiring one MSI descriptor to a vector.
 *
 * Lazily initializes the MSI hardware, claims a vector slot, allocates
 * and configures the irq descriptor with the software MSI irq_chip,
 * binds the msi_desc, and writes the MSI message (AXI BAR address, data
 * = vector index) to the device.  The error unwind frees the irq desc
 * and releases the vector slot.  Returns 0 on success, negative errno
 * otherwise.
 */
2228 int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
2230 int retval = -EINVAL;
2232 struct msi_map_entry *map_entry = NULL;
2235 if (!tegra_pcie_enable_msi(false))
2238 map_entry = msi_map_get();
2239 if (map_entry == NULL)
2242 retval = irq_alloc_desc(map_entry->irq);
2245 irq_set_chip_and_handler(map_entry->irq,
2246 &tegra_irq_chip_msi_pcie,
2249 retval = irq_set_msi_desc(map_entry->irq, desc);
2252 set_irq_flags(map_entry->irq, IRQF_VALID);
2254 msg.address_lo = afi_readl(AFI_MSI_AXI_BAR_ST);
2255 /* 32 bit address only */
2257 msg.data = map_entry->index;
2259 write_msi_msg(map_entry->irq, &msg);
/* Error unwind: release the descriptor and the vector slot. */
2265 irq_free_desc(map_entry->irq);
2266 msi_map_release(map_entry);
2273 void arch_teardown_msi_irq(unsigned int irq)
2278 for (i = 0; i < MSI_MAP_SIZE; i++) {
2279 if ((msi_map[i].used) && (msi_map[i].irq == irq)) {
2280 irq_free_desc(msi_map[i].irq);
2281 msi_map_release(msi_map + i);