2 * PCIe host controller driver for TEGRA SOCs
4 * Copyright (c) 2010, CompuLab, Ltd.
5 * Author: Mike Rapoport <mike@compulab.co.il>
7 * Based on NVIDIA PCIe driver
8 * Copyright (c) 2008-2016, NVIDIA Corporation. All rights reserved.
10 * Bits taken from arch/arm/mach-dove/pcie.c
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
27 #include <linux/kernel.h>
28 #include <linux/module.h>
29 #include <linux/debugfs.h>
30 #include <linux/uaccess.h>
31 #include <linux/pci.h>
32 #include <linux/interrupt.h>
33 #include <linux/irq.h>
34 #include <linux/irqdomain.h>
35 #include <linux/clk.h>
36 #include <linux/delay.h>
37 #include <linux/export.h>
38 #include <linux/clk/tegra.h>
39 #include <linux/msi.h>
40 #include <linux/slab.h>
41 #include <linux/platform_device.h>
42 #include <linux/regulator/consumer.h>
43 #include <linux/workqueue.h>
44 #include <linux/gpio.h>
45 #include <linux/clk.h>
46 #include <linux/clk/tegra.h>
47 #include <linux/async.h>
48 #include <linux/vmalloc.h>
49 #include <linux/pm_runtime.h>
50 #include <linux/tegra-powergate.h>
51 #include <linux/tegra-soc.h>
52 #include <linux/of_device.h>
53 #include <linux/of_address.h>
54 #include <linux/of_gpio.h>
55 #include <linux/of_pci.h>
56 #include <linux/tegra_prod.h>
57 #include <linux/tegra_pm_domains.h>
58 #include <linux/pinctrl/pinctrl.h>
59 #include <linux/pinctrl/consumer.h>
60 #include <linux/pinctrl/pinconf-tegra.h>
62 #include <asm/sizes.h>
63 #include <asm/mach/pci.h>
66 #include <mach/tegra_usb_pad_ctrl.h>
67 #include <mach/io_dpd.h>
68 #include <linux/pci-tegra.h>
70 #define PCI_CFG_SPACE_SIZE 256
71 #define PCI_EXT_CFG_SPACE_SIZE 4096
73 #define AFI_AXI_BAR0_SZ 0x00
74 #define AFI_AXI_BAR1_SZ 0x04
75 #define AFI_AXI_BAR2_SZ 0x08
76 #define AFI_AXI_BAR3_SZ 0x0c
77 #define AFI_AXI_BAR4_SZ 0x10
78 #define AFI_AXI_BAR5_SZ 0x14
80 #define AFI_AXI_BAR0_START 0x18
81 #define AFI_AXI_BAR1_START 0x1c
82 #define AFI_AXI_BAR2_START 0x20
83 #define AFI_AXI_BAR3_START 0x24
84 #define AFI_AXI_BAR4_START 0x28
85 #define AFI_AXI_BAR5_START 0x2c
87 #define AFI_FPCI_BAR0 0x30
88 #define AFI_FPCI_BAR1 0x34
89 #define AFI_FPCI_BAR2 0x38
90 #define AFI_FPCI_BAR3 0x3c
91 #define AFI_FPCI_BAR4 0x40
92 #define AFI_FPCI_BAR5 0x44
94 #define AFI_CACHE_BAR0_SZ 0x48
95 #define AFI_CACHE_BAR0_ST 0x4c
96 #define AFI_CACHE_BAR1_SZ 0x50
97 #define AFI_CACHE_BAR1_ST 0x54
99 #define AFI_MSI_BAR_SZ 0x60
100 #define AFI_MSI_FPCI_BAR_ST 0x64
101 #define AFI_MSI_AXI_BAR_ST 0x68
103 #define AFI_MSI_VEC0_0 0x6c
104 #define AFI_MSI_VEC1_0 0x70
105 #define AFI_MSI_VEC2_0 0x74
106 #define AFI_MSI_VEC3_0 0x78
107 #define AFI_MSI_VEC4_0 0x7c
108 #define AFI_MSI_VEC5_0 0x80
109 #define AFI_MSI_VEC6_0 0x84
110 #define AFI_MSI_VEC7_0 0x88
112 #define AFI_MSI_EN_VEC0_0 0x8c
113 #define AFI_MSI_EN_VEC1_0 0x90
114 #define AFI_MSI_EN_VEC2_0 0x94
115 #define AFI_MSI_EN_VEC3_0 0x98
116 #define AFI_MSI_EN_VEC4_0 0x9c
117 #define AFI_MSI_EN_VEC5_0 0xa0
118 #define AFI_MSI_EN_VEC6_0 0xa4
119 #define AFI_MSI_EN_VEC7_0 0xa8
121 #define AFI_CONFIGURATION 0xac
122 #define AFI_CONFIGURATION_EN_FPCI (1 << 0)
124 #define AFI_FPCI_ERROR_MASKS 0xb0
126 #define AFI_INTR_MASK 0xb4
127 #define AFI_INTR_MASK_INT_MASK (1 << 0)
128 #define AFI_INTR_MASK_MSI_MASK (1 << 8)
130 #define AFI_INTR_CODE 0xb8
131 #define AFI_INTR_CODE_MASK 0x1f
132 #define AFI_INTR_MASTER_ABORT 4
133 #define AFI_INTR_LEGACY 6
134 #define AFI_INTR_PRSNT_SENSE 10
136 #define AFI_INTR_SIGNATURE 0xbc
137 #define AFI_SM_INTR_ENABLE 0xc4
139 #define AFI_AFI_INTR_ENABLE 0xc8
140 #define AFI_INTR_EN_INI_SLVERR (1 << 0)
141 #define AFI_INTR_EN_INI_DECERR (1 << 1)
142 #define AFI_INTR_EN_TGT_SLVERR (1 << 2)
143 #define AFI_INTR_EN_TGT_DECERR (1 << 3)
144 #define AFI_INTR_EN_TGT_WRERR (1 << 4)
145 #define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
146 #define AFI_INTR_EN_AXI_DECERR (1 << 6)
147 #define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
148 #define AFI_INTR_EN_PRSNT_SENSE (1 << 8)
150 #define AFI_PCIE_PME 0x0f0
151 #define AFI_PCIE_PME_TURN_OFF 0x101
152 #define AFI_PCIE_PME_ACK 0x420
154 #define AFI_PCIE_CONFIG 0x0f8
155 #define AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE (1 << 1)
156 #define AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE (1 << 2)
157 #define AFI_PCIE_CONFIG_XBAR_CONFIG_MASK (0xf << 20)
158 #define AFI_PCIE_CONFIG_XBAR_CONFIG_X2_X1 (0x0 << 20)
159 #define AFI_PCIE_CONFIG_XBAR_CONFIG_X4_X1 (0x1 << 20)
161 #define AFI_FUSE 0x104
162 #define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
164 #define AFI_PEX0_CTRL 0x110
165 #define AFI_PEX1_CTRL 0x118
166 #define AFI_PEX_CTRL_RST (1 << 0)
167 #define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
168 #define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
169 #define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4)
171 #define AFI_PLLE_CONTROL 0x160
172 #define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
173 #define AFI_PLLE_CONTROL_BYPASS_PCIE2PLLE_CONTROL (1 << 8)
174 #define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
175 #define AFI_PLLE_CONTROL_PCIE2PLLE_CONTROL_EN (1 << 0)
177 #define AFI_PEXBIAS_CTRL_0 0x168
178 #define AFI_WR_SCRATCH_0 0x120
179 #define AFI_WR_SCRATCH_0_RESET_VAL 0x00202020
180 #define AFI_WR_SCRATCH_0_DEFAULT_VAL 0x00000000
182 #define AFI_MSG_0 0x190
183 #define AFI_MSG_PM_PME_MASK 0x00100010
184 #define AFI_MSG_INTX_MASK 0x1f001f00
185 #define AFI_MSG_PM_PME0 (1 << 4)
186 #define AFI_MSG_RP_INT_MASK 0x10001000
188 #define RP_VEND_XP 0x00000F00
189 #define RP_VEND_XP_OPPORTUNISTIC_ACK (1 << 27)
190 #define RP_VEND_XP_OPPORTUNISTIC_UPDATEFC (1 << 28)
191 #define RP_VEND_XP_DL_UP (1 << 30)
192 #define RP_VEND_XP_UPDATE_FC_THRESHOLD (0xFF << 18)
194 #define RP_LINK_CONTROL_STATUS 0x00000090
195 #define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
196 #define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
197 #define RP_LINK_CONTROL_STATUS_NEG_LINK_WIDTH (0x3F << 20)
198 #define RP_LINK_CONTROL_STATUS_L0s_ENABLED 0x00000001
199 #define RP_LINK_CONTROL_STATUS_L1_ENABLED 0x00000002
201 #define RP_LINK_CONTROL_STATUS_2 0x000000B0
202 #define RP_LINK_CONTROL_STATUS_2_TRGT_LNK_SPD_MASK 0x0000000F
203 #define RP_LINK_CONTROL_STATUS_2_TRGT_LNK_SPD_GEN1 0x00000001
204 #define RP_LINK_CONTROL_STATUS_2_TRGT_LNK_SPD_GEN2 0x00000002
206 #define NV_PCIE2_RP_RSR 0x000000A0
207 #define NV_PCIE2_RP_RSR_PMESTAT (1 << 16)
209 #define NV_PCIE2_RP_INTR_BCR 0x0000003C
210 #define NV_PCIE2_RP_INTR_BCR_INTR_LINE (0xFF << 0)
211 #define NV_PCIE2_RP_INTR_BCR_SB_RESET (0x1 << 22)
213 #define NV_PCIE2_RP_PRIV_XP_DL 0x00000494
214 #define PCIE2_RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD (0x1FF << 1)
216 #define NV_PCIE2_RP_RX_HDR_LIMIT 0x00000E00
217 #define PCIE2_RP_RX_HDR_LIMIT_PW_MASK (0xFF00)
218 #define PCIE2_RP_RX_HDR_LIMIT_PW (0x0E << 8)
220 #define NV_PCIE2_RP_TX_HDR_LIMIT 0x00000E08
221 #define PCIE2_RP_TX_HDR_LIMIT_NPT_0 32
222 #define PCIE2_RP_TX_HDR_LIMIT_NPT_1 4
224 #define NV_PCIE2_RP_TIMEOUT0 0x00000E24
225 #define PCIE2_RP_TIMEOUT0_PAD_PWRUP_MASK (0xFF)
226 #define PCIE2_RP_TIMEOUT0_PAD_PWRUP (0xA)
227 #define PCIE2_RP_TIMEOUT0_PAD_PWRUP_CM_MASK (0xFFFF00)
228 #define PCIE2_RP_TIMEOUT0_PAD_PWRUP_CM (0x180 << 8)
229 #define PCIE2_RP_TIMEOUT0_PAD_SPDCHNG_GEN2_MASK (0xFF << 24)
230 #define PCIE2_RP_TIMEOUT0_PAD_SPDCHNG_GEN2 (0xA << 24)
232 #define NV_PCIE2_RP_TIMEOUT1 0x00000E28
233 #define PCIE2_RP_TIMEOUT1_RCVRY_SPD_SUCCESS_EIDLE_MASK (0xFF << 16)
234 #define PCIE2_RP_TIMEOUT1_RCVRY_SPD_SUCCESS_EIDLE (0x10 << 16)
235 #define PCIE2_RP_TIMEOUT1_RCVRY_SPD_UNSUCCESS_EIDLE_MASK (0xFF << 24)
236 #define PCIE2_RP_TIMEOUT1_RCVRY_SPD_UNSUCCESS_EIDLE (0x74 << 24)
238 #define NV_PCIE2_RP_LTSSM_DBGREG 0x00000E44
239 #define PCIE2_RP_LTSSM_DBGREG_LINKFSM15 (1 << 15)
240 #define PCIE2_RP_LTSSM_DBGREG_LINKFSM16 (1 << 16)
241 #define PCIE2_RP_LTSSM_DBGREG_LINKFSM17 (1 << 17)
243 #define NV_PCIE2_RP_XP_REF 0x00000F30
244 #define PCIE2_RP_XP_REF_MICROSECOND_LIMIT_MASK (0xFF)
245 #define PCIE2_RP_XP_REF_MICROSECOND_LIMIT (0x14)
246 #define PCIE2_RP_XP_REF_MICROSECOND_ENABLE (1 << 8)
247 #define PCIE2_RP_XP_REF_CPL_TO_OVERRIDE (1 << 13)
248 #define PCIE2_RP_XP_REF_CPL_TO_CUSTOM_VALUE_MASK (0x1FFFF << 14)
249 #define PCIE2_RP_XP_REF_CPL_TO_CUSTOM_VALUE (0x1770 << 14)
251 #define NV_PCIE2_RP_PRIV_MISC 0x00000FE0
252 #define PCIE2_RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0)
253 #define PCIE2_RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0)
254 #define PCIE2_RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD (0xF << 16)
255 #define PCIE2_RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE (1 << 23)
256 #define PCIE2_RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD (0xF << 24)
257 #define PCIE2_RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE (1 << 31)
259 #define NV_PCIE2_RP_VEND_XP1 0x00000F04
260 #define NV_PCIE2_RP_VEND_XP2 0x00000F08
261 #define NV_PCIE2_RP_VEND_XP_LINK_PVT_CTL_L1_ASPM_SUPPORT (1 << 21)
262 #define NV_PCIE2_RP_VEND_XP1_RNCTRL_MAXWIDTH_MASK (0x3F << 0)
263 #define NV_PCIE2_RP_VEND_XP1_RNCTRL_EN (1 << 7)
265 #define NV_PCIE2_RP_VEND_CTL0 0x00000F44
266 #define PCIE2_RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK (0xF << 12)
267 #define PCIE2_RP_VEND_CTL0_DSK_RST_PULSE_WIDTH (0x9 << 12)
269 #define NV_PCIE2_RP_VEND_CTL1 0x00000F48
270 #define PCIE2_RP_VEND_CTL1_ERPT (1 << 13)
272 #define NV_PCIE2_RP_VEND_XP_BIST 0x00000F4C
273 #define PCIE2_RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE (1 << 28)
275 #define NV_PCIE2_RP_VEND_XP_PAD_PWRDN 0x00000F50
276 #define NV_PCIE2_RP_VEND_XP_PAD_PWRDN_L1_EN (1 << 0)
277 #define NV_PCIE2_RP_VEND_XP_PAD_PWRDN_DYNAMIC_EN (1 << 1)
278 #define NV_PCIE2_RP_VEND_XP_PAD_PWRDN_DISABLED_EN (1 << 2)
279 #define NV_PCIE2_RP_VEND_XP_PAD_PWRDN_L1_CLKREQ_EN (1 << 15)
280 #define NV_PCIE2_RP_VEND_XP_PAD_PWRDN_SLEEP_MODE_DYNAMIC_L1PP (3 << 5)
281 #define NV_PCIE2_RP_VEND_XP_PAD_PWRDN_SLEEP_MODE_L1_L1P (2 << 3)
282 #define NV_PCIE2_RP_VEND_XP_PAD_PWRDN_SLEEP_MODE_L1_L1PP (3 << 3)
283 #define NV_PCIE2_RP_VEND_XP_PAD_PWRDN_SLEEP_MODE_L1_CLKREQ_L1P (2 << 16)
284 #define NV_PCIE2_RP_VEND_XP_PAD_PWRDN_SLEEP_MODE_L1_CLKREQ_L1PP (3 << 16)
286 #define NV_PCIE2_RP_PRIV_XP_RX_L0S_ENTRY_COUNT 0x00000F8C
287 #define NV_PCIE2_RP_PRIV_XP_TX_L0S_ENTRY_COUNT 0x00000F90
288 #define NV_PCIE2_RP_PRIV_XP_TX_L1_ENTRY_COUNT 0x00000F94
289 #define NV_PCIE2_RP_LTR_REP_VAL 0x00000C10
290 #define NV_PCIE2_RP_L1_1_ENTRY_COUNT 0x00000C14
291 #define PCIE2_RP_L1_1_ENTRY_COUNT_RESET (1 << 31)
292 #define NV_PCIE2_RP_L1_2_ENTRY_COUNT 0x00000C18
293 #define PCIE2_RP_L1_2_ENTRY_COUNT_RESET (1 << 31)
295 #define NV_PCIE2_RP_VEND_CTL2 0x00000FA8
296 #define PCIE2_RP_VEND_CTL2_PCA_ENABLE (1 << 7)
298 #define NV_PCIE2_RP_PRIV_XP_CONFIG 0x00000FAC
299 #define NV_PCIE2_RP_PRIV_XP_CONFIG_LOW_PWR_DURATION_MASK 0x3
300 #define NV_PCIE2_RP_PRIV_XP_DURATION_IN_LOW_PWR_100NS 0x00000FB0
302 #define NV_PCIE2_RP_XP_CTL_1 0x00000FEC
303 #define PCIE2_RP_XP_CTL_1_SPARE_BIT29 (1 << 29)
305 #define NV_PCIE2_RP_L1_PM_SUBSTATES_CYA 0x00000C00
306 #define PCIE2_RP_L1_PM_SUBSTATES_CYA_CM_RTIME_MASK (0xFF << 8)
307 #define PCIE2_RP_L1_PM_SUBSTATES_CYA_CM_RTIME_SHIFT (8)
308 #define PCIE2_RP_L1_PM_SUBSTATES_CYA_T_PWRN_SCL_MASK (0x3 << 16)
309 #define PCIE2_RP_L1_PM_SUBSTATES_CYA_T_PWRN_SCL_SHIFT (16)
310 #define PCIE2_RP_L1_PM_SUBSTATES_CYA_T_PWRN_VAL_MASK (0xF8 << 19)
311 #define PCIE2_RP_L1_PM_SUBSTATES_CYA_T_PWRN_VAL_SHIFT (19)
313 #define NV_PCIE2_RP_L1_PM_SUBSTATES_1_CYA 0x00000C04
314 #define PCIE2_RP_L1_PM_SUBSTATES_1_CYA_PWR_OFF_DLY_MASK (0x1FFF)
315 #define PCIE2_RP_L1_PM_SUBSTATES_1_CYA_PWR_OFF_DLY (0x26)
316 #define PCIE2_RP_L1_PM_SUBSTATES_1_CYA_CLKREQ_ASSERTED_DLY_MASK (0x1FF << 13)
317 #define PCIE2_RP_L1_PM_SUBSTATES_1_CYA_CLKREQ_ASSERTED_DLY (0x27 << 13)
319 #define NV_PCIE2_RP_L1_PM_SUBSTATES_2_CYA 0x00000C08
320 #define PCIE2_RP_L1_PM_SUBSTATES_2_CYA_T_L1_2_DLY_MASK (0x1FFF)
321 #define PCIE2_RP_L1_PM_SUBSTATES_2_CYA_T_L1_2_DLY (0x4D)
322 #define PCIE2_RP_L1_PM_SUBSTATES_2_CYA_MICROSECOND_MASK (0xFF << 13)
323 #define PCIE2_RP_L1_PM_SUBSTATES_2_CYA_MICROSECOND (0x13 << 13)
324 #define PCIE2_RP_L1_PM_SUBSTATES_2_CYA_MICROSECOND_COMP_MASK (0xF << 21)
325 #define PCIE2_RP_L1_PM_SUBSTATES_2_CYA_MICROSECOND_COMP (0x2 << 21)
327 #define PCIE2_RP_L1_PM_SS_CONTROL 0x00000148
328 #define PCIE2_RP_L1_PM_SS_CONTROL_ASPM_L11_ENABLE 0x00000008
329 #define PCIE2_RP_L1_PM_SS_CONTROL_ASPM_L12_ENABLE 0x00000004
331 #define TEGRA_PCIE_MSELECT_CLK_204 204000000
332 #define TEGRA_PCIE_MSELECT_CLK_408 408000000
333 #define TEGRA_PCIE_XCLK_500 500000000
334 #define TEGRA_PCIE_XCLK_250 250000000
335 #define TEGRA_PCIE_EMC_CLK_102 102000000
336 #define TEGRA_PCIE_EMC_CLK_528 528000000
338 #define INT_PCI_MSI_NR (32 * 8)
341 #if DEBUG || defined(CONFIG_PCI_DEBUG)
342 #define PR_FUNC_LINE pr_info("PCIE: %s(%d)\n", __func__, __LINE__)
344 #define PR_FUNC_LINE do {} while (0)
347 /* Pinctrl configuration paramaters */
348 #if defined(CONFIG_ARCH_TEGRA_21x_SOC)
349 #define pinctrl_compatible "nvidia,tegra210-pinmux"
350 #define pin_pex_l0_clkreq "pex_l0_clkreq_n_pa1"
351 #define pin_pex_l1_clkreq "pex_l1_clkreq_n_pa4"
353 #define pinctrl_compatible "nvidia,tegra124-pinmux"
354 #define pin_pex_l0_clkreq "pex_l0_clkreq_n_pdd2"
355 #define pin_pex_l1_clkreq "pex_l1_clkreq_n_pdd6"
358 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
359 static struct of_device_id tegra_pcie_pd[] = {
360 { .compatible = "nvidia, tegra210-pcie-pd", },
361 { .compatible = "nvidia, tegra132-pcie-pd", },
362 { .compatible = "nvidia, tegra124-pcie-pd", },
367 #if defined(CONFIG_ARCH_TEGRA_21x_SOC)
368 static u32 rp_to_lane_map[2][4] = { {1, 2, 3, 4}, {0} };
/* Per-SoC configuration: port count and the supply rails to enable. */
struct tegra_pcie_soc_data {
	unsigned int num_ports;		/* number of root ports on this SoC */
	char **pcie_regulator_names;	/* regulator supply names, array of num_pcie_regulators */
	int num_pcie_regulators;	/* length of pcie_regulator_names */
};
378 struct msi_chip chip;
379 DECLARE_BITMAP(used, INT_PCI_MSI_NR);
380 struct irq_domain *domain;
386 static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip)
388 return container_of(chip, struct tegra_msi, chip);
398 struct list_head buses;
399 struct list_head sys;
401 struct resource *afi_res;
402 struct resource *pads_res;
406 struct resource prefetch;
407 struct resource busn;
409 struct tegra_msi msi;
411 struct clk *pcie_xclk;
412 struct clk *pcie_afi;
413 struct clk *pcie_pcie;
414 struct clk *pcie_pll_e;
415 struct clk *pcie_mselect;
416 struct clk *pcie_emc;
418 struct list_head ports;
421 int power_rails_enabled;
422 int pcie_power_enabled;
423 struct work_struct hotplug_detect;
425 struct regulator **pcie_regulators;
427 struct tegra_pci_platform_data *plat_data;
428 struct tegra_pcie_soc_data *soc_data;
429 struct dentry *debugfs;
430 struct delayed_work detect_delay;
431 struct tegra_prod_list *prod_list;
434 struct tegra_pcie_port {
435 struct tegra_pcie *pcie;
436 struct list_head list;
437 struct resource regs;
441 int gpio_presence_detection;
442 bool disable_clock_request;
444 struct dentry *port_debugfs;
447 struct tegra_pcie_bus {
448 struct vm_struct *area;
449 struct list_head list;
/* used to avoid successive hotplug disconnect or connect */
static bool hotplug_event;
/* pcie mselect, xclk and emc rate */
static unsigned long tegra_pcie_mselect_rate = TEGRA_PCIE_MSELECT_CLK_204;
static unsigned long tegra_pcie_xclk_rate = TEGRA_PCIE_XCLK_250;
static unsigned long tegra_pcie_emc_rate = TEGRA_PCIE_EMC_CLK_102;
/* non-zero requests Gen2 link speed; presumably set via debugfs — TODO confirm */
static u32 is_gen2_speed;
/* scratch state shared by the config-space debug helpers — TODO confirm users */
static u16 config_offset;
static u32 config_val;
static u16 config_aspm_state;
465 static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
467 return sys->private_data;
470 static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
471 unsigned long offset)
473 writel(value, offset + pcie->afi);
476 static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
478 return readl(offset + pcie->afi);
481 static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
482 unsigned long offset)
484 writel(value, offset + pcie->pads);
487 static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
489 return readl(offset + pcie->pads);
492 static inline void rp_writel(struct tegra_pcie_port *port, u32 value,
493 unsigned long offset)
495 writel(value, offset + port->base);
498 static inline unsigned int rp_readl(struct tegra_pcie_port *port,
499 unsigned long offset)
501 return readl(offset + port->base);
505 * The configuration space mapping on Tegra is somewhat similar to the ECAM
506 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
507 * register accesses are mapped:
509 * [27:24] extended register number
511 * [15:11] device number
512 * [10: 8] function number
513 * [ 7: 0] register number
515 * Mapping the whole extended configuration space would required 256 MiB of
516 * virtual address space, only a small part of which will actually be used.
517 * To work around this, a 1 MiB of virtual addresses are allocated per bus
518 * when the bus is first accessed. When the physical range is mapped, the
519 * the bus number bits are hidden so that the extended register number bits
520 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
522 * [19:16] extended register number
523 * [15:11] device number
524 * [10: 8] function number
525 * [ 7: 0] register number
527 * This is achieved by stitching together 16 chunks of 64 KiB of physical
528 * address space via the MMU.
/*
 * Compute the offset of a config register inside a bus's 1 MiB virtual
 * config window (see the mapping description above): the extended
 * register number lands in bits [19:16], device in [15:11], function in
 * [10:8] and the dword-aligned register number in [7:0].
 */
static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
	unsigned long ext = (where & 0xf00) << 8;	/* bits [19:16] */
	unsigned long dev = PCI_SLOT(devfn) << 11;	/* bits [15:11] */
	unsigned long fn = PCI_FUNC(devfn) << 8;	/* bits [10: 8] */

	return ext | dev | fn | (where & 0xfc);
}
537 static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
540 phys_addr_t cs = pcie->cs->start;
541 struct tegra_pcie_bus *bus;
545 pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
546 L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
548 pgprot_t prot = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY | PTE_XN |
549 PTE_SHARED | PTE_TYPE_PAGE;
550 (void)pgprot_dmacoherent(prot); /* L_PTE_MT_DEV_SHARED */
554 bus = kzalloc(sizeof(*bus), GFP_KERNEL);
556 return ERR_PTR(-ENOMEM);
558 INIT_LIST_HEAD(&bus->list);
561 /* allocate 1 MiB of virtual addresses */
562 bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
568 /* map each of the 16 chunks of 64 KiB each.
570 * Note that each chunk still needs to increment by 16 MiB in
573 for (i = 0; i < 16; i++) {
574 unsigned long virt = (unsigned long)bus->area->addr +
576 phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;
578 err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
580 dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
588 vunmap(bus->area->addr);
594 static void *tegra_pcie_bus_map(struct tegra_pcie *pcie,
597 struct tegra_pcie_bus *bus;
599 list_for_each_entry(bus, &pcie->buses, list)
600 if (bus->nr == busnr)
601 return bus->area->addr;
607 static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
611 struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
612 void __iomem *addr = NULL;
614 if (bus->number == 0) {
615 unsigned int slot = PCI_SLOT(devfn);
616 struct tegra_pcie_port *port;
618 list_for_each_entry(port, &pcie->ports, list) {
619 if ((port->index + 1 == slot) && port->status) {
620 addr = port->base + (where & ~3);
625 addr = (void __iomem *)tegra_pcie_bus_map(pcie, bus->number);
628 "failed to map cfg. space for bus %u\n",
633 addr += tegra_pcie_conf_offset(devfn, where);
639 static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
640 int where, int size, u32 *value)
644 addr = tegra_pcie_conf_address(bus, devfn, where);
647 return PCIBIOS_DEVICE_NOT_FOUND;
650 *value = readl(addr);
653 *value = (*value >> (8 * (where & 3))) & 0xff;
655 *value = (*value >> (8 * (where & 3))) & 0xffff;
657 return PCIBIOS_SUCCESSFUL;
660 static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
661 int where, int size, u32 value)
666 addr = tegra_pcie_conf_address(bus, devfn, where);
668 return PCIBIOS_DEVICE_NOT_FOUND;
672 return PCIBIOS_SUCCESSFUL;
676 mask = ~(0xffff << ((where & 0x3) * 8));
678 mask = ~(0xff << ((where & 0x3) * 8));
680 return PCIBIOS_BAD_REGISTER_NUMBER;
682 tmp = readl(addr) & mask;
683 tmp |= value << ((where & 0x3) * 8);
686 return PCIBIOS_SUCCESSFUL;
/* Config-space accessors handed to the PCI core for this host bridge. */
static struct pci_ops tegra_pcie_ops = {
	.read	= tegra_pcie_read_conf,
	.write	= tegra_pcie_write_conf,
};
694 static void tegra_pcie_fixup_bridge(struct pci_dev *dev)
698 if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
699 pci_read_config_word(dev, PCI_COMMAND, ®);
700 reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
701 PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
702 pci_write_config_word(dev, PCI_COMMAND, reg);
705 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);
707 /* Tegra PCIE root complex wrongly reports device class */
708 static void tegra_pcie_fixup_class(struct pci_dev *dev)
710 dev->class = PCI_CLASS_BRIDGE_PCI << 8;
713 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
714 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
716 /* Tegra PCIE requires relaxed ordering */
717 static void tegra_pcie_relax_enable(struct pci_dev *dev)
719 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
721 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
723 static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
725 struct tegra_pcie *pcie = sys_to_pcie(sys);
729 err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->mem);
733 err = devm_request_resource(pcie->dev, &iomem_resource,
736 devm_release_resource(pcie->dev, &pcie->mem);
740 pci_add_resource_offset(
741 &sys->resources, &pcie->mem, sys->mem_offset);
742 pci_add_resource_offset(
743 &sys->resources, &pcie->prefetch, sys->mem_offset);
744 pci_add_resource(&sys->resources, &pcie->busn);
746 pci_ioremap_io(nr * resource_size(&pcie->io), pcie->io.start);
751 static int tegra_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
753 struct tegra_pcie *pcie = sys_to_pcie(dev->bus->sysdata);
757 static void tegra_pcie_add_bus(struct pci_bus *bus)
759 struct tegra_pcie_bus *tbus;
760 struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
763 /* bus 0 is root complex whose config space is already mapped */
766 if (IS_ENABLED(CONFIG_PCI_MSI))
767 bus->msi = &pcie->msi.chip;
769 /* Allocate memory for new bus */
770 tbus = tegra_pcie_bus_alloc(pcie, bus->number);
773 list_add_tail(&tbus->list, &pcie->buses);
776 static struct pci_bus *tegra_pcie_scan_bus(int nr,
777 struct pci_sys_data *sys)
779 struct tegra_pcie *pcie = sys_to_pcie(sys);
784 bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys,
789 pci_scan_child_bus(bus);
794 static void tegra_pcie_teardown(int nr, struct pci_sys_data *sys)
796 struct tegra_pcie *pcie = sys_to_pcie(sys);
797 pci_iounmap_io(nr * resource_size(&pcie->io));
800 static struct hw_pci tegra_pcie_hw = {
802 .setup = tegra_pcie_setup,
803 .scan = tegra_pcie_scan_bus,
804 .map_irq = tegra_pcie_map_irq,
805 .add_bus = tegra_pcie_add_bus,
806 .teardown = tegra_pcie_teardown,
809 #ifdef HOTPLUG_ON_SYSTEM_BOOT
810 /* It enumerates the devices when dock is connected after system boot */
811 /* this is similar to pcibios_init_hw in bios32.c */
812 static void __init tegra_pcie_hotplug_init(void)
814 struct pci_sys_data *sys = NULL;
817 if (is_dock_conn_at_boot)
821 tegra_pcie_preinit();
822 for (nr = 0; nr < tegra_pcie_hw.nr_controllers; nr++) {
823 sys = kzalloc(sizeof(struct pci_sys_data), GFP_KERNEL);
825 panic("PCI: unable to allocate sys data!");
827 #ifdef CONFIG_PCI_DOMAINS
828 sys->domain = tegra_pcie_hw.domain;
831 sys->swizzle = tegra_pcie_hw.swizzle;
832 sys->map_irq = tegra_pcie_hw.map_irq;
833 INIT_LIST_HEAD(&sys->resources);
835 ret = tegra_pcie_setup(nr, sys);
837 if (list_empty(&sys->resources)) {
838 pci_add_resource_offset(&sys->resources,
839 &ioport_resource, sys->io_offset);
840 pci_add_resource_offset(&sys->resources,
841 &iomem_resource, sys->mem_offset);
843 pci_create_root_bus(NULL, nr, &tegra_pcie_ops,
844 sys, &sys->resources);
847 is_dock_conn_at_boot = true;
851 static void tegra_pcie_enable_aer(struct tegra_pcie_port *port, bool enable)
856 data = rp_readl(port, NV_PCIE2_RP_VEND_CTL1);
858 data |= PCIE2_RP_VEND_CTL1_ERPT;
860 data &= ~PCIE2_RP_VEND_CTL1_ERPT;
861 rp_writel(port, data, NV_PCIE2_RP_VEND_CTL1);
864 static int tegra_pcie_attach(struct tegra_pcie *pcie)
866 struct pci_bus *bus = NULL;
867 struct tegra_pcie_port *port;
873 /* rescan and recreate all pcie data structures */
874 while ((bus = pci_find_next_bus(bus)) != NULL)
876 /* unhide AER capability */
877 list_for_each_entry(port, &pcie->ports, list)
879 tegra_pcie_enable_aer(port, true);
881 hotplug_event = false;
885 static int tegra_pcie_detach(struct tegra_pcie *pcie)
887 struct pci_dev *pdev = NULL;
888 struct tegra_pcie_port *port;
893 hotplug_event = true;
895 /* hide AER capability to avoid log spew */
896 list_for_each_entry(port, &pcie->ports, list)
898 tegra_pcie_enable_aer(port, false);
900 /* remove all pcie data structures */
901 for_each_pci_dev(pdev) {
902 pci_stop_and_remove_bus_device(pdev);
908 static void tegra_pcie_prsnt_map_override(struct tegra_pcie_port *port,
914 /* currently only hotplug on root port 0 supported */
915 data = rp_readl(port, NV_PCIE2_RP_PRIV_MISC);
916 data &= ~PCIE2_RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
918 data |= PCIE2_RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
920 data |= PCIE2_RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
921 rp_writel(port, data, NV_PCIE2_RP_PRIV_MISC);
924 static void work_hotplug_handler(struct work_struct *work)
926 struct tegra_pcie *pcie_driver =
927 container_of(work, struct tegra_pcie, hotplug_detect);
931 if (pcie_driver->plat_data->gpio_hot_plug == -1)
933 val = gpio_get_value(pcie_driver->plat_data->gpio_hot_plug);
935 dev_info(pcie_driver->dev, "PCIE Hotplug: Connected\n");
936 tegra_pcie_attach(pcie_driver);
938 dev_info(pcie_driver->dev, "PCIE Hotplug: DisConnected\n");
939 tegra_pcie_detach(pcie_driver);
943 static irqreturn_t gpio_pcie_detect_isr(int irq, void *arg)
945 struct tegra_pcie *pcie = arg;
947 schedule_work(&pcie->hotplug_detect);
951 static void handle_sb_intr(struct tegra_pcie *pcie)
956 mesg = afi_readl(pcie, AFI_MSG_0);
957 if (mesg & AFI_MSG_INTX_MASK)
958 /* notify device isr for INTx messages from pcie devices */
960 "Legacy INTx interrupt occurred %x\n", mesg);
961 else if (mesg & AFI_MSG_PM_PME_MASK) {
962 struct tegra_pcie_port *port, *tmp;
963 /* handle PME messages */
964 list_for_each_entry_safe(port, tmp, &pcie->ports, list)
965 if (port->index == (mesg & AFI_MSG_PM_PME0))
967 mesg = rp_readl(port, NV_PCIE2_RP_RSR);
968 mesg |= NV_PCIE2_RP_RSR_PMESTAT;
969 rp_writel(port, mesg, NV_PCIE2_RP_RSR);
971 afi_writel(pcie, mesg, AFI_MSG_0);
974 static irqreturn_t tegra_pcie_isr(int irq, void *arg)
976 const char *err_msg[] = {
984 "Response decoding error",
985 "AXI response decoding error",
986 "Transcation timeout",
988 "Slot Clock request change",
989 "TMS Clock clamp change",
991 "Peer to Peer error",
993 struct tegra_pcie *pcie = arg;
997 code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
998 signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
1000 if (code == AFI_INTR_LEGACY)
1001 handle_sb_intr(pcie);
1002 afi_writel(pcie, 0, AFI_INTR_CODE);
1004 if (code >= ARRAY_SIZE(err_msg))
1008 * do not pollute kernel log with master abort reports since they
1009 * happen a lot during enumeration
1011 if (code == AFI_INTR_MASTER_ABORT)
1012 pr_debug("PCIE: %s, signature: %08x\n",
1013 err_msg[code], signature);
1014 else if ((code != AFI_INTR_LEGACY) && (code != AFI_INTR_PRSNT_SENSE))
1015 dev_err(pcie->dev, "PCIE: %s, signature: %08x\n",
1016 err_msg[code], signature);
1022 * FPCI map is as follows:
1023 * - 0xfdfc000000: I/O space
1024 * - 0xfdfe000000: type 0 configuration space
1025 * - 0xfdff000000: type 1 configuration space
1026 * - 0xfe00000000: type 0 extended configuration space
1027 * - 0xfe10000000: type 1 extended configuration space
1029 static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
1031 u32 fpci_bar, size, axi_address;
1033 /* Bar 0: type 1 extended configuration space */
1034 fpci_bar = 0xfe100000;
1035 size = resource_size(pcie->cs);
1036 axi_address = pcie->cs->start;
1037 afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
1038 afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
1039 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);
1041 /* Bar 1: downstream IO bar */
1042 fpci_bar = 0xfdfc0000;
1043 size = resource_size(&pcie->io);
1044 axi_address = pcie->io.start;
1045 afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
1046 afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
1047 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
1049 /* Bar 2: prefetchable memory BAR */
1050 fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
1051 size = resource_size(&pcie->prefetch);
1052 axi_address = pcie->prefetch.start;
1053 afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
1054 afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
1055 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
1057 /* Bar 3: non prefetchable memory BAR */
1058 fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
1059 size = resource_size(&pcie->mem);
1060 axi_address = pcie->mem.start;
1061 afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
1062 afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
1063 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
1065 /* NULL out the remaining BARs as they are not used */
1066 afi_writel(pcie, 0, AFI_AXI_BAR4_START);
1067 afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
1068 afi_writel(pcie, 0, AFI_FPCI_BAR4);
1070 afi_writel(pcie, 0, AFI_AXI_BAR5_START);
1071 afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
1072 afi_writel(pcie, 0, AFI_FPCI_BAR5);
1074 /* map all upstream transactions as uncached */
1075 afi_writel(pcie, PHYS_OFFSET, AFI_CACHE_BAR0_ST);
1076 afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
1077 afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
1078 afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
1080 /* MSI translations are setup only when needed */
1081 afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
1082 afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
1083 afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
1084 afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
1087 static int tegra_pcie_enable_pads(struct tegra_pcie *pcie, bool enable)
1094 if (pex_usb_pad_pll_reset_deassert())
1095 dev_err(pcie->dev, "failed to deassert pex pll\n");
1098 if (!tegra_platform_is_fpga()) {
1099 /* PCIe pad programming done in shared XUSB_PADCTL space */
1100 err = pcie_phy_pad_enable(enable,
1101 pcie->plat_data->lane_map);
1104 "%s unable to initalize pads\n", __func__);
1107 if (!enable || err) {
1108 if (pex_usb_pad_pll_reset_assert())
1109 dev_err(pcie->dev, "failed to assert pex pll\n");
/*
 * Tegra210 only: configure MSELECT so wrap transactions are converted to
 * incrementing bursts on slave 1 (needed for normal NC & GRE mappings),
 * and disable error responses on that slave.
 *
 * Fixes vs. original: the ioremap() result was dereferenced without a
 * NULL check, and the 4-byte mapping was never released — both corrected.
 */
static void tegra_pcie_enable_wrap(void)
{
#if defined(CONFIG_ARCH_TEGRA_21x_SOC)
	u32 val;
	void __iomem *msel_base;

	PR_FUNC_LINE;
#define MSELECT_CONFIG_BASE 0x50060000
#define MSELECT_CONFIG_WRAP_TO_INCR_SLAVE1 BIT(28)
#define MSELECT_CONFIG_ERR_RESP_EN_SLAVE1 BIT(24)

	/* Config MSELECT to support wrap trans for normal NC & GRE mapping */
	msel_base = ioremap(MSELECT_CONFIG_BASE, 4);
	if (!msel_base)
		return;		/* mapping failed; leave MSELECT untouched */

	val = readl(msel_base);
	/* Enable WRAP_TO_INCR_SLAVE1 */
	val |= MSELECT_CONFIG_WRAP_TO_INCR_SLAVE1;
	/* Disable ERR_RESP_EN_SLAVE1 */
	val &= ~MSELECT_CONFIG_ERR_RESP_EN_SLAVE1;
	writel(val, msel_base);

	iounmap(msel_base);
#endif
}
/*
 * tegra_pcie_enable_controller() - AFI-level bring-up of the PCIe block.
 * @pcie: PCIe host controller state
 *
 * Programs PLL power-down control (disabled per-port when the port has
 * clkreq disabled), PEX bias, the crossbar lane-map configuration,
 * Gen2 fuse capability, the FPCI enable bit, and the AFI/SM interrupt
 * enables and masks.
 */
1138 static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
1141 struct tegra_pcie_port *port, *tmp;
1144 tegra_pcie_enable_wrap();
1145 /* Enable PLL power down */
1146 val = afi_readl(pcie, AFI_PLLE_CONTROL);
1147 val &= ~AFI_PLLE_CONTROL_BYPASS_PCIE2PLLE_CONTROL;
1148 val &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
1149 val |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
1150 val |= AFI_PLLE_CONTROL_PCIE2PLLE_CONTROL_EN;
/* any port without clkreq support forbids pads-driven PLLE control */
1151 list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
1152 if (port->disable_clock_request) {
1153 val &= ~AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
1157 afi_writel(pcie, val, AFI_PLLE_CONTROL);
1159 afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
1161 /* Enable all PCIE controller and */
1162 /* system management configuration of PCIE crossbar */
1163 val = afi_readl(pcie, AFI_PCIE_CONFIG);
1164 val &= ~AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE;
1165 if (tegra_platform_is_fpga()) {
1166 /* FPGA supports only x2_x1 bar config */
1167 val &= ~AFI_PCIE_CONFIG_XBAR_CONFIG_MASK;
1168 val |= AFI_PCIE_CONFIG_XBAR_CONFIG_X2_X1;
1170 if (pcie->plat_data->lane_map & PCIE_LANES_X0_X1)
1171 val &= ~AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE;
1172 #if !defined(CONFIG_ARCH_TEGRA_21x_SOC)
1173 val &= ~AFI_PCIE_CONFIG_XBAR_CONFIG_MASK;
1174 if (pcie->plat_data->lane_map & PCIE_LANES_X4_X0)
1175 val |= AFI_PCIE_CONFIG_XBAR_CONFIG_X4_X1;
1178 afi_writel(pcie, val, AFI_PCIE_CONFIG);
1180 /* Enable Gen 2 capability of PCIE */
1181 val = afi_readl(pcie, AFI_FUSE) & ~AFI_FUSE_PCIE_T0_GEN2_DIS;
1182 afi_writel(pcie, val, AFI_FUSE);
1184 /* Finally enable PCIe */
1185 val = afi_readl(pcie, AFI_CONFIGURATION);
1186 val |= AFI_CONFIGURATION_EN_FPCI;
1187 afi_writel(pcie, val, AFI_CONFIGURATION);
1189 val = (AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
1190 AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
1191 AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR |
1192 AFI_INTR_EN_AXI_DECERR | AFI_INTR_EN_PRSNT_SENSE);
1193 afi_writel(pcie, val, AFI_AFI_INTR_ENABLE);
1194 afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);
1196 /* FIXME: No MSI for now, only INT */
1197 afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);
1199 /* Disable all exceptions */
1200 afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
/*
 * tegra_pcie_enable_regulators() - switch on all PCIe power rails.
 * @pcie: PCIe host controller state
 *
 * Idempotent via the power_rails_enabled flag; failures to enable an
 * individual regulator are logged but do not abort the loop.
 */
1205 static int tegra_pcie_enable_regulators(struct tegra_pcie *pcie)
1209 if (pcie->power_rails_enabled) {
1210 dev_info(pcie->dev, "PCIE: Already power rails enabled\n");
1213 pcie->power_rails_enabled = 1;
1214 dev_info(pcie->dev, "PCIE: Enable power rails\n");
1216 for (i = 0; i < pcie->soc_data->num_pcie_regulators; i++) {
1217 if (pcie->pcie_regulators[i])
1218 if (regulator_enable(pcie->pcie_regulators[i]))
1219 dev_err(pcie->dev, "%s: can't enable regulator %s\n",
1221 pcie->soc_data->pcie_regulator_names[i]);
/*
 * tegra_pcie_disable_regulators() - switch off all PCIe power rails.
 * @pcie: PCIe host controller state
 *
 * Mirror of tegra_pcie_enable_regulators(); guarded by the
 * power_rails_enabled flag and clears it on completion.
 */
1228 static int tegra_pcie_disable_regulators(struct tegra_pcie *pcie)
1232 if (pcie->power_rails_enabled == 0) {
1233 dev_info(pcie->dev, "PCIE: Already power rails disabled\n");
1236 dev_info(pcie->dev, "PCIE: Disable power rails\n");
1238 for (i = 0; i < pcie->soc_data->num_pcie_regulators; i++) {
1239 if (pcie->pcie_regulators[i] != NULL)
1240 if (regulator_disable(pcie->pcie_regulators[i]))
1241 dev_err(pcie->dev, "%s: can't disable regulator %s\n",
1243 pcie->soc_data->pcie_regulator_names[i]);
1246 pcie->power_rails_enabled = 0;
/*
 * tegra_pcie_power_ungate() - unpowergate the PCIe partition and start clocks.
 * @pcie: PCIe host controller state
 *
 * Resolves the powergate partition id (from the PM domain when
 * CONFIG_PM_GENERIC_DOMAINS_OF is set, otherwise TEGRA_POWERGATE_PCIE),
 * enables PLLE, unpowergates with clocks on, then enables the mselect,
 * xclk and emc clocks.
 */
1251 static int tegra_pcie_power_ungate(struct tegra_pcie *pcie)
1257 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1258 partition_id = tegra_pd_get_powergate_id(tegra_pcie_pd);
1259 if (partition_id < 0)
1262 partition_id = TEGRA_POWERGATE_PCIE;
1265 err = clk_prepare_enable(pcie->pcie_pll_e);
1267 dev_err(pcie->dev, "PCIE: PLLE clk enable failed: %d\n", err);
1271 err = tegra_unpowergate_partition_with_clk_on(partition_id);
1273 dev_err(pcie->dev, "PCIE: powerup sequence failed: %d\n", err);
1277 err = clk_prepare_enable(pcie->pcie_mselect);
1279 dev_err(pcie->dev, "PCIE: mselect clk enable failed: %d\n",
/* xclk was prepared elsewhere; only enable is needed here */
1283 err = clk_enable(pcie->pcie_xclk);
1285 dev_err(pcie->dev, "PCIE: pciex clk enable failed: %d\n", err);
1288 err = clk_prepare_enable(pcie->pcie_emc);
1290 dev_err(pcie->dev, "PCIE: emc clk enable failed: %d\n", err);
/*
 * tegra_pcie_map_resources() - claim and map the controller MMIO regions.
 * @pcie: PCIe host controller state
 *
 * Requests and ioremaps the "pads" and "afi" register apertures, and
 * requests (but does not yet map) the "cs" configuration space, which
 * is remapped later on demand.
 */
1297 static int tegra_pcie_map_resources(struct tegra_pcie *pcie)
1299 struct platform_device *pdev = to_platform_device(pcie->dev);
1300 struct resource *pads, *afi, *res;
1303 pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
1305 pcie->pads_res = __devm_request_region(&pdev->dev, &iomem_resource,
1306 pads->start, resource_size(pads),
1309 if (!pcie->pads_res) {
1311 "PCIE: Failed to request region for pad registers\n");
1315 pcie->pads = devm_ioremap_nocache(&pdev->dev, pads->start,
1316 resource_size(pads));
1317 if (!(pcie->pads)) {
1318 dev_err(pcie->dev, "PCIE: Failed to map PAD registers\n");
1319 return -EADDRNOTAVAIL;
1322 afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
1324 pcie->afi_res = __devm_request_region(&pdev->dev, &iomem_resource,
1325 afi->start, resource_size(afi),
1328 if (!pcie->afi_res) {
1330 "PCIE: Failed to request region for afi registers\n");
1334 pcie->afi = devm_ioremap_nocache(&pdev->dev, afi->start,
1335 resource_size(afi));
1337 dev_err(pcie->dev, "PCIE: Failed to map AFI registers\n");
1338 return -EADDRNOTAVAIL;
1341 /* request configuration space, but remap later, on demand */
1342 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
1344 pcie->cs = __devm_request_region(&pdev->dev, &iomem_resource,
1345 res->start, resource_size(res), "pcie-config-space");
1347 dev_err(&pdev->dev, "PCIE: Failed to request region for CS registers\n");
/*
 * tegra_pcie_unmap_resources() - undo tegra_pcie_map_resources().
 * @pcie: PCIe host controller state
 *
 * Releases the cs/afi/pads regions and unmaps the pads and afi
 * apertures; done explicitly (not left to devm teardown) because the
 * driver powers the block on and off at runtime.
 */
1354 static void tegra_pcie_unmap_resources(struct tegra_pcie *pcie)
1356 struct platform_device *pdev = to_platform_device(pcie->dev);
1361 __devm_release_region(&pdev->dev, &iomem_resource,
1363 resource_size(pcie->cs));
1365 __devm_release_region(&pdev->dev, &iomem_resource,
1366 pcie->afi_res->start,
1367 resource_size(pcie->afi_res));
1369 __devm_release_region(&pdev->dev, &iomem_resource,
1370 pcie->pads_res->start,
1371 resource_size(pcie->pads_res));
1374 devm_iounmap(&pdev->dev, pcie->pads);
1378 devm_iounmap(&pdev->dev, pcie->afi);
/*
 * tegra_pcie_fpga_phy_init() - reset the FPGA PCIe phy and enable Gen2.
 * @pcie: PCIe host controller state
 *
 * Pulses the phy reset through the AFI scratch register, then writes a
 * magic BIST value to every root port, which is required for Gen2
 * speed support on the FPGA platform.
 */
1383 static int tegra_pcie_fpga_phy_init(struct tegra_pcie *pcie)
1385 #define FPGA_GEN2_SPEED_SUPPORT 0x90000001
1386 struct tegra_pcie_port *port;
1389 /* Do reset for FPGA pcie phy */
1390 afi_writel(pcie, AFI_WR_SCRATCH_0_RESET_VAL, AFI_WR_SCRATCH_0);
1392 afi_writel(pcie, AFI_WR_SCRATCH_0_DEFAULT_VAL, AFI_WR_SCRATCH_0);
1394 afi_writel(pcie, AFI_WR_SCRATCH_0_RESET_VAL, AFI_WR_SCRATCH_0);
1396 /* required for gen2 speed support on FPGA */
1397 list_for_each_entry(port, &pcie->ports, list)
1399 FPGA_GEN2_SPEED_SUPPORT, NV_PCIE2_RP_VEND_XP_BIST);
/*
 * tegra_pcie_pme_turnoff() - broadcast PME_Turn_Off and wait for ACK.
 * @pcie: PCIe host controller state
 *
 * Skipped on FPGA.  Sets TURN_OFF in AFI_PCIE_PME, polls for the ACK
 * bit, then sets the PADS2PLLE bypass needed for PLL power down.
 *
 * NOTE(review): the ACK poll has no visible timeout in this view —
 * confirm it cannot spin forever when an endpoint fails to ACK.
 */
1404 static void tegra_pcie_pme_turnoff(struct tegra_pcie *pcie)
1409 if (tegra_platform_is_fpga())
1411 data = afi_readl(pcie, AFI_PCIE_PME);
1412 data |= AFI_PCIE_PME_TURN_OFF;
1413 afi_writel(pcie, data, AFI_PCIE_PME);
1415 data = afi_readl(pcie, AFI_PCIE_PME);
1416 } while (!(data & AFI_PCIE_PME_ACK));
1418 /* Required for PLL power down */
1419 data = afi_readl(pcie, AFI_PLLE_CONTROL);
1420 data |= AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
1421 afi_writel(pcie, data, AFI_PLLE_CONTROL);
/*
 * PEX IO deep-power-down (DPD) descriptors: the bias pad and the two
 * clock pads are put into DPD whenever the controller is powered off
 * (see tegra_pcie_power_on()/tegra_pcie_power_off()) to save power.
 */
1424 static struct tegra_io_dpd pexbias_io = {
1426 .io_dpd_reg_index = 0,
1429 static struct tegra_io_dpd pexclk1_io = {
1431 .io_dpd_reg_index = 0,
1434 static struct tegra_io_dpd pexclk2_io = {
1436 .io_dpd_reg_index = 0,
/*
 * tegra_pcie_power_on() - full power-up sequence for the controller.
 * @pcie: PCIe host controller state
 *
 * Guarded by pcie_power_enabled.  Takes a runtime-PM reference, lifts
 * the PEX pads out of DPD (silicon only), maps the MMIO resources and
 * ungates power/clocks; on FPGA it additionally initializes the phy.
 * The tail visible here is the error-unwind path, which reverses each
 * step and drops the runtime-PM reference.
 */
1439 static int tegra_pcie_power_on(struct tegra_pcie *pcie)
1444 if (pcie->pcie_power_enabled) {
1445 dev_info(pcie->dev, "PCIE: Already powered on");
1448 pcie->pcie_power_enabled = 1;
1449 pm_runtime_get_sync(pcie->dev);
1451 if (!tegra_platform_is_fpga()) {
1452 /* disable PEX IOs DPD mode to turn on pcie */
1453 tegra_io_dpd_disable(&pexbias_io);
1454 tegra_io_dpd_disable(&pexclk1_io);
1455 tegra_io_dpd_disable(&pexclk2_io);
1457 err = tegra_pcie_map_resources(pcie);
1459 dev_err(pcie->dev, "PCIE: Failed to map resources\n");
1460 goto err_map_resource;
1462 err = tegra_pcie_power_ungate(pcie);
1464 dev_err(pcie->dev, "PCIE: Failed to power ungate\n");
1465 goto err_power_ungate;
1467 if (tegra_platform_is_fpga()) {
1468 err = tegra_pcie_fpga_phy_init(pcie);
1470 dev_err(pcie->dev, "PCIE: Failed to initialize FPGA Phy\n");
/* error unwind: release resources, restore DPD, drop PM reference */
1474 tegra_pcie_unmap_resources(pcie);
1476 if (!tegra_platform_is_fpga()) {
1477 /* put PEX pads into DPD mode to save additional power */
1478 tegra_io_dpd_enable(&pexbias_io);
1479 tegra_io_dpd_enable(&pexclk1_io);
1480 tegra_io_dpd_enable(&pexclk2_io);
1482 pm_runtime_put(pcie->dev);
1483 pcie->pcie_power_enabled = 0;
/*
 * tegra_pcie_power_off() - full power-down sequence for the controller.
 * @pcie: PCIe host controller state
 * @all: caller-selected scope of the shutdown (semantics not visible
 *       in this view — confirm against callers)
 *
 * Guarded by pcie_power_enabled.  Clears presence-detect overrides,
 * broadcasts PME turn-off, disables pads, unmaps MMIO, stops clocks,
 * powergates the partition, puts the PEX pads back into DPD (silicon
 * only) and drops the runtime-PM reference.
 */
1488 static int tegra_pcie_power_off(struct tegra_pcie *pcie, bool all)
1491 struct tegra_pcie_port *port;
1495 if (pcie->pcie_power_enabled == 0) {
1496 dev_info(pcie->dev, "PCIE: Already powered off");
1500 list_for_each_entry(port, &pcie->ports, list) {
1501 tegra_pcie_prsnt_map_override(port, false);
1503 tegra_pcie_pme_turnoff(pcie);
1504 tegra_pcie_enable_pads(pcie, false);
1506 tegra_pcie_unmap_resources(pcie);
1507 if (pcie->pcie_mselect)
1508 clk_disable(pcie->pcie_mselect);
1509 if (pcie->pcie_xclk)
1510 clk_disable(pcie->pcie_xclk);
1512 clk_disable(pcie->pcie_emc);
1513 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1514 partition_id = tegra_pd_get_powergate_id(tegra_pcie_pd);
1515 if (partition_id < 0)
1518 partition_id = TEGRA_POWERGATE_PCIE;
1520 err = tegra_powergate_partition_with_clk_off(partition_id);
1524 if (pcie->pcie_pll_e)
1525 clk_disable(pcie->pcie_pll_e);
1527 if (!tegra_platform_is_fpga()) {
1528 /* put PEX pads into DPD mode to save additional power */
1529 tegra_io_dpd_enable(&pexbias_io);
1530 tegra_io_dpd_enable(&pexclk1_io);
1531 tegra_io_dpd_enable(&pexclk2_io);
1533 pm_runtime_put(pcie->dev);
1535 pcie->pcie_power_enabled = 0;
/*
 * tegra_pcie_clocks_get() - acquire all clocks used by the driver.
 * @pcie: PCIe host controller state
 *
 * Looks up xclk, afi, pcie, pll_e, mselect and emc from the
 * "tegra_pcie" clock namespace.  Each failure path manually clk_put()s
 * every clock acquired so far (ladder-style unwind).
 */
1540 static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
1543 /* get the PCIEXCLK */
1544 pcie->pcie_xclk = clk_get_sys("tegra_pcie", "pciex");
1545 if (IS_ERR_OR_NULL(pcie->pcie_xclk)) {
1546 dev_err(pcie->dev, "unable to get PCIE Xclock\n");
1549 pcie->pcie_afi = clk_get_sys("tegra_pcie", "afi");
1550 if (IS_ERR_OR_NULL(pcie->pcie_afi)) {
1551 clk_put(pcie->pcie_xclk);
1552 dev_err(pcie->dev, "unable to get PCIE afi clock\n");
1555 pcie->pcie_pcie = clk_get_sys("tegra_pcie", "pcie");
1556 if (IS_ERR_OR_NULL(pcie->pcie_pcie)) {
1557 clk_put(pcie->pcie_afi);
1558 clk_put(pcie->pcie_xclk);
1559 dev_err(pcie->dev, "unable to get PCIE pcie clock\n");
1563 pcie->pcie_pll_e = clk_get_sys("tegra_pcie", "pll_e");
1564 if (IS_ERR_OR_NULL(pcie->pcie_pll_e)) {
1565 clk_put(pcie->pcie_afi);
1566 clk_put(pcie->pcie_xclk);
1567 clk_put(pcie->pcie_pcie);
1568 dev_err(pcie->dev, "unable to get PCIE PLLE clock\n");
1572 pcie->pcie_mselect = clk_get_sys("tegra_pcie", "mselect");
1573 if (IS_ERR_OR_NULL(pcie->pcie_mselect)) {
1574 clk_put(pcie->pcie_pll_e);
1575 clk_put(pcie->pcie_pcie);
1576 clk_put(pcie->pcie_afi);
1577 clk_put(pcie->pcie_xclk);
1579 "unable to get PCIE mselect clock\n");
1582 pcie->pcie_emc = clk_get_sys("tegra_pcie", "emc");
/* NOTE(review): the emc failure path shows no clk_put() unwind in
 * this view — confirm earlier clocks are released on this path. */
1583 if (IS_ERR_OR_NULL(pcie->pcie_emc)) {
1585 "unable to get PCIE emc clock\n");
1591 static void tegra_pcie_clocks_put(struct tegra_pcie *pcie)
1594 if (pcie->pcie_xclk)
1595 clk_put(pcie->pcie_xclk);
1596 if (pcie->pcie_pcie)
1597 clk_put(pcie->pcie_pcie);
1599 clk_put(pcie->pcie_afi);
1600 if (pcie->pcie_mselect)
1601 clk_put(pcie->pcie_mselect);
1602 if (pcie->pcie_pll_e)
1603 clk_put(pcie->pcie_pll_e);
1604 if (pcie->pcie_mselect)
1605 clk_put(pcie->pcie_mselect);
1607 clk_put(pcie->pcie_emc);
/*
 * tegra_pcie_get_resources() - probe-time acquisition of clocks, power
 * and the controller interrupt.
 * @pcie: PCIe host controller state
 *
 * Gets clocks, enables regulators, powers the block on, sets the
 * mselect/xclk/emc clock rates, then requests the shared "intr" IRQ.
 * The tail visible here is the error-unwind path (power off, disable
 * regulators, put clocks).
 */
1610 static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
1612 struct platform_device *pdev = to_platform_device(pcie->dev);
1616 pcie->power_rails_enabled = 0;
1617 pcie->pcie_power_enabled = 0;
1619 err = tegra_pcie_clocks_get(pcie);
1621 dev_err(pcie->dev, "PCIE: failed to get clocks: %d\n", err);
1625 err = tegra_pcie_enable_regulators(pcie);
1627 dev_err(pcie->dev, "PCIE: Failed to enable regulators\n");
1628 goto err_enable_reg;
1630 err = tegra_pcie_power_on(pcie);
1632 dev_err(pcie->dev, "PCIE: Failed to power on: %d\n", err);
1636 err = clk_set_rate(pcie->pcie_mselect, tegra_pcie_mselect_rate);
1639 "PCIE: Failed to set mselect rate: %d\n", err);
1643 err = clk_set_rate(pcie->pcie_xclk, tegra_pcie_xclk_rate);
1645 dev_err(pcie->dev, "PCIE: Failed to set xclk rate: %d\n", err);
1649 err = clk_set_rate(pcie->pcie_emc, tegra_pcie_emc_rate);
1653 err = platform_get_irq_byname(pdev, "intr");
1655 dev_err(pcie->dev, "failed to get IRQ: %d\n", err);
1661 err = devm_request_irq(&pdev->dev, pcie->irq, tegra_pcie_isr,
1662 IRQF_SHARED, "PCIE", pcie);
1664 dev_err(pcie->dev, "PCIE: Failed to register IRQ: %d\n", err);
1667 set_irq_flags(pcie->irq, IRQF_VALID);
/* error unwind */
1672 tegra_pcie_power_off(pcie, true);
1674 tegra_pcie_disable_regulators(pcie);
1676 tegra_pcie_clocks_put(pcie);
/*
 * tegra_pcie_port_get_pex_ctrl() - AFI PEX control register offset for
 * a root port: AFI_PEX0_CTRL for port 0, AFI_PEX1_CTRL for port 1.
 */
1681 static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
1683 unsigned long ret = 0;
1685 switch (port->index) {
1687 ret = AFI_PEX0_CTRL;
1691 ret = AFI_PEX1_CTRL;
/*
 * tegra_pcie_port_reset() - pulse the fundamental reset of a root port.
 * @port: root port to reset
 *
 * Drives PEX_CTRL_RST low, holds it for 1-2 ms, then releases it.
 */
1698 static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
1700 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
1701 unsigned long value;
1705 /* pulse reset signal */
1706 value = afi_readl(port->pcie, ctrl);
1707 value &= ~AFI_PEX_CTRL_RST;
1708 afi_writel(port->pcie, value, ctrl);
1710 usleep_range(1000, 2000);
1712 value = afi_readl(port->pcie, ctrl);
1713 value |= AFI_PEX_CTRL_RST;
1714 afi_writel(port->pcie, value, ctrl);
/*
 * tegra_pcie_port_enable() - enable the reference clock and reset a port.
 * @port: root port to enable
 *
 * Turns on the refclk with a temporary SW override so the endpoint can
 * be enumerated; clkreq handling depends on whether the platform
 * supports the clock-request signal.  Ends with a port reset.
 */
1717 static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
1719 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
1720 unsigned long value;
1724 /* enable reference clock. Enable SW override so as to allow device
1725 to get enumerated. SW override will be removed after enumeration
1727 value = afi_readl(port->pcie, ctrl);
1728 value |= (AFI_PEX_CTRL_REFCLK_EN | AFI_PEX_CTRL_OVERRIDE_EN);
1729 /* t124 doesn't support pll power down due to RTL bug and some */
1730 /* platforms don't support clkreq, both needs to disable clkreq and */
1731 /* enable refclk override to have refclk always ON independent of EP */
1732 if (port->disable_clock_request)
1733 value |= AFI_PEX_CTRL_CLKREQ_EN;
1735 value &= ~AFI_PEX_CTRL_CLKREQ_EN;
1737 afi_writel(port->pcie, value, ctrl);
1739 tegra_pcie_port_reset(port);
/*
 * tegra_pcie_port_disable() - mark a root port disabled in AFI config.
 * @port: root port to disable
 *
 * Sets the per-controller DISABLE_DEVICE bit (PCIEC1 or PCIEC0,
 * selected by a condition elided from this view — presumably
 * port->index; confirm).
 */
1742 static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
1747 data = afi_readl(port->pcie, AFI_PCIE_CONFIG);
1749 data |= AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE;
1751 data |= AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE;
1752 afi_writel(port->pcie, data, AFI_PCIE_CONFIG);
1755 #if defined(CONFIG_ARCH_TEGRA_21x_SOC)
/*
 * get_rdet_status() - receiver-detect status for a root port (T210).
 * @index: root port index into rp_to_lane_map
 *
 * ORs the per-lane receiver-detect flags of every lane mapped to the
 * port; true means at least one lane detected a receiver.
 */
1756 static bool get_rdet_status(u32 index)
1760 for (i = 0; i < ARRAY_SIZE(rp_to_lane_map[index]); i++)
1761 flag |= tegra_phy_get_lane_rdet(rp_to_lane_map[index][i]);
1767 * FIXME: If there are no PCIe cards attached, calling this function
1768 * can increase boot time because of the long link-up timeouts
1771 #define TEGRA_PCIE_LINKUP_TIMEOUT 350 /* up to 350 ms */
/*
 * tegra_pcie_port_check_link() - wait for a root port's data link to
 * come up.
 * @port: root port to check
 *
 * On T210 the port is skipped immediately when receiver detect shows
 * nothing attached.  Otherwise the link-active bit is polled for up to
 * TEGRA_PCIE_LINKUP_TIMEOUT ms, retrying up to 3 times with a port
 * reset between attempts.
 */
1772 static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
1774 unsigned int retries = 3;
1775 unsigned long value;
1778 #if defined(CONFIG_ARCH_TEGRA_21x_SOC)
1779 if (!get_rdet_status(port->index))
1783 unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
1786 value = readl(port->base + RP_LINK_CONTROL_STATUS);
1787 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
1789 usleep_range(1000, 2000);
1790 } while (--timeout);
1791 dev_info(port->pcie->dev, "link %u down, retrying\n",
1793 tegra_pcie_port_reset(port);
1794 } while (--retries);
1799 #if defined(CONFIG_ARCH_TEGRA_21x_SOC)
/* true when the T210 LPDDR4/X4_X1 performance workaround must be
 * applied (decided in tegra_pcie_apply_sw_war()) */
1800 static bool t210_war;
/*
 * tegra_pcie_apply_sw_war() - apply chip-specific software workarounds
 * to a root port.
 * @port: root port being configured
 * (second parameter elided in this view — appears to select the
 *  enumeration-done phase; confirm against callers)
 *
 * Covers: T210 LPDDR4/X4_X1 perf workaround gating, MSI disable on
 * root ports, Gen1-first link training, invalid-IRQ warning
 * suppression, buffer resizing (bug 1447522), clkreq-assert delay
 * (bug 1461732), link speed-change fixes, pad power-down modes, and
 * clk_m-dependent timer programming.  Register sequencing is intricate
 * — do not reorder without hardware documentation.
 */
1802 static void tegra_pcie_apply_sw_war(struct tegra_pcie_port *port,
1806 #if defined(CONFIG_ARCH_TEGRA_21x_SOC)
1807 struct tegra_pcie *pcie = port->pcie;
1809 struct pci_dev *pdev = NULL;
1812 #if defined(CONFIG_ARCH_TEGRA_21x_SOC)
1813 /* T210 WAR for perf bugs required when LPDDR4 */
1814 /* memory is used with both ctlrs in X4_X1 config */
1815 if (pcie->plat_data->has_memtype_lpddr4 &&
1816 (pcie->plat_data->lane_map == PCIE_LANES_X4_X1) &&
1817 (pcie->num_ports == pcie->soc_data->num_ports))
1821 /* disable msi for port driver to avoid panic */
1822 for_each_pci_dev(pdev)
1823 if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
1824 pdev->msi_enabled = 0;
1827 /* Some of the old PCIe end points don't get enumerated
1828 * if RP advertises both Gen-1 and Gen-2 speeds. Hence, the
1829 * strategy followed here is to initially advertise only
1830 * Gen-1 and after link is up, check end point's capability
1831 * for Gen-2 and retrain link to Gen-2 speed
1833 data = rp_readl(port, RP_LINK_CONTROL_STATUS_2);
1834 data &= ~RP_LINK_CONTROL_STATUS_2_TRGT_LNK_SPD_MASK;
1835 data |= RP_LINK_CONTROL_STATUS_2_TRGT_LNK_SPD_GEN1;
1836 rp_writel(port, data, RP_LINK_CONTROL_STATUS_2);
1838 /* Avoid warning during enumeration for invalid IRQ of RP */
1839 data = rp_readl(port, NV_PCIE2_RP_INTR_BCR);
1840 data |= NV_PCIE2_RP_INTR_BCR_INTR_LINE;
1841 rp_writel(port, data, NV_PCIE2_RP_INTR_BCR);
1842 #if defined(CONFIG_ARCH_TEGRA_21x_SOC)
1843 /* resize buffers for better perf, bug#1447522 */
1845 struct tegra_pcie_port *temp_port;
1846 list_for_each_entry(temp_port, &pcie->ports, list) {
1847 data = rp_readl(temp_port,
1848 NV_PCIE2_RP_XP_CTL_1);
1849 data |= PCIE2_RP_XP_CTL_1_SPARE_BIT29;
1850 rp_writel(temp_port, data,
1851 NV_PCIE2_RP_XP_CTL_1);
1853 data = rp_readl(temp_port,
1854 NV_PCIE2_RP_TX_HDR_LIMIT);
1855 if (temp_port->index)
1856 data |= PCIE2_RP_TX_HDR_LIMIT_NPT_1;
1858 data |= PCIE2_RP_TX_HDR_LIMIT_NPT_0;
1859 rp_writel(temp_port, data,
1860 NV_PCIE2_RP_TX_HDR_LIMIT);
1863 /* Bug#1461732 WAR, set clkreq asserted delay greater than */
1864 /* power off time (2us) to avoid RP wakeup in L1.2_ENTRY */
1865 data = rp_readl(port, NV_PCIE2_RP_L1_PM_SUBSTATES_1_CYA);
1866 data &= ~PCIE2_RP_L1_PM_SUBSTATES_1_CYA_CLKREQ_ASSERTED_DLY_MASK;
1867 data |= PCIE2_RP_L1_PM_SUBSTATES_1_CYA_CLKREQ_ASSERTED_DLY;
1868 rp_writel(port, data, NV_PCIE2_RP_L1_PM_SUBSTATES_1_CYA);
1870 /* take care of link speed change error in corner cases */
1871 data = rp_readl(port, NV_PCIE2_RP_VEND_CTL0);
1872 data &= ~PCIE2_RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK;
1873 data |= PCIE2_RP_VEND_CTL0_DSK_RST_PULSE_WIDTH;
1874 rp_writel(port, data, NV_PCIE2_RP_VEND_CTL0);
1876 data = rp_readl(port, NV_PCIE2_RP_VEND_XP_PAD_PWRDN);
1877 data |= NV_PCIE2_RP_VEND_XP_PAD_PWRDN_DISABLED_EN;
1878 data |= NV_PCIE2_RP_VEND_XP_PAD_PWRDN_DYNAMIC_EN;
1879 data |= NV_PCIE2_RP_VEND_XP_PAD_PWRDN_L1_EN;
1880 data |= NV_PCIE2_RP_VEND_XP_PAD_PWRDN_L1_CLKREQ_EN;
1881 data |= NV_PCIE2_RP_VEND_XP_PAD_PWRDN_SLEEP_MODE_DYNAMIC_L1PP;
1882 data |= NV_PCIE2_RP_VEND_XP_PAD_PWRDN_SLEEP_MODE_L1_L1PP;
1883 data |= NV_PCIE2_RP_VEND_XP_PAD_PWRDN_SLEEP_MODE_L1_CLKREQ_L1PP;
1884 rp_writel(port, data, NV_PCIE2_RP_VEND_XP_PAD_PWRDN);
1886 /* Do timer settings only if clk25m freq equal to 19.2 MHz */
1887 if (clk_get_rate(clk_get_sys(NULL, "clk_m")) != 19200000)
1889 data = rp_readl(port, NV_PCIE2_RP_TIMEOUT0);
1890 data &= ~PCIE2_RP_TIMEOUT0_PAD_PWRUP_MASK;
1891 data |= PCIE2_RP_TIMEOUT0_PAD_PWRUP;
1892 data &= ~PCIE2_RP_TIMEOUT0_PAD_PWRUP_CM_MASK;
1893 data |= PCIE2_RP_TIMEOUT0_PAD_PWRUP_CM;
1894 data &= ~PCIE2_RP_TIMEOUT0_PAD_SPDCHNG_GEN2_MASK;
1895 data |= PCIE2_RP_TIMEOUT0_PAD_SPDCHNG_GEN2;
1896 rp_writel(port, data, NV_PCIE2_RP_TIMEOUT0);
1898 data = rp_readl(port, NV_PCIE2_RP_TIMEOUT1);
1899 data &= ~PCIE2_RP_TIMEOUT1_RCVRY_SPD_SUCCESS_EIDLE_MASK;
1900 data |= PCIE2_RP_TIMEOUT1_RCVRY_SPD_SUCCESS_EIDLE;
1901 data &= ~PCIE2_RP_TIMEOUT1_RCVRY_SPD_UNSUCCESS_EIDLE_MASK;
1902 data |= PCIE2_RP_TIMEOUT1_RCVRY_SPD_UNSUCCESS_EIDLE;
1903 rp_writel(port, data, NV_PCIE2_RP_TIMEOUT1);
1905 data = rp_readl(port, NV_PCIE2_RP_XP_REF);
1906 data &= ~PCIE2_RP_XP_REF_MICROSECOND_LIMIT_MASK;
1907 data |= PCIE2_RP_XP_REF_MICROSECOND_LIMIT;
1908 data |= PCIE2_RP_XP_REF_MICROSECOND_ENABLE;
1909 data |= PCIE2_RP_XP_REF_CPL_TO_OVERRIDE;
1910 data &= ~PCIE2_RP_XP_REF_CPL_TO_CUSTOM_VALUE_MASK;
1911 data |= PCIE2_RP_XP_REF_CPL_TO_CUSTOM_VALUE;
1912 rp_writel(port, data, NV_PCIE2_RP_XP_REF);
1914 data = rp_readl(port, NV_PCIE2_RP_L1_PM_SUBSTATES_1_CYA);
1915 data &= ~PCIE2_RP_L1_PM_SUBSTATES_1_CYA_PWR_OFF_DLY_MASK;
1916 data |= PCIE2_RP_L1_PM_SUBSTATES_1_CYA_PWR_OFF_DLY;
1917 rp_writel(port, data, NV_PCIE2_RP_L1_PM_SUBSTATES_1_CYA);
1919 data = rp_readl(port, NV_PCIE2_RP_L1_PM_SUBSTATES_2_CYA);
1920 data &= ~PCIE2_RP_L1_PM_SUBSTATES_2_CYA_T_L1_2_DLY_MASK;
1921 data |= PCIE2_RP_L1_PM_SUBSTATES_2_CYA_T_L1_2_DLY;
1922 data &= ~PCIE2_RP_L1_PM_SUBSTATES_2_CYA_MICROSECOND_MASK;
1923 data |= PCIE2_RP_L1_PM_SUBSTATES_2_CYA_MICROSECOND;
1924 data &= ~PCIE2_RP_L1_PM_SUBSTATES_2_CYA_MICROSECOND_COMP_MASK;
1925 data |= PCIE2_RP_L1_PM_SUBSTATES_2_CYA_MICROSECOND_COMP;
1926 rp_writel(port, data, NV_PCIE2_RP_L1_PM_SUBSTATES_2_CYA);
1928 /* WAR for RAW violation on T124/T132 platforms */
1929 data = rp_readl(port, NV_PCIE2_RP_RX_HDR_LIMIT);
1930 data &= ~PCIE2_RP_RX_HDR_LIMIT_PW_MASK;
1931 data |= PCIE2_RP_RX_HDR_LIMIT_PW;
1932 rp_writel(port, data, NV_PCIE2_RP_RX_HDR_LIMIT);
1934 data = rp_readl(port, NV_PCIE2_RP_PRIV_XP_DL);
1935 data |= PCIE2_RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD;
1936 rp_writel(port, data, NV_PCIE2_RP_PRIV_XP_DL);
1938 data = rp_readl(port, RP_VEND_XP);
1939 data |= RP_VEND_XP_UPDATE_FC_THRESHOLD;
1940 rp_writel(port, data, RP_VEND_XP);
1942 data = rp_readl(port, NV_PCIE2_RP_VEND_XP_PAD_PWRDN);
1943 data |= NV_PCIE2_RP_VEND_XP_PAD_PWRDN_DISABLED_EN;
1944 data |= NV_PCIE2_RP_VEND_XP_PAD_PWRDN_DYNAMIC_EN;
1945 data |= NV_PCIE2_RP_VEND_XP_PAD_PWRDN_L1_EN;
1946 data |= NV_PCIE2_RP_VEND_XP_PAD_PWRDN_L1_CLKREQ_EN;
1947 data |= NV_PCIE2_RP_VEND_XP_PAD_PWRDN_SLEEP_MODE_DYNAMIC_L1PP;
1948 data |= NV_PCIE2_RP_VEND_XP_PAD_PWRDN_SLEEP_MODE_L1_L1P;
1949 data |= NV_PCIE2_RP_VEND_XP_PAD_PWRDN_SLEEP_MODE_L1_CLKREQ_L1P;
1950 rp_writel(port, data, NV_PCIE2_RP_VEND_XP_PAD_PWRDN);
1955 /* Enable various features of root port */
/*
 * tegra_pcie_enable_rp_features() - per-port feature enable after reset.
 * @port: root port being configured
 *
 * Applies DT "prod" settings when available, tunes bandwidth
 * (opportunistic ACK/UpdateFC), clock clamping, ASPM L1 support,
 * DLLP-before-L1/L2 behavior, AER, and (T210) L1 substate timers.
 * Finishes by applying the pre-enumeration software workarounds.
 */
1956 static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port)
1961 if (port->pcie->prod_list) {
1962 if (tegra_prod_set_by_name(
1963 &(port->pcie->pads),
1965 port->pcie->prod_list)) {
1966 dev_info(port->pcie->dev,
1967 "pad prod settings are not found in DT\n");
1970 if (tegra_prod_set_by_name(
1973 port->pcie->prod_list)) {
1974 dev_info(port->pcie->dev,
1975 "RP prod settings are not found in DT\n");
1979 /* Optimal settings to enhance bandwidth */
1980 data = rp_readl(port, RP_VEND_XP);
1981 data |= RP_VEND_XP_OPPORTUNISTIC_ACK;
1982 data |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC;
1983 rp_writel(port, data, RP_VEND_XP);
1985 /* Power management settings */
1986 /* Enable clock clamping by default and enable card detect */
1987 data = rp_readl(port, NV_PCIE2_RP_PRIV_MISC);
1988 data |= PCIE2_RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
1989 PCIE2_RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE |
1990 PCIE2_RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD |
1991 PCIE2_RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;
1992 rp_writel(port, data, NV_PCIE2_RP_PRIV_MISC);
1994 /* Enable ASPM - L1 state support by default */
1995 data = rp_readl(port, NV_PCIE2_RP_VEND_XP1);
1996 data |= NV_PCIE2_RP_VEND_XP_LINK_PVT_CTL_L1_ASPM_SUPPORT;
1997 rp_writel(port, data, NV_PCIE2_RP_VEND_XP1);
1999 /* LTSSM wait for DLLP to finish before entering L1 or L2/L3 */
2000 /* to avoid truncating of PM messages resulting in receiver errors */
2001 data = rp_readl(port, NV_PCIE2_RP_VEND_XP_BIST);
2002 data |= PCIE2_RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
2003 rp_writel(port, data, NV_PCIE2_RP_VEND_XP_BIST);
2005 /* unhide AER capability */
2006 tegra_pcie_enable_aer(port, true);
2008 #if defined(CONFIG_ARCH_TEGRA_21x_SOC)
2009 /* program timers for L1 substate support */
2010 /* set cm_rtime = 30us and t_pwr_on = 70us as per HW team */
2011 data = rp_readl(port, NV_PCIE2_RP_L1_PM_SUBSTATES_CYA);
2012 data &= ~PCIE2_RP_L1_PM_SUBSTATES_CYA_CM_RTIME_MASK;
2013 data |= (0x1E << PCIE2_RP_L1_PM_SUBSTATES_CYA_CM_RTIME_SHIFT);
2014 rp_writel(port, data, NV_PCIE2_RP_L1_PM_SUBSTATES_CYA);
2016 data = rp_readl(port, NV_PCIE2_RP_L1_PM_SUBSTATES_CYA);
2017 data &= ~(PCIE2_RP_L1_PM_SUBSTATES_CYA_T_PWRN_SCL_MASK |
2018 PCIE2_RP_L1_PM_SUBSTATES_CYA_T_PWRN_VAL_MASK);
2019 data |= (1 << PCIE2_RP_L1_PM_SUBSTATES_CYA_T_PWRN_SCL_SHIFT) |
2020 (7 << PCIE2_RP_L1_PM_SUBSTATES_CYA_T_PWRN_VAL_SHIFT);
2021 rp_writel(port, data, NV_PCIE2_RP_L1_PM_SUBSTATES_CYA);
2023 tegra_pcie_apply_sw_war(port, false);
/*
 * tegra_pcie_update_lane_width() - cache the negotiated link width.
 * @port: root port whose lane count is refreshed
 *
 * Reads the negotiated-link-width field of the RP link status register
 * (bits starting at 20) into port->lanes.
 */
2026 static void tegra_pcie_update_lane_width(struct tegra_pcie_port *port)
2028 port->lanes = rp_readl(port, RP_LINK_CONTROL_STATUS);
2029 port->lanes = (port->lanes &
2030 RP_LINK_CONTROL_STATUS_NEG_LINK_WIDTH) >> 20;
/*
 * tegra_pcie_update_pads2plle() - disable pads-driven PLLE control
 * based on the port's PEX status register.
 * @port: root port to inspect
 *
 * Reads AFI_PEX_STATUS (located at AFI_PEX_CTRL + 4) and, under a
 * condition elided from this view, clears PADS2PLLE_CONTROL_EN in
 * AFI_PLLE_CONTROL.
 */
2033 static void tegra_pcie_update_pads2plle(struct tegra_pcie_port *port)
2035 unsigned long ctrl = 0;
2038 ctrl = tegra_pcie_port_get_pex_ctrl(port);
2039 /* AFI_PEX_STATUS is AFI_PEX_CTRL + 4 */
2040 val = afi_readl(port->pcie, ctrl + 4);
2042 val = afi_readl(port->pcie, AFI_PLLE_CONTROL);
2043 val &= ~AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
2044 afi_writel(port->pcie, val, AFI_PLLE_CONTROL);
2048 #if defined(CONFIG_ARCH_TEGRA_21x_SOC)
/*
 * mbist_war() - toggle the T210 MBIST hardware-bug workaround.
 * @pcie: PCIe host controller state
 * @apply: true to set PCA_ENABLE on root port 0, false to clear it
 */
2049 static void mbist_war(struct tegra_pcie *pcie, bool apply)
2051 struct tegra_pcie_port *port, *tmp;
2054 list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2055 /* nature of MBIST bug is such that it needs to be applied
2056 * only for RootPort-0 even if there are no devices
2057 * connected to it */
2058 if (port->index == 0) {
2059 data = rp_readl(port, NV_PCIE2_RP_VEND_CTL2);
2061 data |= PCIE2_RP_VEND_CTL2_PCA_ENABLE;
2063 data &= ~PCIE2_RP_VEND_CTL2_PCA_ENABLE;
2064 rp_writel(port, data, NV_PCIE2_RP_VEND_CTL2);
/*
 * tegra_pcie_check_ports() - probe each root port's link and keep only
 * the ones that come up.
 * @pcie: PCIe host controller state
 *
 * Enables every port (with presence-detect override from GPIO when
 * available), deasserts xclk reset, waits ~20 ms for receiver detect,
 * then checks each link; ports whose link stays down are disabled.
 * The T210 MBIST workaround brackets the whole sequence.
 */
2070 static void tegra_pcie_check_ports(struct tegra_pcie *pcie)
2072 struct tegra_pcie_port *port, *tmp;
2075 pcie->num_ports = 0;
2077 #if defined(CONFIG_ARCH_TEGRA_21x_SOC)
2078 mbist_war(pcie, true);
2080 list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2081 dev_info(pcie->dev, "probing port %u, using %u lanes and lane map as 0x%x\n",
2082 port->index, port->lanes, pcie->plat_data->lane_map);
2084 tegra_pcie_port_enable(port);
2085 tegra_pcie_enable_rp_features(port);
2086 /* override presence detection */
2087 if (gpio_is_valid(port->gpio_presence_detection))
2088 tegra_pcie_prsnt_map_override(port,
2089 !(gpio_get_value_cansleep(
2090 port->gpio_presence_detection)));
2092 tegra_pcie_prsnt_map_override(port, true);
2094 /* Wait for clock to latch (min of 100us) */
2096 tegra_periph_reset_deassert(pcie->pcie_xclk);
2097 /* at this point in time, there is no end point which would
2098 * take more than 20 msec for root port to detect receiver and
2099 * set AUX_TX_RDET_STATUS bit. This would bring link up checking
2100 * time from its current value (around 200ms) to flat 20ms
2102 usleep_range(19000, 21000);
2103 list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2104 if (tegra_pcie_port_check_link(port)) {
2107 tegra_pcie_update_lane_width(port);
2108 tegra_pcie_update_pads2plle(port);
2111 dev_info(pcie->dev, "link %u down, ignoring\n", port->index);
2112 tegra_pcie_port_disable(port);
2114 #if defined(CONFIG_ARCH_TEGRA_21x_SOC)
2115 mbist_war(pcie, false);
/*
 * tegra_pcie_conf_gpios() - request and configure board-level GPIOs.
 * @pcie: PCIe host controller state
 *
 * Sets up, when the platform data declares them: the hot-plug detect
 * input (with a both-edge IRQ), the x1-slot power-enable output
 * (driven high), the wake input, and the per-port presence-detect
 * input.
 */
2119 static int tegra_pcie_conf_gpios(struct tegra_pcie *pcie)
2122 struct tegra_pcie_port *port, *tmp;
2125 if (gpio_is_valid(pcie->plat_data->gpio_hot_plug)) {
2126 /* configure gpio for hotplug detection */
2127 dev_info(pcie->dev, "acquiring hotplug_detect = %d\n",
2128 pcie->plat_data->gpio_hot_plug);
2129 err = devm_gpio_request(pcie->dev,
2130 pcie->plat_data->gpio_hot_plug,
2131 "pcie_hotplug_detect");
2133 dev_err(pcie->dev, "%s: gpio_request failed %d\n",
2137 err = gpio_direction_input(
2138 pcie->plat_data->gpio_hot_plug);
2141 "%s: gpio_direction_input failed %d\n",
2145 irq = gpio_to_irq(pcie->plat_data->gpio_hot_plug);
2148 "Unable to get irq for hotplug_detect\n");
2151 err = devm_request_irq(pcie->dev, (unsigned int)irq,
2152 gpio_pcie_detect_isr,
2153 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
2154 "pcie_hotplug_detect",
2158 "Unable to claim irq for hotplug_detect\n");
2162 if (gpio_is_valid(pcie->plat_data->gpio_x1_slot)) {
2163 err = devm_gpio_request(pcie->dev,
2164 pcie->plat_data->gpio_x1_slot, "pcie_x1_slot");
2167 "%s: pcie_x1_slot gpio_request failed %d\n",
2171 err = gpio_direction_output(
2172 pcie->plat_data->gpio_x1_slot, 1);
2175 "%s: pcie_x1_slot gpio_direction_output failed %d\n",
2179 gpio_set_value_cansleep(
2180 pcie->plat_data->gpio_x1_slot, 1);
2182 if (gpio_is_valid(pcie->plat_data->gpio_wake)) {
2183 err = devm_gpio_request(pcie->dev,
2184 pcie->plat_data->gpio_wake, "pcie_wake");
2187 "%s: pcie_wake gpio_request failed %d\n",
2191 err = gpio_direction_input(
2192 pcie->plat_data->gpio_wake);
2195 "%s: pcie_wake gpio_direction_input failed %d\n",
2201 list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2202 if (gpio_is_valid(port->gpio_presence_detection)) {
2203 err = devm_gpio_request_one(pcie->dev,
2204 port->gpio_presence_detection,
2206 "pcie_presence_detection");
2209 "%s: pcie_prsnt gpio_request failed %d\n",
/*
 * tegra_pcie_scale_voltage() - pick and apply clock rates for the link
 * generation.
 * @pcie: PCIe host controller state
 * @isGen2: true for Gen2 rates (xclk 500 / mselect 408 / emc 528),
 *          false for Gen1 rates (250 / 204 / 102)
 *
 * Returns early (no-op) when the target rates are already programmed;
 * otherwise updates the module-level rate variables and applies them
 * with clk_set_rate().
 */
2218 static int tegra_pcie_scale_voltage(struct tegra_pcie *pcie, bool isGen2)
2224 if (tegra_pcie_xclk_rate == TEGRA_PCIE_XCLK_500 &&
2225 tegra_pcie_mselect_rate == TEGRA_PCIE_MSELECT_CLK_408 &&
2226 tegra_pcie_emc_rate == TEGRA_PCIE_EMC_CLK_528)
2228 /* Scale up voltage for Gen2 speed */
2229 tegra_pcie_xclk_rate = TEGRA_PCIE_XCLK_500;
2230 tegra_pcie_mselect_rate = TEGRA_PCIE_MSELECT_CLK_408;
2231 tegra_pcie_emc_rate = TEGRA_PCIE_EMC_CLK_528;
2233 if (tegra_pcie_xclk_rate == TEGRA_PCIE_XCLK_250 &&
2234 tegra_pcie_mselect_rate == TEGRA_PCIE_MSELECT_CLK_204 &&
2235 tegra_pcie_emc_rate == TEGRA_PCIE_EMC_CLK_102)
2237 /* Scale down voltage for Gen1 speed */
2238 tegra_pcie_xclk_rate = TEGRA_PCIE_XCLK_250;
2239 tegra_pcie_mselect_rate = TEGRA_PCIE_MSELECT_CLK_204;
2240 tegra_pcie_emc_rate = TEGRA_PCIE_EMC_CLK_102;
2242 err = clk_set_rate(pcie->pcie_xclk, tegra_pcie_xclk_rate);
2245 err = clk_set_rate(pcie->pcie_mselect, tegra_pcie_mselect_rate);
2248 err = clk_set_rate(pcie->pcie_emc, tegra_pcie_emc_rate);
/*
 * tegra_pcie_change_link_speed() - retrain one endpoint's link to
 * Gen1 or Gen2.
 * @pcie: PCIe host controller state
 * @pdev: endpoint device whose upstream link is retrained
 * @isGen2: target speed, true = 5.0 GT/s, false = 2.5 GT/s
 *
 * Skips non-PCIe devices and root/downstream ports, checks current and
 * maximum speeds from the link status/capability registers, scales the
 * controller clocks, writes the target link speed into the downstream
 * port's Link Control 2 register and triggers retraining via the
 * Retrain Link bit.
 */
2254 static bool tegra_pcie_change_link_speed(struct tegra_pcie *pcie,
2255 struct pci_dev *pdev, bool isGen2)
2257 u16 val, link_sts_up_spd, link_sts_dn_spd;
2258 u16 link_cap_up_spd, link_cap_dn_spd;
2259 struct pci_dev *up_dev, *dn_dev;
2262 /* skip if current device is not PCI express capable */
2263 /* or is either a root port or downstream port */
2264 if (!pci_is_pcie(pdev))
2266 if ((pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM) ||
2267 (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT))
2270 /* initialize upstream/endpoint and downstream/root port device ptr */
2272 dn_dev = pdev->bus->self;
2274 /* read link status register to find current speed */
2275 pcie_capability_read_word(up_dev, PCI_EXP_LNKSTA, &link_sts_up_spd);
2276 link_sts_up_spd &= PCI_EXP_LNKSTA_CLS;
2277 pcie_capability_read_word(dn_dev, PCI_EXP_LNKSTA, &link_sts_dn_spd);
2278 link_sts_dn_spd &= PCI_EXP_LNKSTA_CLS;
2279 /* read link capability register to find max speed supported */
2280 pcie_capability_read_word(up_dev, PCI_EXP_LNKCAP, &link_cap_up_spd);
2281 link_cap_up_spd &= PCI_EXP_LNKCAP_SLS;
2282 pcie_capability_read_word(dn_dev, PCI_EXP_LNKCAP, &link_cap_dn_spd);
2283 link_cap_dn_spd &= PCI_EXP_LNKCAP_SLS;
2284 /* skip if both devices across the link are already trained to gen2 */
2286 if (((link_cap_up_spd >= PCI_EXP_LNKSTA_CLS_5_0GB) &&
2287 (link_cap_dn_spd >= PCI_EXP_LNKSTA_CLS_5_0GB)) &&
2288 ((link_sts_up_spd != PCI_EXP_LNKSTA_CLS_5_0GB) ||
2289 (link_sts_dn_spd != PCI_EXP_LNKSTA_CLS_5_0GB)))
2294 /* gen1 should be supported by default by all pcie cards */
2295 if ((link_sts_up_spd != PCI_EXP_LNKSTA_CLS_2_5GB) ||
2296 (link_sts_dn_spd != PCI_EXP_LNKSTA_CLS_2_5GB))
2303 if (tegra_pcie_scale_voltage(pcie, isGen2))
2305 /* Set Link Speed */
2306 pcie_capability_read_word(dn_dev, PCI_EXP_LNKCTL2, &val);
2307 val &= ~PCI_EXP_LNKSTA_CLS;
2309 val |= PCI_EXP_LNKSTA_CLS_5_0GB;
2311 val |= PCI_EXP_LNKSTA_CLS_2_5GB;
2312 pcie_capability_write_word(dn_dev, PCI_EXP_LNKCTL2, val);
2314 /* Retrain the link */
2315 pcie_capability_read_word(dn_dev, PCI_EXP_LNKCTL, &val);
2316 val |= PCI_EXP_LNKCTL_RL;
2317 pcie_capability_write_word(dn_dev, PCI_EXP_LNKCTL, val);
/*
 * tegra_pcie_link_speed() - retrain every enumerated device's link.
 * @pcie: PCIe host controller state
 * @isGen2: target speed for all links
 *
 * Walks all PCI devices and delegates to
 * tegra_pcie_change_link_speed(); clock/voltage scaling is handled
 * there so it happens before any Gen2 transition (or after all Gen1
 * transitions).
 */
2324 static bool tegra_pcie_link_speed(struct tegra_pcie *pcie, bool isGen2)
2326 struct pci_dev *pdev = NULL;
2330 /* Voltage scaling should happen before any device transition */
2331 /* to Gen2 or after all devices has transitioned to Gen1 */
2332 for_each_pci_dev(pdev) {
2333 if (tegra_pcie_change_link_speed(pcie, pdev, isGen2))
2339 #if defined(CONFIG_ARCH_TEGRA_21x_SOC)
/*
 * Program T_POWER_ON for L1 PM Substates: read the T_POWER_ON value
 * advertised in the L1SS capability of both the root port (pdev->bus->self)
 * and the endpoint, pick the larger of the two, and write it into the
 * L1SS CTRL2 register of BOTH ends of the link, as required by the
 * PCIe L1 PM Substates ECN.
 */
2340 static void tegra_pcie_config_l1ss_tpwr_on(void)
2342 	struct pci_dev *pdev = NULL;
2343 	u32 data = 0, data1 = 0, data2 = 0, pos1 = 0, pos2 = 0;
2344 	unsigned long max1 = 0, max2 = 0;
2347 	/* find max T_POWER_ON reported by RP & EP capability regs */
2348 	/* and program same in ctrl2 reg of both RP & EP */
2349 	for_each_pci_dev(pdev) {
2350 		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) {
			/* locate L1SS ext capability in upstream RP and in this EP */
2351 			pos1 = pci_find_ext_capability(pdev->bus->self,
2352 						PCI_EXT_CAP_ID_L1SS);
2353 			pos2 = pci_find_ext_capability(pdev,
2354 						PCI_EXT_CAP_ID_L1SS);
			/* T_POWER_ON = scale * value, per L1SS capability encoding */
2357 			pci_read_config_dword(pdev->bus->self,
2358 				pos1 + PCI_L1SS_CAP, &data1);
2359 			max1 = (((data1 & PCI_L1SS_CAP_PWRN_SCL_MASK) >>
2360 				PCI_L1SS_CAP_PWRN_SCL_SHIFT) *
2361 				((data1 & PCI_L1SS_CAP_PWRN_VAL_MASK) >>
2362 				PCI_L1SS_CAP_PWRN_VAL_SHIFT));
2363 			pci_read_config_dword(pdev,
2364 				pos2 + PCI_L1SS_CAP, &data2);
2365 			max2 = (((data2 & PCI_L1SS_CAP_PWRN_SCL_MASK) >>
2366 				PCI_L1SS_CAP_PWRN_SCL_SHIFT) *
2367 				((data2 & PCI_L1SS_CAP_PWRN_VAL_MASK) >>
2368 				PCI_L1SS_CAP_PWRN_VAL_SHIFT));
			/* NOTE(review): branch selecting max1 vs max2 is not
			 * visible in this extract — confirm the comparison in
			 * the full source. */
2370 				data = (data1 & PCI_L1SS_CAP_PWRN_VS_MASK) >>
2371 					PCI_L1SS_CAP_PWRN_SCL_SHIFT;
2373 				data = (data2 & PCI_L1SS_CAP_PWRN_VS_MASK) >>
2374 					PCI_L1SS_CAP_PWRN_SCL_SHIFT;
			/* write the winning T_POWER_ON to both EP and RP CTRL2 */
2376 			pci_write_config_dword(pdev,
2377 				pos2 + PCI_L1SS_CTRL2, data);
2378 			pci_write_config_dword(pdev->bus->self,
2379 				pos1 + PCI_L1SS_CTRL2, data);
/*
 * Configure Common_Mode_Restore_Time for L1SS: scan all non-root-port
 * devices, take the maximum CM restore time each advertises in its L1SS
 * capability register, then program that value into the L1SS CTRL1
 * register of every device in a second pass.
 */
2384 static void tegra_pcie_config_l1ss_cm_rtime(void)
2386 	struct pci_dev *pdev = NULL;
2387 	u32 data = 0, max[MAX_PCIE_SUPPORTED_PORTS] = {0};
2388 	int i = -1, pos = 0;
2391 	/* find max of common mode restore time reported by all */
2392 	/* devices including RP in capability register, and set same */
2393 	/* in control 1 register after substracting t_pwr_on for both RP & EP */
2394 	for_each_pci_dev(pdev) {
		/* pass 1: gather per-port maxima (root ports skipped here) */
2395 		if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
2397 		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
2398 		pci_read_config_dword(pdev, pos + PCI_L1SS_CAP, &data);
2399 		data &= PCI_L1SS_CAP_CM_RTM_MASK;
2404 	for_each_pci_dev(pdev) {
		/* pass 2: program the computed max back into CTRL1 */
2405 		if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
2407 		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
2408 		pci_read_config_dword(pdev, pos + PCI_L1SS_CTRL1, &data);
2409 		data &= ~PCI_L1SS_CAP_CM_RTM_MASK;
2411 		pci_write_config_dword(pdev, pos + PCI_L1SS_CTRL1, data);
/*
 * Program the same LTR L1.2 threshold on every device: value 0x37 with
 * scale 0x02 in L1SS CTRL1, which the comment below states corresponds
 * to ~106us. Uses read-modify-write so other CTRL1 bits are preserved.
 */
2415 static void tegra_pcie_config_l1ss_l12_thtime(void)
2417 	struct pci_dev *pdev = NULL;
2418 	u32 data = 0, pos = 0;
2421 	/* program same LTR L1.2 threshold = 106us for all devices */
2422 	for_each_pci_dev(pdev) {
2423 		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
		/* threshold value field */
2424 		pci_read_config_dword(pdev, pos + PCI_L1SS_CTRL1, &data);
2425 		data |= 0x37 << PCI_L1SS_CTRL1_L12TH_VAL_SHIFT;
2426 		pci_write_config_dword(pdev, pos + PCI_L1SS_CTRL1, data);
		/* threshold scale field (written in a second RMW cycle) */
2427 		pci_read_config_dword(pdev, pos + PCI_L1SS_CTRL1, &data);
2428 		data |= 0x02 << PCI_L1SS_CTRL1_L12TH_SCALE_SHIFT;
2429 		pci_write_config_dword(pdev, pos + PCI_L1SS_CTRL1, data);
2430 		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT)
/*
 * Enable L1 PM substates on every device, but only the substates the
 * device itself advertises: copy the L1PM bits from the L1SS capability
 * register into the L1SS CTRL1 enable field.
 */
2435 static void tegra_pcie_enable_l1ss_support(void)
2437 	struct pci_dev *pdev = NULL;
2438 	u32 aspm = 0, data = 0, pos = 0;
2441 	for_each_pci_dev(pdev) {
2442 		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
2443 		/* enable L1 substate as per device capability */
2444 		pci_read_config_dword(pdev, pos + PCI_L1SS_CAP, &aspm);
2445 		pci_read_config_dword(pdev, pos + PCI_L1SS_CTRL1, &data);
2446 		data &= ~PCI_L1SS_CAP_L1PM_MASK;
2447 		data |= (aspm & PCI_L1SS_CAP_L1PM_MASK);
2448 		pci_write_config_dword(pdev, pos + PCI_L1SS_CTRL1, data);
2449 		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT)
/*
 * Enable the LTR (Latency Tolerance Reporting) mechanism — a prerequisite
 * for L1.2 — on every device that advertises LTR support in DEVCAP2,
 * by setting PCI_EXP_LTR_EN in DEVCTL2.
 */
2454 static void tegra_pcie_enable_ltr_support(void)
2456 	struct pci_dev *pdev = NULL;
2461 	/* enable LTR mechanism for L1.2 support */
2462 	for_each_pci_dev(pdev) {
2463 		pcie_capability_read_dword(pdev, PCI_EXP_DEVCAP2, &data);
2464 		if (data & PCI_EXP_DEVCAP2_LTR) {
2465 			pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
2466 			val |= PCI_EXP_LTR_EN;
2467 			pcie_capability_write_word(pdev, PCI_EXP_DEVCTL2, val);
2469 		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT)
/*
 * Reconfigure the PEX CLKREQ# pads via pinctrl.
 * enable=true : make CLKREQ# bi-directional (open-drain/tristate config),
 *               required when L1 PM substates are enabled.
 * enable=false: make CLKREQ# input-only; ASPM L1 is momentarily disabled
 *               on all devices around the pad change, then re-enabled.
 * The pinctrl device handle is cached in a static — NOTE(review): this
 * assumes a single PCIe controller instance; confirm in full source.
 */
2474 static void tegra_pcie_config_clkreq(struct tegra_pcie *pcie, bool enable)
2476 	static struct pinctrl_dev *pctl_dev = NULL;
2477 	unsigned long od_conf, tr_conf;
2482 		pctl_dev = pinctrl_get_dev_from_of_compatible(
2483 					pinctrl_compatible);
2486 				"%s(): tegra pincontrol does not found\n", __func__);
2490 		od_conf = TEGRA_PINCONF_PACK(TEGRA_PINCONF_PARAM_OPEN_DRAIN,
2492 		tr_conf = TEGRA_PINCONF_PACK(TEGRA_PINCONF_PARAM_TRISTATE,
2495 		od_conf = TEGRA_PINCONF_PACK(TEGRA_PINCONF_PARAM_OPEN_DRAIN,
2497 		tr_conf = TEGRA_PINCONF_PACK(TEGRA_PINCONF_PARAM_TRISTATE,
2501 		/* Make CLKREQ# bi-directional if L1PM SS are enabled */
2502 		pinctrl_set_config_for_group_name(pctl_dev,
2503 					pin_pex_l0_clkreq, tr_conf);
2504 		pinctrl_set_config_for_group_name(pctl_dev,
2505 					pin_pex_l0_clkreq, od_conf);
2506 		pinctrl_set_config_for_group_name(pctl_dev,
2507 					pin_pex_l1_clkreq, tr_conf);
2508 		pinctrl_set_config_for_group_name(pctl_dev,
2509 					pin_pex_l1_clkreq, od_conf);
2511 		struct pci_dev *pdev = NULL;
2514 		/* Make CLKREQ# input only if L1PM SS is disabled later */
2515 		/* also disable ASPM L1 momentarily before doing this */
2516 		for_each_pci_dev(pdev) {
2517 			pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &val);
2518 			val &= ~PCI_EXP_LNKCTL_ASPM_L1;
2519 			pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, val);
2521 		pinctrl_set_config_for_group_name(pctl_dev,
2522 					pin_pex_l0_clkreq, tr_conf);
2523 		pinctrl_set_config_for_group_name(pctl_dev,
2524 					pin_pex_l0_clkreq, od_conf);
2525 		pinctrl_set_config_for_group_name(pctl_dev,
2526 					pin_pex_l1_clkreq, tr_conf);
2527 		pinctrl_set_config_for_group_name(pctl_dev,
2528 					pin_pex_l1_clkreq, od_conf);
		/* pads reconfigured; restore ASPM L1 on all devices */
2529 		for_each_pci_dev(pdev) {
2530 			pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &val);
2531 			val |= PCI_EXP_LNKCTL_ASPM_L1;
2532 			pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, val);
2539 /* Enable ASPM support of all devices based on it's capability */
/*
 * Top-level ASPM/L1SS bring-up. Bails out early when the kernel's ASPM
 * support is disabled. On Tegra210, verifies every device exposes a
 * usable L1SS capability (clearing config_l1ss otherwise) and then runs
 * the full L1SS configuration sequence: CLKREQ pads, T_POWER_ON,
 * CM restore time, L1.2 threshold, substate enables, and LTR.
 */
2540 static void tegra_pcie_enable_aspm(struct tegra_pcie *pcie)
2542 #if defined(CONFIG_ARCH_TEGRA_21x_SOC)
2543 	struct pci_dev *pdev = NULL;
2546 	bool config_l1ss = true;
2550 	if (!pcie_aspm_support_enabled()) {
2551 		dev_info(pcie->dev, "PCIE: ASPM not enabled\n");
2554 #if defined(CONFIG_ARCH_TEGRA_21x_SOC)
2555 	/* L1SS configuration as per IAS */
2556 	for_each_pci_dev(pdev) {
2557 		/* check if L1SS capability is supported in current device */
2558 		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
2560 			config_l1ss = false;
2563 		/* avoid L1SS config if no support of L1PM substate feature */
2564 		pci_read_config_dword(pdev, pos + PCI_L1SS_CAP, &aspm);
2565 		if (((aspm & PCI_L1SS_CAP_L1PMS) == 0) ||
2566 			((aspm & PCI_L1SS_CAP_L1PM_MASK) == 0)) {
2567 			config_l1ss = false;
2570 		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT)
		/* all devices L1SS-capable: apply the full IAS sequence */
2574 		tegra_pcie_config_clkreq(pcie, true);
2575 		tegra_pcie_config_l1ss_tpwr_on();
2576 		tegra_pcie_config_l1ss_cm_rtime();
2577 		tegra_pcie_config_l1ss_l12_thtime();
2578 		tegra_pcie_enable_l1ss_support();
2579 		tegra_pcie_enable_ltr_support();
/*
 * Post-enumeration feature enablement: try to bring all links to Gen2,
 * enable ASPM, and apply per-port SW workarounds.
 */
2584 static void tegra_pcie_enable_features(struct tegra_pcie *pcie)
2586 	struct tegra_pcie_port *port;
2589 	/* configure all links to gen2 speed by default */
2590 	if (!tegra_pcie_link_speed(pcie, true))
2591 		dev_info(pcie->dev, "PCIE: No Link speed change happened\n");
2593 	tegra_pcie_enable_aspm(pcie);
2594 	list_for_each_entry(port, &pcie->ports, list) {
2596 			tegra_pcie_apply_sw_war(port, true);
2599 static int tegra_pcie_enable_msi(struct tegra_pcie *, bool);
2600 static int tegra_pcie_disable_msi(struct tegra_pcie *pcie);
/*
 * Main controller init sequence: acquire resources, enable pads, release
 * AFI/PCIe resets, program the controller and AFI address translations,
 * set up MSI, probe ports, and register with the PCI core. On failure,
 * unwinds via the labels at the bottom (power off, regulators, clocks).
 */
2602 static int tegra_pcie_init(struct tegra_pcie *pcie)
2605 	struct platform_device *pdev = to_platform_device(pcie->dev);
2607 	pcibios_min_io = 0x1000ul;
2610 	INIT_WORK(&pcie->hotplug_detect, work_hotplug_handler);
2611 	err = tegra_pcie_get_resources(pcie);
2613 		dev_err(pcie->dev, "PCIE: get resources failed\n");
2616 	err = tegra_pcie_enable_pads(pcie, true);
2618 		dev_err(pcie->dev, "PCIE: enable pads failed\n");
2619 		goto fail_release_resource;
2622 	tegra_periph_reset_deassert(pcie->pcie_afi);
2624 	tegra_pcie_enable_controller(pcie);
2625 	err = tegra_pcie_conf_gpios(pcie);
2627 		dev_err(pcie->dev, "PCIE: configuring gpios failed\n");
2628 		goto fail_release_resource;
2630 	/* setup the AFI address translations */
2631 	tegra_pcie_setup_translations(pcie);
2633 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
2634 		err = tegra_pcie_enable_msi(pcie, false);
2637 				"failed to enable MSI support: %d\n",
2639 			goto fail_release_resource;
2643 	tegra_periph_reset_deassert(pcie->pcie_pcie);
2645 	tegra_pcie_check_ports(pcie);
2647 	if (pcie->num_ports) {
		/* hand control to the arch PCI core for enumeration */
2648 		tegra_pcie_hw.private_data = (void **)&pcie;
2649 		tegra_pcie_hw.ops = &tegra_pcie_ops;
2650 		tegra_pcie_hw.sys = &pcie->sys;
2651 		pci_common_init_dev(pcie->dev, &tegra_pcie_hw);
2653 		dev_info(pcie->dev, "PCIE: no ports detected\n");
2656 	tegra_pcie_enable_features(pcie);
2657 	/* register pcie device as wakeup source */
2658 	device_init_wakeup(pcie->dev, true);
	/* error unwind path */
2663 	if (IS_ENABLED(CONFIG_PCI_MSI))
2664 		tegra_pcie_disable_msi(pcie);
2665 fail_release_resource:
2666 	tegra_pcie_power_off(pcie, true);
2667 	tegra_pcie_disable_regulators(pcie);
2668 	tegra_pcie_clocks_put(pcie);
2673 /* 1:1 matching of these to the MSI vectors, 1 per bit */
2674 /* and each mapping matches one of the available interrupts */
2675 struct msi_map_entry {
2681 /* hardware supports 256 max*/
2682 #if (INT_PCI_MSI_NR > 256)
2683 #error "INT_PCI_MSI_NR too big"
/*
 * Allocate one MSI vector: find the first free bit in chip->used under
 * chip->lock and mark it used. Returns the hwirq number (the index) —
 * TODO confirm failure return for "no free vector" in full source.
 */
2686 static int tegra_msi_alloc(struct tegra_msi *chip)
2692 	mutex_lock(&chip->lock);
2694 	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
2695 	if (msi < INT_PCI_MSI_NR)
2696 		set_bit(msi, chip->used);
2700 	mutex_unlock(&chip->lock);
/*
 * Release an MSI vector previously handed out by tegra_msi_alloc().
 * Logs an error (but still clears the bit) if the vector was not marked
 * used.
 */
2705 static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
2707 	struct device *dev = chip->chip.dev;
2711 	mutex_lock(&chip->lock);
2713 	if (!test_bit(irq, chip->used))
2714 		dev_err(dev, "trying to free unused MSI#%lu\n", irq);
2716 		clear_bit(irq, chip->used);
2718 	mutex_unlock(&chip->lock);
/*
 * Top-half MSI handler. The AFI exposes 8 x 32-bit pending-vector
 * registers (AFI_MSI_VEC0_0 .. +7*4); for each set bit: ack it in the
 * AFI, translate (bank, bit) -> hwirq index, and dispatch the mapped
 * Linux irq. Returns IRQ_HANDLED iff at least one vector was serviced.
 */
2722 static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
2724 	struct tegra_pcie *pcie = data;
2725 	struct tegra_msi *msi = &pcie->msi;
2726 	unsigned int i, processed = 0;
2730 	for (i = 0; i < 8; i++) {
2731 		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0_0 + i * 4);
2734 			unsigned int offset = find_first_bit(&reg, 32);
2735 			unsigned int index = i * 32 + offset;
2738 			/* clear the interrupt */
2739 			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0_0 + i * 4);
2741 			irq = irq_find_mapping(msi->domain, index);
2743 				if (test_bit(index, msi->used))
2744 					generic_handle_irq(irq);
2746 					dev_info(pcie->dev, "unhandled MSI\n");
2749 				 * that's weird who triggered this?
2752 				dev_info(pcie->dev, "unexpected MSI\n");
2755 			/* see if there's any more pending in this vector */
2756 			reg = afi_readl(pcie, AFI_MSI_VEC0_0 + i * 4);
2762 	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
/*
 * msi_chip .setup_irq callback: allocate a hwirq, map it in the MSI irq
 * domain, attach the msi_desc, and write an MSI message whose address is
 * the physical address of the dedicated msi->pages buffer.
 */
2765 static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
2766 			       struct msi_desc *desc)
2768 	struct tegra_msi *msi = to_tegra_msi(chip);
2775 	hwirq = tegra_msi_alloc(msi);
2779 	irq = irq_create_mapping(msi->domain, hwirq);
2783 	irq_set_msi_desc(irq, desc);
	/* target address = phys addr of the AFI-decoded MSI page */
2785 	msg.address_lo = virt_to_phys((void *)msi->pages) & 0xFFFFFFFF;
2787 	msg.address_hi = virt_to_phys((void *)msi->pages) >> 32;
2793 	write_msi_msg(irq, &msg);
/* msi_chip .teardown_irq callback: return the hwirq to the allocator. */
2798 static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
2800 	struct tegra_msi *msi = to_tegra_msi(chip);
2801 	struct irq_data *d = irq_get_irq_data(irq);
2804 	tegra_msi_free(msi, d->hwirq);
/* irq_chip for per-vector MSI irqs; all ops use the generic MSI mask/unmask. */
2807 static struct irq_chip tegra_msi_irq_chip = {
2808 	.name = "Tegra PCIe MSI",
2809 	.irq_enable = unmask_msi_irq,
2810 	.irq_disable = mask_msi_irq,
2811 	.irq_mask = mask_msi_irq,
2812 	.irq_unmask = unmask_msi_irq,
/* irq_domain .map callback: wire a new virq to the MSI irq_chip. */
2815 static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
2816 			 irq_hw_number_t hwirq)
2819 	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
2820 	irq_set_chip_data(irq, domain->host_data);
2821 	set_irq_flags(irq, IRQF_VALID);
/* Linear MSI irq domain: only .map is needed. */
2825 static const struct irq_domain_ops msi_domain_ops = {
2826 	.map = tegra_msi_map,
/*
 * Set up MSI: create the irq domain, request the "msi" platform irq,
 * allocate one page as the MSI target, program the AFI FPCI/AXI BAR to
 * decode writes to that page, enable all 256 vectors, and unmask the
 * AFI-level MSI interrupt. The no_init flag presumably skips one-time
 * setup on resume — TODO confirm, its use is not visible in this extract.
 */
2829 static int tegra_pcie_enable_msi(struct tegra_pcie *pcie, bool no_init)
2831 	struct platform_device *pdev = to_platform_device(pcie->dev);
2832 	struct tegra_msi *msi = &pcie->msi;
2843 	mutex_init(&msi->lock);
2845 	msi->chip.dev = pcie->dev;
2846 	msi->chip.setup_irq = tegra_msi_setup_irq;
2847 	msi->chip.teardown_irq = tegra_msi_teardown_irq;
2849 	msi->domain = irq_domain_add_linear(pcie->dev->of_node,
2850 				INT_PCI_MSI_NR, &msi_domain_ops, &msi->chip);
2852 		dev_err(&pdev->dev, "failed to create IRQ domain\n");
2856 	err = platform_get_irq_byname(pdev, "msi");
2858 		dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
2863 	err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
2864 			  tegra_msi_irq_chip.name, pcie);
2866 		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
2870 	/* setup AFI/FPCI range */
2871 	msi->pages = __get_free_pages(GFP_KERNEL, 0);
2873 	base = virt_to_phys((void *)msi->pages);
2875 	afi_writel(pcie, base >> 8, AFI_MSI_FPCI_BAR_ST);
2876 	afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
2877 	/* this register is in 4K increments */
2878 	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
2880 	/* enable all MSI vectors */
2881 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0_0);
2882 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1_0);
2883 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2_0);
2884 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3_0);
2885 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4_0);
2886 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5_0);
2887 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6_0);
2888 	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7_0);
2890 	/* and unmask the MSI interrupt */
2891 	reg = afi_readl(pcie, AFI_INTR_MASK);
2892 	reg |= AFI_INTR_MASK_MSI_MASK;
2893 	afi_writel(pcie, reg, AFI_INTR_MASK);
	/* error path: tear down the domain created above */
2898 	irq_domain_remove(msi->domain);
/*
 * Undo tegra_pcie_enable_msi(): mask the AFI MSI interrupt, disable all
 * vector-enable registers, free the MSI target page and the platform
 * irq, dispose all domain mappings, and remove the domain. Skipped when
 * the controller is already powered off.
 */
2902 static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
2904 	struct tegra_msi *msi = &pcie->msi;
2905 	unsigned int i, irq;
2910 	if (pcie->pcie_power_enabled == 0)
2913 	/* mask the MSI interrupt */
2914 	value = afi_readl(pcie, AFI_INTR_MASK);
2915 	value &= ~AFI_INTR_MASK_MSI_MASK;
2916 	afi_writel(pcie, value, AFI_INTR_MASK);
2918 	/* disable all MSI vectors */
2919 	afi_writel(pcie, 0, AFI_MSI_EN_VEC0_0);
2920 	afi_writel(pcie, 0, AFI_MSI_EN_VEC1_0);
2921 	afi_writel(pcie, 0, AFI_MSI_EN_VEC2_0);
2922 	afi_writel(pcie, 0, AFI_MSI_EN_VEC3_0);
2923 	afi_writel(pcie, 0, AFI_MSI_EN_VEC4_0);
2924 	afi_writel(pcie, 0, AFI_MSI_EN_VEC5_0);
2925 	afi_writel(pcie, 0, AFI_MSI_EN_VEC6_0);
2926 	afi_writel(pcie, 0, AFI_MSI_EN_VEC7_0);
2928 	free_pages(msi->pages, 0);
2931 	free_irq(msi->irq, pcie);
2933 	for (i = 0; i < INT_PCI_MSI_NR; i++) {
2934 		irq = irq_find_mapping(msi->domain, i);
2936 			irq_dispose_mapping(irq);
2939 	irq_domain_remove(msi->domain);
/*
 * Populate pcie->plat_data from device-tree properties: boot-detect
 * delay, hot-plug/wake/x1-slot GPIOs, LPDDR4 memtype flag, and the lane
 * map (defaulting to x4_x1 when the property is absent).
 */
2944 static void tegra_pcie_read_plat_data(struct tegra_pcie *pcie)
2946 	struct device_node *node = pcie->dev->of_node;
2949 	of_property_read_u32(node, "nvidia,boot-detect-delay",
2950 			&pcie->plat_data->boot_detect_delay);
2951 	pcie->plat_data->gpio_hot_plug =
2952 		of_get_named_gpio(node, "nvidia,hot-plug-gpio", 0);
2953 	pcie->plat_data->gpio_wake =
2954 		of_get_named_gpio(node, "nvidia,wake-gpio", 0);
2955 	pcie->plat_data->gpio_x1_slot =
2956 		of_get_named_gpio(node, "nvidia,x1-slot-gpio", 0);
2957 	pcie->plat_data->has_memtype_lpddr4 =
2958 		of_property_read_bool(node, "nvidia,has_memtype_lpddr4");
2959 	if (of_property_read_u32(node, "nvidia,lane-map",
2960 			&pcie->plat_data->lane_map)) {
2962 			"PCIE lane map attribute missing, use x4_x1 as default\n");
2963 		pcie->plat_data->lane_map = PCIE_LANES_X4_X1;
/* Regulator (power rail) names required per SoC generation. */
2967 static char *t124_rail_names[] = {"hvdd-pex", "hvdd-pex-pll-e", "dvddio-pex",
2968 				"avddio-pex", "avdd-pex-pll", "vddio-pex-ctl"};
/* Tegra210 has per-lane hvddio/dvddio rails in addition to the PLL rails. */
2970 static char *t210_rail_names[] = {"dvdd-pex-pll", "hvdd-pex-pll-e",
2971 				"l0-hvddio-pex", "l0-dvddio-pex",
2972 				"l1-hvddio-pex", "l1-dvddio-pex",
2973 				"l2-hvddio-pex", "l2-dvddio-pex",
2974 				"l3-hvddio-pex", "l3-dvddio-pex",
2975 				"l4-hvddio-pex", "l4-dvddio-pex",
2976 				"l5-hvddio-pex", "l5-dvddio-pex",
2977 				"l6-hvddio-pex", "l6-dvddio-pex",
/* Per-SoC data selected via the OF match table below. */
2980 static const struct tegra_pcie_soc_data tegra210_pcie_data = {
2982 	.pcie_regulator_names = t210_rail_names,
2983 	.num_pcie_regulators =
2984 		sizeof(t210_rail_names) / sizeof(t210_rail_names[0]),
2987 static const struct tegra_pcie_soc_data tegra124_pcie_data = {
2989 	.pcie_regulator_names = t124_rail_names,
2990 	.num_pcie_regulators =
2991 		sizeof(t124_rail_names) / sizeof(t124_rail_names[0]),
/* Device-tree compatibles handled by this driver, with per-SoC data. */
2994 static struct of_device_id tegra_pcie_of_match[] = {
2995 	{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie_data },
2996 	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie_data },
2999 MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
/*
 * Parse the controller's device-tree node: "ranges" into io / prefetch /
 * mem resources, the bus range (falling back to 0-0xff), then each child
 * "pci"-type node into a tegra_pcie_port (devfn-derived index, lane
 * count, regs mapping, presence-detect GPIO, clock-request flag).
 */
3001 static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
3003 	struct tegra_pcie_soc_data *soc = pcie->soc_data;
3004 	struct device_node *np = pcie->dev->of_node, *port;
3005 	struct of_pci_range_parser parser;
3006 	struct of_pci_range range;
3007 	u32 lanes = 0, mask = 0;
3008 	unsigned int lane = 0;
3009 	struct resource res;
3014 	if (of_pci_range_parser_init(&parser, np)) {
3015 		dev_err(pcie->dev, "missing \"ranges\" property\n");
3019 	for_each_of_pci_range(&parser, &range) {
3020 		of_pci_range_to_resource(&range, np, &res);
3021 		switch (res.flags & IORESOURCE_TYPE_BITS) {
3023 			memcpy(&pcie->io, &res, sizeof(res));
3024 			pcie->io.name = np->full_name;
3027 		case IORESOURCE_MEM:
3028 			if (res.flags & IORESOURCE_PREFETCH) {
3029 				memcpy(&pcie->prefetch, &res, sizeof(res));
3030 				pcie->prefetch.name = "pcie-prefetchable";
3032 				memcpy(&pcie->mem, &res, sizeof(res));
3033 				pcie->mem.name = "pcie-non-prefetchable";
3039 	err = of_pci_parse_bus_range(np, &pcie->busn);
3041 		dev_err(pcie->dev, "failed to parse ranges property: %d\n",
		/* no bus-range property: default to the full 0..0xff range */
3043 		pcie->busn.name = np->name;
3044 		pcie->busn.start = 0;
3045 		pcie->busn.end = 0xff;
3046 		pcie->busn.flags = IORESOURCE_BUS;
3049 	/* parse root ports */
3050 	for_each_child_of_node(np, port) {
3051 		struct tegra_pcie_port *rp;
3055 		if (strncmp(port->type, "pci", sizeof("pci")))
3058 		err = of_pci_get_devfn(port);
3060 			dev_err(pcie->dev, "failed to parse address: %d\n",
		/* port index encoded in the devfn's slot number (1-based) */
3065 		index = PCI_SLOT(err);
3066 		if (index < 1 || index > soc->num_ports) {
3067 			dev_err(pcie->dev, "invalid port number: %d\n", index);
3073 		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
3075 			dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
3081 			dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
3084 		lanes |= value << (index << 3);
3086 		if (!of_device_is_available(port)) {
3091 		mask |= ((1 << value) - 1) << lane;
3094 		rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
3098 		err = of_address_to_resource(port, 0, &rp->regs);
3100 			dev_err(pcie->dev, "failed to parse address: %d\n",
3105 		rp->gpio_presence_detection =
3106 			of_get_named_gpio(port,
3107 				"nvidia,presence-detection-gpio", 0);
3109 		INIT_LIST_HEAD(&rp->list);
3113 		rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
3115 			return -EADDRNOTAVAIL;
3116 		rp->disable_clock_request = of_property_read_bool(port,
3117 			"nvidia,disable-clock-request");
3119 		list_add_tail(&rp->list, &pcie->ports);
/*
 * debugfs: list every PCI device with vendor/device/class IDs, current
 * link speed (Gen1/Gen2 from LNKSTA) and bound driver name.
 */
3125 static int list_devices(struct seq_file *s, void *data)
3127 	struct pci_dev *pdev = NULL;
3128 	u16 vendor, device, devclass, speed;
3132 	for_each_pci_dev(pdev) {
3134 		ret = pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
3139 		ret = pci_read_config_word(pdev, PCI_DEVICE_ID, &device);
3144 		ret = pci_read_config_word(pdev, PCI_CLASS_DEVICE, &devclass);
3149 		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &speed);
3151 		seq_printf(s, "%s  Vendor:%04x  Device id:%04x  ",
3152 			kobject_name(&pdev->dev.kobj), vendor,
3154 		seq_printf(s, "Class:%04x  Speed:%s  Driver:%s(%s)\n", devclass,
3155 			((speed & PCI_EXP_LNKSTA_CLS_5_0GB) ==
3156 				PCI_EXP_LNKSTA_CLS_5_0GB) ?
3158 			(pdev->driver) ? "enabled" : "disabled",
3159 			(pdev->driver) ? pdev->driver->name : NULL);
3162 		seq_printf(s, "Couldn't read devices\n");
/*
 * debugfs: trigger a link-speed change to the globally selected target
 * (is_gen2_speed) and report success/failure.
 */
3167 static int apply_link_speed(struct seq_file *s, void *data)
3170 	struct tegra_pcie *pcie = (struct tegra_pcie *)(s->private);
3172 	seq_printf(s, "Changing link speed to %s... ",
3173 		(is_gen2_speed) ? "Gen2" : "Gen1");
3174 	pass = tegra_pcie_link_speed(pcie, is_gen2_speed);
3177 		seq_printf(s, "Done\n");
3179 		seq_printf(s, "Failed\n");
/*
 * debugfs test: force each endpoint and then its upstream bridge into
 * D3hot via PCI_PM_CTRL, then read back both to verify the transition,
 * printing a pass/fail line per device.
 */
3183 static int check_d3hot(struct seq_file *s, void *data)
3186 	struct pci_dev *pdev = NULL;
3188 	/* Force all the devices (including RPs) in d3 hot state */
3189 	for_each_pci_dev(pdev) {
		/* skip bridges; they are put in D3hot via bus->self below */
3190 		if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
3191 			pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM)
3193 		/* First, keep Downstream component in D3_Hot */
3194 		pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL,
3196 		if ((val & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot)
3197 			seq_printf(s, "device[%x:%x] is already in D3_hot]\n",
3198 				pdev->vendor, pdev->device);
3199 		val &= ~PCI_PM_CTRL_STATE_MASK;
3201 		pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL,
3203 		/* Keep corresponding upstream component in D3_Hot */
3204 		pci_read_config_word(pdev->bus->self,
3205 			pdev->bus->self->pm_cap + PCI_PM_CTRL, &val);
3206 		val &= ~PCI_PM_CTRL_STATE_MASK;
3208 		pci_write_config_word(pdev->bus->self,
3209 			pdev->bus->self->pm_cap + PCI_PM_CTRL, val);
3211 		/* check if they have changed their state */
3212 		pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL,
3214 		if ((val & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot)
3215 			seq_printf(s, "device[%x:%x] transitioned to D3_hot]\n",
3216 				pdev->vendor, pdev->device);
3218 			seq_printf(s, "device[%x:%x] couldn't transition]\n",
3219 				pdev->vendor, pdev->device);
3220 		pci_read_config_word(pdev->bus->self,
3221 			pdev->bus->self->pm_cap + PCI_PM_CTRL, &val);
3222 		if ((val & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot)
3223 			seq_printf(s, "device[%x:%x] transitioned to D3_hot]\n",
3224 				pdev->bus->self->vendor,
3225 				pdev->bus->self->device);
3227 			seq_printf(s, "device[%x:%x] couldn't transition]\n",
3228 				pdev->bus->self->vendor,
3229 				pdev->bus->self->device);
/*
 * debugfs: hex-dump the config space of every PCI device, 16 bytes per
 * row; 4K (extended) for PCIe devices, 256 bytes otherwise.
 */
3235 static int dump_config_space(struct seq_file *s, void *data)
3239 	struct pci_dev *pdev = NULL;
3241 	for_each_pci_dev(pdev) {
3242 		int row_cnt = pci_is_pcie(pdev) ?
3243 			PCI_EXT_CFG_SPACE_SIZE : PCI_CFG_SPACE_SIZE;
3244 		seq_printf(s, "%s\n", kobject_name(&pdev->dev.kobj));
3245 		seq_printf(s, "%s\n", "------------");
3247 		for (row = 0; row < (row_cnt / 16); row++) {
3248 			seq_printf(s, "%02x: ", (row * 16));
3249 			for (col = 0; col < 16; col++) {
3250 				pci_read_config_byte(pdev, ((row * 16) + col),
3252 				seq_printf(s, "%02x ", val);
3254 			seq_printf(s, "\n");
/*
 * debugfs: dump the first 0x200 bytes of the AFI register space, four
 * 32-bit words per output line.
 */
3260 static int dump_afi_space(struct seq_file *s, void *data)
3263 	struct tegra_pcie_port *port = NULL;
3264 	struct tegra_pcie *pcie = (struct tegra_pcie *)(s->private);
3266 	list_for_each_entry(port, &pcie->ports, list) {
3267 		seq_puts(s, "Offset:  Values\n");
3268 		for (offset = 0; offset < 0x200; offset += 0x10) {
3269 			val = afi_readl(port->pcie, offset);
3270 			seq_printf(s, "%6x: %8x %8x %8x %8x\n", offset,
3271 				afi_readl(port->pcie, offset),
3272 				afi_readl(port->pcie, offset + 4),
3273 				afi_readl(port->pcie, offset + 8),
3274 				afi_readl(port->pcie, offset + 12));
/*
 * debugfs: read config space of the device selected via the global bdf
 * value at the global config_offset, using dword/word/byte access
 * according to the offset's alignment; result is cached in config_val.
 */
3280 static int config_read(struct seq_file *s, void *data)
3283 	struct pci_dev *pdev = NULL;
3285 	pdev = pci_get_bus_and_slot((bdf >> 8), (bdf & 0xFF));
3287 		seq_printf(s, "%02d:%02d.%02d : Doesn't exist\n",
3288 			(bdf >> 8), PCI_SLOT(bdf), PCI_FUNC(bdf));
3290 			"Enter (bus<<8 | dev<<3 | func) value to bdf file\n");
3293 	if (config_offset >= PCI_EXT_CFG_SPACE_SIZE) {
3294 		seq_printf(s, "Config offset exceeds max (i.e %d) value\n",
3295 			PCI_EXT_CFG_SPACE_SIZE);
3297 	if (!(config_offset & 0x3)) {
		/* dword-aligned */
3299 		pci_read_config_dword(pdev, config_offset, &val);
3300 		seq_printf(s, "%08x\n", val);
3302 	} else if (!(config_offset & 0x1)) {
		/* word-aligned */
3304 		pci_read_config_word(pdev, config_offset, (u16 *)&val);
3305 		seq_printf(s, "%04x\n", (u16)(val & 0xFFFF));
3306 		config_val = val & 0xFFFF;
		/* odd offset: byte access */
3309 		pci_read_config_byte(pdev, config_offset, (u8 *)&val);
3310 		seq_printf(s, "%02x\n", (u8)(val & 0xFF));
3311 		config_val = val & 0xFF;
/*
 * debugfs: write the global config_val into the config space of the
 * device selected via bdf at config_offset, choosing dword/word/byte
 * access by offset alignment (mirrors config_read above).
 */
3318 static int config_write(struct seq_file *s, void *data)
3320 	struct pci_dev *pdev = NULL;
3322 	pdev = pci_get_bus_and_slot((bdf >> 8), (bdf & 0xFF));
3324 		seq_printf(s, "%02d:%02d.%02d : Doesn't exist\n",
3325 			(bdf >> 8), PCI_SLOT(bdf), PCI_FUNC(bdf));
3327 			"Enter (bus<<8 | dev<<3 | func) value to bdf file\n");
3330 	if (config_offset >= PCI_EXT_CFG_SPACE_SIZE) {
3331 		seq_printf(s, "Config offset exceeds max (i.e %d) value\n",
3332 			PCI_EXT_CFG_SPACE_SIZE);
3334 	if (!(config_offset & 0x3)) {
3336 		pci_write_config_dword(pdev, config_offset, config_val);
3337 	} else if (!(config_offset & 0x1)) {
3339 		pci_write_config_word(pdev, config_offset,
3340 			(u16)(config_val & 0xFFFF));
3343 		pci_write_config_byte(pdev, config_offset,
3344 			(u8)(config_val & 0xFF));
/*
 * debugfs test: send PME_Turn_Off via the AFI, busy-wait for the PME ack
 * (NOTE(review): no visible timeout on this poll loop — confirm in full
 * source), then check each port's LTSSM state to decide pass/fail.
 */
3351 static int power_down(struct seq_file *s, void *data)
3353 	struct tegra_pcie_port *port = NULL;
3354 	struct tegra_pcie *pcie = (struct tegra_pcie *)(s->private);
3358 	val = afi_readl(pcie, AFI_PCIE_PME);
3359 	val |= AFI_PCIE_PME_TURN_OFF;
3360 	afi_writel(pcie, val, AFI_PCIE_PME);
3362 		val = afi_readl(pcie, AFI_PCIE_PME);
3363 	} while(!(val & AFI_PCIE_PME_ACK));
3366 	list_for_each_entry(port, &pcie->ports, list) {
3367 		val = rp_readl(port, NV_PCIE2_RP_LTSSM_DBGREG);
3368 		if (val & PCIE2_RP_LTSSM_DBGREG_LINKFSM16) {
3376 		seq_printf(s, "[pass: pcie_power_down]\n");
3378 		seq_printf(s, "[fail: pcie_power_down]\n");
3379 	pr_info("PCIE power_down test END..\n");
/*
 * debugfs: program the requested link width (port->lanes) into
 * NV_PCIE2_RP_VEND_XP1 and verify the negotiated width by reading the
 * link status register back; caps the request at 16 lanes.
 */
3383 static int apply_lane_width(struct seq_file *s, void *data)
3386 	struct tegra_pcie_port *port = (struct tegra_pcie_port *)(s->private);
3388 	if (port->lanes > 0x10) {
3389 		seq_printf(s, "link width cannot be grater than 16\n");
		/* fall back to the currently negotiated width */
3390 		new = rp_readl(port, RP_LINK_CONTROL_STATUS);
3391 		port->lanes = (new &
3392 			RP_LINK_CONTROL_STATUS_NEG_LINK_WIDTH) >> 20;
3395 	new = rp_readl(port, NV_PCIE2_RP_VEND_XP1);
3396 	new &= ~NV_PCIE2_RP_VEND_XP1_RNCTRL_MAXWIDTH_MASK;
3397 	new |= port->lanes | NV_PCIE2_RP_VEND_XP1_RNCTRL_EN;
3398 	rp_writel(port, new, NV_PCIE2_RP_VEND_XP1);
	/* verify the width the link actually negotiated */
3401 	new = rp_readl(port, RP_LINK_CONTROL_STATUS);
3402 	new = (new & RP_LINK_CONTROL_STATUS_NEG_LINK_WIDTH) >> 20;
3403 	if (new != port->lanes)
3404 		seq_printf(s, "can't set link width %u, falling back to %u\n",
3407 		seq_printf(s, "lane width %d applied\n", new);
/*
 * debugfs: print the HW entry counters for each ASPM state on this root
 * port — Tx/Rx L0s, L1, and (Tegra210 only) the L1.1/L1.2 substates —
 * marking states that are disabled in the link/substate control regs.
 */
3412 static int aspm_state_cnt(struct seq_file *s, void *data)
3415 	struct tegra_pcie_port *port = (struct tegra_pcie_port *)(s->private);
3417 	cs = rp_readl(port, RP_LINK_CONTROL_STATUS);
3418 	/* check if L0s is enabled on this port */
3419 	if (cs & RP_LINK_CONTROL_STATUS_L0s_ENABLED) {
3420 		val = rp_readl(port, NV_PCIE2_RP_PRIV_XP_TX_L0S_ENTRY_COUNT);
3421 		seq_printf(s, "Tx L0s entry count : %u\n", val);
3423 		seq_printf(s, "Tx L0s entry count : %s\n", "disabled");
3425 	val = rp_readl(port, NV_PCIE2_RP_PRIV_XP_RX_L0S_ENTRY_COUNT);
3426 	seq_printf(s, "Rx L0s entry count : %u\n", val);
3428 	/* check if L1 is enabled on this port */
3429 	if (cs & RP_LINK_CONTROL_STATUS_L1_ENABLED) {
3430 		val = rp_readl(port, NV_PCIE2_RP_PRIV_XP_TX_L1_ENTRY_COUNT);
3431 		seq_printf(s, "Link L1 entry count : %u\n", val);
3433 		seq_printf(s, "Link L1 entry count : %s\n", "disabled");
3435 #if defined(CONFIG_ARCH_TEGRA_21x_SOC)
3436 	cs = rp_readl(port, PCIE2_RP_L1_PM_SS_CONTROL);
3437 	/* RESETting the count value is not possible by any means
3438 	   because of HW Bug : 200034278 */
3439 	/* check if L1.1 is enabled */
3440 	if (cs & PCIE2_RP_L1_PM_SS_CONTROL_ASPM_L11_ENABLE) {
3441 		val = rp_readl(port, NV_PCIE2_RP_L1_1_ENTRY_COUNT);
3442 		seq_printf(s, "Link L1.1 entry count : %u\n", (val & 0xFFFF));
3444 		seq_printf(s, "Link L1.1 entry count : %s\n", "disabled");
3445 	/* check if L1.2 is enabled */
3446 	if (cs & PCIE2_RP_L1_PM_SS_CONTROL_ASPM_L12_ENABLE) {
3447 		val = rp_readl(port, NV_PCIE2_RP_L1_2_ENTRY_COUNT);
3448 		seq_printf(s, "Link L1.2 entry count : %u\n", (val & 0xFFFF));
3450 		seq_printf(s, "Link L1.2 entry count : %s\n", "disabled");
/* Human-readable ASPM state names, indexed by config_aspm_state. */
3455 static char *aspm_states[] = {
3459 	"IDLE ((Tx-L0s && Rx-L0s) + L1)"
/*
 * debugfs help text: explain how to select an ASPM state for duration
 * measurement and list the valid state indices.
 */
3462 static int list_aspm_states(struct seq_file *s, void *data)
3465 	seq_printf(s, "----------------------------------------------------\n");
3466 	seq_printf(s, "Note: Duration of link's residency is calcualated\n");
3467 	seq_printf(s, "      only for one of the ASPM states at a time\n");
3468 	seq_printf(s, "----------------------------------------------------\n");
3469 	seq_printf(s, "write(echo) number from below table corresponding to\n");
3470 	seq_printf(s, "one of the ASPM states for which link duration needs\n");
3471 	seq_printf(s, "to be calculated to 'config_aspm_state'\n");
3472 	seq_printf(s, "-----------------\n");
3473 	for (i = 0; i < ARRAY_SIZE(aspm_states); i++)
3474 		seq_printf(s, "%d : %s\n", i, aspm_states[i]);
3475 	seq_printf(s, "-----------------\n");
/*
 * debugfs: select which ASPM state the HW duration counter tracks, by
 * writing config_aspm_state into the low-power-duration field of
 * NV_PCIE2_RP_PRIV_XP_CONFIG. Rejects out-of-range indices.
 * NOTE(review): the bound check uses '>' against ARRAY_SIZE, which
 * admits index == ARRAY_SIZE — looks off-by-one; confirm in full source.
 */
3479 static int apply_aspm_state(struct seq_file *s, void *data)
3482 	struct tegra_pcie_port *port = (struct tegra_pcie_port *)(s->private);
3484 	if (config_aspm_state > ARRAY_SIZE(aspm_states)) {
3485 		seq_printf(s, "Invalid ASPM state : %u\n", config_aspm_state);
3486 		list_aspm_states(s, data);
3488 		val = rp_readl(port, NV_PCIE2_RP_PRIV_XP_CONFIG);
3489 		val &= ~NV_PCIE2_RP_PRIV_XP_CONFIG_LOW_PWR_DURATION_MASK;
3490 		val |= config_aspm_state;
3491 		rp_writel(port, val, NV_PCIE2_RP_PRIV_XP_CONFIG);
3492 		seq_printf(s, "Configured for ASPM-%s state...\n",
3493 			aspm_states[config_aspm_state]);
/*
 * debugfs: report the measured residency of the currently selected ASPM
 * state. The counter ticks in 100ns units of a 19.2MHz clock; the
 * (val * 100)/52 conversion approximates 52.08ns per tick.
 */
3498 static int get_aspm_duration(struct seq_file *s, void *data)
3501 	struct tegra_pcie_port *port = (struct tegra_pcie_port *)(s->private);
3503 	val = rp_readl(port, NV_PCIE2_RP_PRIV_XP_DURATION_IN_LOW_PWR_100NS);
3504 	/* 52.08 = 1000 / 19.2MHz is rounded to 52 */
3505 	seq_printf(s, "ASPM-%s duration = %d ns\n",
3506 		aspm_states[config_aspm_state], (u32)((val * 100)/52));
/*
 * debugfs: pulse the Secondary Bus Reset bit in the root port's
 * INTR_BCR register (set, then clear) to hot-reset the downstream bus.
 */
3510 static int secondary_bus_reset(struct seq_file *s, void *data)
3513 	struct tegra_pcie_port *port = (struct tegra_pcie_port *)(s->private);
3515 	val = rp_readl(port, NV_PCIE2_RP_INTR_BCR);
3516 	val |= NV_PCIE2_RP_INTR_BCR_SB_RESET;
3517 	rp_writel(port, val, NV_PCIE2_RP_INTR_BCR);
3519 	val = rp_readl(port, NV_PCIE2_RP_INTR_BCR);
3520 	val &= ~NV_PCIE2_RP_INTR_BCR_SB_RESET;
3521 	rp_writel(port, val, NV_PCIE2_RP_INTR_BCR);
3523 	seq_printf(s, "Secondary Bus Reset applied successfully...\n");
/*
 * Try to reset a saturated (0xFFFF) L1SS entry counter at 'offset' by
 * setting its RESET bit, then read back to confirm. Logs a timeout
 * message when the counter does not clear.
 */
3527 static void reset_l1ss_counter(struct tegra_pcie_port *port, u32 val,
3528 			unsigned long offset)
3532 	if ((val & 0xFFFF) == 0xFFFF) {
3533 		pr_info("  Trying reset L1ss entry count to 0\n");
3536 				pr_info("Timeout: reset did not happen!\n");
3539 			val |= PCIE2_RP_L1_1_ENTRY_COUNT_RESET;
3540 			rp_writel(port, val, offset);
3542 			val = rp_readl(port, offset);
3545 		pr_info("L1ss entry count reset to 0\n");
/*
 * debugfs test for ASPM L1.1: reset the RP L1.1 entry counters, disable
 * the endpoint's automatic L1SS exit (vendor-specific config writes at
 * 0x658/0x150 — presumably GPU-specific; confirm against full source),
 * enable ASPM L1 plus the L1.1 substate on every device, then re-read
 * the counters to show whether L1.1 entry occurred.
 */
3548 static int aspm_l11(struct seq_file *s, void *data)
3550 	struct pci_dev *pdev = NULL;
3551 	u32 val = 0, pos = 0;
3552 	struct tegra_pcie_port *port = NULL;
3553 	struct tegra_pcie *pcie = (struct tegra_pcie *)(s->private);
3555 	pr_info("\nPCIE aspm l1.1 test START..\n");
3556 	list_for_each_entry(port, &pcie->ports, list) {
3557 		/* reset RP L1.1 counter */
3558 		val = rp_readl(port, NV_PCIE2_RP_L1_1_ENTRY_COUNT);
3559 		val |= PCIE2_RP_L1_1_ENTRY_COUNT_RESET;
3560 		rp_writel(port, val, NV_PCIE2_RP_L1_1_ENTRY_COUNT);
3562 		val = rp_readl(port, NV_PCIE2_RP_L1_1_ENTRY_COUNT);
3563 		pr_info("L1.1 Entry count before %x\n", val);
3564 		reset_l1ss_counter(port, val, NV_PCIE2_RP_L1_1_ENTRY_COUNT);
3566 	/* disable automatic l1ss exit by gpu */
3567 	for_each_pci_dev(pdev)
3568 		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) {
3569 			pci_write_config_dword(pdev, 0x658, 0);
3570 			pci_write_config_dword(pdev, 0x150, 0xE0000015);
3572 	for_each_pci_dev(pdev) {
		/* enable ASPM L1 and the L1.1 substate on this device */
3574 		pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &aspm);
3575 		aspm |= PCI_EXP_LNKCTL_ASPM_L1;
3576 		pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, aspm);
3577 		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
3578 		pci_read_config_dword(pdev, pos + PCI_L1SS_CTRL1, &val);
3579 		val &= ~PCI_L1SS_CAP_L1PM_MASK;
3580 		val |= PCI_L1SS_CTRL1_ASPM_L11S;
3581 		pci_write_config_dword(pdev, pos + PCI_L1SS_CTRL1, val);
3582 		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT)
3586 	for_each_pci_dev(pdev) {
3587 		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
3588 		pci_read_config_dword(pdev, pos + PCI_L1SS_CTRL1, &val);
3589 		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT)
3592 	list_for_each_entry(port, &pcie->ports, list) {
3593 		val = rp_readl(port, NV_PCIE2_RP_L1_1_ENTRY_COUNT);
3594 		pr_info("L1.1 Entry count after %x\n", val);
3597 	pr_info("PCIE aspm l1.1 test END..\n");
/*
 * Debugfs test: enable ASPM L1 plus both L1.1 and L1.2 substates on every
 * PCI device, then report the root port's L1.1/L1.2 entry counters and the
 * LTR value reported by the endpoint, comparing it against the root port's
 * 106 us L1.2 threshold.
 * Fix: log-message typo "reproted" -> "reported".
 * NOTE(review): truncated - 'aspm' and 'ltr_val' declarations and several
 * loop bodies/braces are not visible; verify against upstream.
 */
3601 static int aspm_l1ss(struct seq_file *s, void *data)
3603 struct pci_dev *pdev = NULL;
3604 u32 val = 0, pos = 0;
3605 struct tegra_pcie_port *port = NULL;
3606 struct tegra_pcie *pcie = (struct tegra_pcie *)(s->private);
3608 pr_info("\nPCIE aspm l1ss test START..\n");
3609 list_for_each_entry(port, &pcie->ports, list) {
3610 /* reset RP L1.1 L1.2 counters */
3611 val = rp_readl(port, NV_PCIE2_RP_L1_1_ENTRY_COUNT);
3612 val |= PCIE2_RP_L1_1_ENTRY_COUNT_RESET;
3613 rp_writel(port, val, NV_PCIE2_RP_L1_1_ENTRY_COUNT);
3614 val = rp_readl(port, NV_PCIE2_RP_L1_1_ENTRY_COUNT);
3615 pr_info("L1.1 Entry count before %x\n", val);
3616 reset_l1ss_counter(port, val, NV_PCIE2_RP_L1_1_ENTRY_COUNT);
3618 val = rp_readl(port, NV_PCIE2_RP_L1_2_ENTRY_COUNT);
3619 val |= PCIE2_RP_L1_2_ENTRY_COUNT_RESET;
3620 rp_writel(port, val, NV_PCIE2_RP_L1_2_ENTRY_COUNT);
3621 val = rp_readl(port, NV_PCIE2_RP_L1_2_ENTRY_COUNT);
3622 pr_info("L1.2 Entry count before %x\n", val);
3623 reset_l1ss_counter(port, val, NV_PCIE2_RP_L1_2_ENTRY_COUNT);
3625 /* disable automatic l1ss exit by gpu */
/* 0x658/0x150 are device-specific config offsets for non-root-port
 * endpoints (presumably the GPU) - magic values, TODO confirm. */
3626 for_each_pci_dev(pdev)
3627 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) {
3628 pci_write_config_dword(pdev, 0x658, 0);
3629 pci_write_config_dword(pdev, 0x150, 0xE0000015);
/* Enable ASPM L1 in Link Control and select both L1.1 and L1.2
 * substates in the L1SS control register of every device. */
3632 for_each_pci_dev(pdev) {
3634 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &aspm);
3635 aspm |= PCI_EXP_LNKCTL_ASPM_L1;
3636 pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, aspm);
3637 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
3638 pci_read_config_dword(pdev, pos + PCI_L1SS_CTRL1, &val);
3639 val &= ~PCI_L1SS_CAP_L1PM_MASK;
3640 val |= (PCI_L1SS_CTRL1_ASPM_L11S | PCI_L1SS_CTRL1_ASPM_L12S);
3641 pci_write_config_dword(pdev, pos + PCI_L1SS_CTRL1, val);
3642 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT)
/* Read back L1SS control on each device (loop body truncated). */
3646 for_each_pci_dev(pdev) {
3647 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
3648 pci_read_config_dword(pdev, pos + PCI_L1SS_CTRL1, &val);
3649 if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT)
/* Report per-port entry counters to confirm substate residency. */
3652 list_for_each_entry(port, &pcie->ports, list) {
3654 val = rp_readl(port, NV_PCIE2_RP_L1_1_ENTRY_COUNT);
3655 pr_info("L1.1 Entry count after %x\n", val);
3656 val = rp_readl(port, NV_PCIE2_RP_L1_2_ENTRY_COUNT);
3657 pr_info("L1.2 Entry count after %x\n", val);
/* Decode LTR: bits [8:0] value, bits [12:10] scale (x 32^scale ns),
 * per the PCIe LTR message encoding. */
3659 val = rp_readl(port, NV_PCIE2_RP_LTR_REP_VAL);
3660 pr_info("LTR reported by EP %x\n", val);
3661 ltr_val = (val & 0x1FF) * (1 << (5 * ((val & 0x1C00) >> 10)));
3662 if (ltr_val > (106 * 1000)) {
3663 pr_info("EP's LTR = %u ns is > RP's threshold = %u ns\n",
3664 ltr_val, 106 * 1000);
3665 pr_info("Hence only L1.2 entry allowed\n");
3667 pr_info("EP's LTR = %u ns is < RP's threshold = %u ns\n",
3668 ltr_val, 106 * 1000);
3669 pr_info("Hence only L1.1 entry allowed\n");
3673 pr_info("PCIE aspm l1ss test END..\n");
/*
 * Create one read-only debugfs file under @parent; on failure the whole
 * @parent directory is removed so the caller can bail out.
 * Returns the new dentry, or NULL-ish on failure (return path truncated).
 * NOTE(review): "debufs" is a typo for "debugfs", but it is this helper's
 * public identifier used by tegra_pcie_debugfs_init() - do not rename
 * without updating all call sites.
 */
3676 static struct dentry *create_tegra_pcie_debufs_file(char *name,
3677 const struct file_operations *ops,
3678 struct dentry *parent,
3683 d = debugfs_create_file(name, S_IRUGO, parent, data, ops);
/* On failure, tear down the entire parent directory. */
3685 debugfs_remove_recursive(parent);
/*
 * DEFINE_ENTRY(name): boilerplate generator producing a single_open()
 * based debugfs open routine plus a file_operations struct named
 * <name>_fops for each seq_file show function above.
 * (No comments are inserted inside the macro: its lines are
 * backslash-continued and a bare comment line would terminate it.)
 */
3690 #define DEFINE_ENTRY(__name) \
3691 static int __name ## _open(struct inode *inode, struct file *file) \
3693 return single_open(file, __name, inode->i_private); \
3695 static const struct file_operations __name ## _fops = { \
3696 .open = __name ## _open, \
3698 .llseek = seq_lseek, \
3699 .release = single_release, \
/* Instantiate fops for each debugfs test entry point. */
3703 DEFINE_ENTRY(list_devices)
3704 DEFINE_ENTRY(apply_link_speed)
3705 DEFINE_ENTRY(check_d3hot)
3706 DEFINE_ENTRY(dump_config_space)
3707 DEFINE_ENTRY(dump_afi_space)
3708 DEFINE_ENTRY(config_read)
3709 DEFINE_ENTRY(config_write)
3710 DEFINE_ENTRY(aspm_l11)
3711 DEFINE_ENTRY(aspm_l1ss)
3712 DEFINE_ENTRY(power_down)
3715 DEFINE_ENTRY(apply_lane_width)
3716 DEFINE_ENTRY(aspm_state_cnt)
3717 DEFINE_ENTRY(list_aspm_states)
3718 DEFINE_ENTRY(apply_aspm_state)
3719 DEFINE_ENTRY(get_aspm_duration)
3720 DEFINE_ENTRY(secondary_bus_reset)
/*
 * Create the per-port debugfs directory (named after port->index) and its
 * lane-width / ASPM test entries. On any failure the directory is removed.
 * Fix: S_IWUGO made the writable entries world-writable; narrowed to
 * S_IWUSR (owner-only write), matching mainline debugfs permission policy.
 * NOTE(review): sprintf(&port_name, ...) writes at least two bytes ("0"
 * plus NUL); the declaration of port_name is not visible here - if it is
 * a single 'char' this overflows. Confirm and size it as an array.
 * NOTE(review): source is truncated (error-check branches not visible).
 */
3722 static int tegra_pcie_port_debugfs_init(struct tegra_pcie_port *port)
3727 sprintf(&port_name, "%d", port->index);
3728 port->port_debugfs = debugfs_create_dir(&port_name,
3729 port->pcie->debugfs);
3730 if (!port->port_debugfs)
/* Writable tuning knobs: owner write, world read. */
3733 d = debugfs_create_u32("lane_width", S_IWUSR | S_IRUGO,
3739 d = debugfs_create_file("apply_lane_width", S_IRUGO,
3740 port->port_debugfs, (void *)port,
3741 &apply_lane_width_fops);
3745 d = debugfs_create_file("aspm_state_cnt", S_IRUGO,
3746 port->port_debugfs, (void *)port,
3747 &aspm_state_cnt_fops);
3751 d = debugfs_create_u16("config_aspm_state", S_IWUSR | S_IRUGO,
3753 &config_aspm_state);
3757 d = debugfs_create_file("apply_aspm_state", S_IRUGO,
3758 port->port_debugfs, (void *)port,
3759 &apply_aspm_state_fops);
3763 d = debugfs_create_file("list_aspm_states", S_IRUGO,
3764 port->port_debugfs, (void *)port,
3765 &list_aspm_states_fops);
3769 d = debugfs_create_file("get_aspm_duration", S_IRUGO,
3770 port->port_debugfs, (void *)port,
3771 &get_aspm_duration_fops);
3775 d = debugfs_create_file("secondary_bus_reset", S_IRUGO,
3776 port->port_debugfs, (void *)port,
3777 &secondary_bus_reset_fops);
/* Error path: remove everything created so far. */
3784 debugfs_remove_recursive(port->port_debugfs);
3785 port->port_debugfs = NULL;
/*
 * seq_file iterator over pcie->ports for the "ports" debugfs file.
 * start(): print the header once and position on entry *pos, or stop
 * immediately when the port list is empty.
 */
3789 static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
3791 struct tegra_pcie *pcie = s->private;
3793 if (list_empty(&pcie->ports))
3796 seq_printf(s, "Index Status\n");
3798 return seq_list_start(&pcie->ports, *pos);
/* next(): advance to the following port in the list. */
3801 static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
3803 struct tegra_pcie *pcie = s->private;
3805 return seq_list_next(v, &pcie->ports, pos);
/* stop(): nothing to release - no lock is taken in start(). */
3808 static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
/*
 * show(): print "<index> up, active" style status for one port, derived
 * from the RP_VEND_XP DL_UP bit and the Link Control/Status
 * DL_LINK_ACTIVE bit. Truncated: some branches/braces not visible.
 */
3812 static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
3814 bool up = false, active = false;
3815 struct tegra_pcie_port *port;
3818 port = list_entry(v, struct tegra_pcie_port, list);
/* Data link layer reports "up" via vendor-specific RP_VEND_XP. */
3823 value = readl(port->base + RP_VEND_XP);
3825 if (value & RP_VEND_XP_DL_UP)
/* Link training produced an active data link. */
3828 value = readl(port->base + RP_LINK_CONTROL_STATUS);
3830 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
3833 seq_printf(s, "%2u ", port->index);
3836 seq_printf(s, "up");
3840 seq_printf(s, ", ");
3842 seq_printf(s, "active");
3845 seq_printf(s, "\n");
/* seq_file operations backing the "ports" debugfs file. */
3849 static const struct seq_operations tegra_pcie_ports_seq_ops = {
3850 .start = tegra_pcie_ports_seq_start,
3851 .next = tegra_pcie_ports_seq_next,
3852 .stop = tegra_pcie_ports_seq_stop,
3853 .show = tegra_pcie_ports_seq_show,
/*
 * open(): start the seq iteration and stash the tegra_pcie pointer in
 * seq_file->private (assignment line truncated from this view).
 */
3856 static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
3858 struct tegra_pcie *pcie = inode->i_private;
3862 err = seq_open(file, &tegra_pcie_ports_seq_ops);
3866 s = file->private_data;
/* file_operations for the "ports" debugfs file. */
3872 static const struct file_operations tegra_pcie_ports_ops = {
3873 .owner = THIS_MODULE,
3874 .open = tegra_pcie_ports_open,
3876 .llseek = seq_lseek,
3877 .release = seq_release,
/* Tear down the whole "pcie" debugfs tree, including per-port dirs. */
3880 static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
3882 debugfs_remove_recursive(pcie->debugfs);
/*
 * Create the top-level "pcie" debugfs directory with its test entries,
 * then a sub-directory per enabled port. On any failure everything is
 * removed via tegra_pcie_debugfs_exit().
 * Fix: S_IWUGO made bus_dev_func/config_offset/config_val world-writable;
 * narrowed to S_IWUSR (owner-only write), matching mainline debugfs
 * permission policy.
 * NOTE(review): source is truncated (error-check branches not visible).
 */
3885 static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
3887 struct dentry *file, *d;
3888 struct tegra_pcie_port *port;
3890 pcie->debugfs = debugfs_create_dir("pcie", NULL);
3894 file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
3895 pcie, &tegra_pcie_ports_ops);
3899 d = create_tegra_pcie_debufs_file("list_devices",
3900 &list_devices_fops, pcie->debugfs,
/* Write-only toggle (read returns nothing meaningful, hence "(WO)"). */
3905 d = debugfs_create_bool("is_gen2_speed(WO)", S_IWUSR, pcie->debugfs,
3910 d = create_tegra_pcie_debufs_file("apply_link_speed",
3911 &apply_link_speed_fops, pcie->debugfs,
3916 d = create_tegra_pcie_debufs_file("check_d3hot",
3917 &check_d3hot_fops, pcie->debugfs,
3922 d = create_tegra_pcie_debufs_file("power_down",
3923 &power_down_fops, pcie->debugfs,
3928 d = create_tegra_pcie_debufs_file("dump_config_space",
3929 &dump_config_space_fops, pcie->debugfs,
3934 d = create_tegra_pcie_debufs_file("dump_afi_space",
3935 &dump_afi_space_fops, pcie->debugfs,
/* Parameters for the config_read/config_write entries below. */
3940 d = debugfs_create_u16("bus_dev_func", S_IWUSR | S_IRUGO,
3946 d = debugfs_create_u16("config_offset", S_IWUSR | S_IRUGO,
3952 d = debugfs_create_u32("config_val", S_IWUSR | S_IRUGO,
3958 d = create_tegra_pcie_debufs_file("config_read",
3959 &config_read_fops, pcie->debugfs,
3964 d = create_tegra_pcie_debufs_file("config_write",
3965 &config_write_fops, pcie->debugfs,
3969 d = create_tegra_pcie_debufs_file("aspm_l11",
3970 &aspm_l11_fops, pcie->debugfs,
3974 d = create_tegra_pcie_debufs_file("aspm_l1ss",
3975 &aspm_l1ss_fops, pcie->debugfs,
/* One sub-directory of tuning knobs per enabled root port. */
3980 list_for_each_entry(port, &pcie->ports, list) {
3982 if (tegra_pcie_port_debugfs_init(port))
/* Error path: remove the whole tree and forget the dentry. */
3989 tegra_pcie_debugfs_exit(pcie);
3990 pcie->debugfs = NULL;
/*
 * Finish probing after any boot-detect delay: run tegra_pcie_init(),
 * disable SCx states when a device is present (see FIXME below), and set
 * up debugfs when at least one port came up.
 */
3994 static int tegra_pcie_probe_complete(struct tegra_pcie *pcie)
3997 struct platform_device *pdev = to_platform_device(pcie->dev);
4000 ret = tegra_pcie_init(pcie);
4004 /* FIXME:In Bug 200160313, device hang is observed during LP0 with PCIe
4005 * device connected. When PCIe device is not under mc_clk power-domain,
4006 * this issue does not occur, but SC2/SC3 might break. So, we are calling
4007 * disable_scx_states(), that will disabled SCx states whenever PCIe
4008 * device is connected.
4010 if (pcie->num_ports)
4011 disable_scx_states();
/* debugfs failure is logged but non-fatal for the probe. */
4013 if (IS_ENABLED(CONFIG_DEBUG_FS))
4014 if (pcie->num_ports) {
4015 int ret = tegra_pcie_debugfs_init(pcie);
4017 dev_err(&pdev->dev, "failed to setup debugfs: %d\n",
/*
 * Delayed-work handler: complete the probe after the DT-configured
 * boot_detect_delay expires (scheduled from tegra_pcie_probe()).
 */
4024 static void pcie_delayed_detect(struct work_struct *work)
4026 struct tegra_pcie *pcie;
4029 pcie = container_of(work, struct tegra_pcie, detect_delay.work);
4030 ret = tegra_pcie_probe_complete(pcie);
/*
 * Platform-driver probe: allocate the tegra_pcie context, read platform
 * data from DT, match SoC data, acquire regulators, enable runtime PM,
 * then either defer link detection via delayed work (boot_detect_delay)
 * or complete the probe immediately.
 * NOTE(review): source is truncated - error returns and some braces are
 * not visible in this view.
 */
4036 static int tegra_pcie_probe(struct platform_device *pdev)
4040 const struct of_device_id *match;
4041 struct tegra_pcie *pcie;
4045 #ifdef CONFIG_ARCH_TEGRA_21x_SOC
/* Bail out early if the PCIe block is bonded out on this part. */
4046 if (tegra_bonded_out_dev(BOND_OUT_PCIE)) {
4047 dev_err(&pdev->dev, "PCIE instance is not present\n")
4052 pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
4056 platform_set_drvdata(pdev, pcie);
4057 pcie->dev = &pdev->dev;
4059 /* use DT way to init platform data */
4060 pcie->plat_data = devm_kzalloc(pcie->dev,
4061 sizeof(*(pcie->plat_data)), GFP_KERNEL);
4062 if (!(pcie->plat_data)) {
4063 dev_err(pcie->dev, "memory alloc failed\n");
4066 tegra_pcie_read_plat_data(pcie);
4068 match = of_match_device(tegra_pcie_of_match, &pdev->dev);
4071 pcie->soc_data = (struct tegra_pcie_soc_data *)match->data;
/* NOTE(review): the result of this devm_kzalloc() is dereferenced in
 * the loop below without a visible NULL check - confirm upstream. */
4073 pcie->pcie_regulators = devm_kzalloc(pcie->dev,
4074 pcie->soc_data->num_pcie_regulators
4075 * sizeof(struct regulator *), GFP_KERNEL);
/* Missing regulators are logged and tolerated (slot set to NULL). */
4077 for (i = 0; i < pcie->soc_data->num_pcie_regulators; i++) {
4078 pcie->pcie_regulators[i] =
4079 devm_regulator_get(pcie->dev,
4080 pcie->soc_data->pcie_regulator_names[i]);
4081 if (IS_ERR(pcie->pcie_regulators[i])) {
4082 dev_err(pcie->dev, "%s: unable to get regulator %s\n",
4084 pcie->soc_data->pcie_regulator_names[i]);
4085 pcie->pcie_regulators[i] = NULL;
4089 INIT_LIST_HEAD(&pcie->buses);
4090 INIT_LIST_HEAD(&pcie->ports);
4091 INIT_LIST_HEAD(&pcie->sys);
4092 INIT_DELAYED_WORK(&pcie->detect_delay, pcie_delayed_detect);
4094 ret = tegra_pcie_parse_dt(pcie);
/* prod settings are optional: failure is logged, not fatal. */
4098 pcie->prod_list = tegra_prod_init(pcie->dev->of_node);
4099 if (IS_ERR(pcie->prod_list)) {
4100 dev_err(pcie->dev, "Prod Init failed\n");
4101 pcie->prod_list = NULL;
4104 /* Enable Runtime PM for PCIe, TODO: Need to add PCIe host device */
4105 pm_runtime_enable(pcie->dev);
/* Defer detection when DT asks for a boot delay; otherwise finish now. */
4107 if (pcie->plat_data->boot_detect_delay) {
4108 unsigned long delay =
4109 msecs_to_jiffies(pcie->plat_data->boot_detect_delay);
4110 schedule_delayed_work(&pcie->detect_delay, delay);
4114 ret = tegra_pcie_probe_complete(pcie);
/* Probe-failure unwind: disable runtime PM, detach from PM domain. */
4116 pm_runtime_disable(pcie->dev);
4117 tegra_pd_remove_device(pcie->dev);
/*
 * Platform-driver remove: cancel pending delayed detection, tear down
 * debugfs, the PCI core state, per-bus vmaps, MSI, prod settings, the PM
 * domain attachment, and finally power, regulators, and clocks.
 * NOTE(review): truncated - some cleanup lines are not visible.
 */
4122 static int tegra_pcie_remove(struct platform_device *pdev)
4124 struct tegra_pcie *pcie = platform_get_drvdata(pdev);
4125 struct tegra_pcie_bus *bus;
4128 pm_runtime_disable(pcie->dev);
/* If delayed detect never ran, probe never completed - skip teardown. */
4129 if (cancel_delayed_work_sync(&pcie->detect_delay))
4131 if (IS_ENABLED(CONFIG_DEBUG_FS))
4132 tegra_pcie_debugfs_exit(pcie);
4133 pci_common_exit(&pcie->sys);
/* Release the vmap of each extended-config-space bus mapping. */
4134 list_for_each_entry(bus, &pcie->buses, list) {
4135 vunmap(bus->area->addr);
4138 if (IS_ENABLED(CONFIG_PCI_MSI))
4139 tegra_pcie_disable_msi(pcie);
4140 if (pcie->prod_list)
4141 tegra_prod_release(&pcie->prod_list);
4142 tegra_pcie_detach(pcie);
4143 tegra_pd_remove_device(pcie->dev);
4144 tegra_pcie_power_off(pcie, true);
4145 tegra_pcie_disable_regulators(pcie);
4146 tegra_pcie_clocks_put(pcie);
/*
 * System suspend (noirq phase): power the controller down, arm the
 * PE_WAKE GPIO as a wakeup source when configured, then drop regulators.
 */
4152 static int tegra_pcie_suspend_noirq(struct device *dev)
4155 struct tegra_pcie *pcie = dev_get_drvdata(dev);
4158 ret = tegra_pcie_power_off(pcie, true);
4161 /* configure PE_WAKE signal as wake sources */
4162 if (gpio_is_valid(pcie->plat_data->gpio_wake) &&
4163 device_may_wakeup(dev)) {
4164 ret = enable_irq_wake(gpio_to_irq(
4165 pcie->plat_data->gpio_wake));
4168 "ID wake-up event failed with error %d\n", ret);
4171 ret = tegra_pcie_disable_regulators(pcie);
/* Forward declaration: tegra_pcie_enable_msi() is defined earlier in the
 * file but outside this suspend/resume section. */
4178 static int tegra_pcie_enable_msi(struct tegra_pcie *, bool);
/*
 * System resume (noirq phase): mirror of suspend_noirq - disarm the
 * PE_WAKE wakeup, re-enable regulators and power, re-program pads,
 * controller, translations and MSI, then re-check link state per port.
 * Powers everything back off if no port came up.
 */
4180 static int tegra_pcie_resume_noirq(struct device *dev)
4183 struct tegra_pcie *pcie = dev_get_drvdata(dev);
4186 if (gpio_is_valid(pcie->plat_data->gpio_wake) &&
4187 device_may_wakeup(dev)) {
4188 ret = disable_irq_wake(gpio_to_irq(
4189 pcie->plat_data->gpio_wake));
4192 "ID wake-up event failed with error %d\n", ret);
4196 ret = tegra_pcie_enable_regulators(pcie);
4198 dev_err(pcie->dev, "PCIE: Failed to enable regulators\n");
4202 ret = tegra_pcie_power_on(pcie);
4204 dev_err(dev, "PCIE: Failed to power on: %d\n", ret);
/* Re-initialise hardware state lost across suspend. */
4207 tegra_pcie_enable_pads(pcie, true);
4208 tegra_periph_reset_deassert(pcie->pcie_afi);
4209 tegra_pcie_enable_controller(pcie);
4210 tegra_pcie_setup_translations(pcie);
4211 /* Set up MSI registers, if MSI have been enabled */
4212 tegra_pcie_enable_msi(pcie, true);
4213 tegra_periph_reset_deassert(pcie->pcie_pcie);
4214 tegra_pcie_check_ports(pcie);
/* No link came back: power off again to save energy. */
4215 if (!pcie->num_ports) {
4216 tegra_pcie_power_off(pcie, true);
4217 ret = tegra_pcie_disable_regulators(pcie);
/*
 * System resume (normal phase): re-apply optional link features (ASPM
 * etc.) once interrupts are live again.
 */
4225 static int tegra_pcie_resume(struct device *dev)
4227 struct tegra_pcie *pcie = dev_get_drvdata(dev);
4229 tegra_pcie_enable_features(pcie);
/* PM callbacks; compiled under CONFIG_PM (see #endif below). */
4233 static const struct dev_pm_ops tegra_pcie_pm_ops = {
4234 .suspend_noirq = tegra_pcie_suspend_noirq,
4235 .resume_noirq = tegra_pcie_resume_noirq,
4236 .resume = tegra_pcie_resume,
4238 #endif /* CONFIG_PM */
4240 /* driver data is accessed after init, so use __refdata instead of __initdata */
4241 static struct platform_driver __refdata tegra_pcie_driver = {
4242 .probe = tegra_pcie_probe,
4243 .remove = tegra_pcie_remove,
4245 .name = "tegra-pcie",
4246 .owner = THIS_MODULE,
4248 .pm = &tegra_pcie_pm_ops,
4250 .of_match_table = tegra_pcie_of_match,
/* Module entry/exit: plain register/unregister of the platform driver. */
4254 static int __init tegra_pcie_init_driver(void)
4256 return platform_driver_register(&tegra_pcie_driver);
4259 static void __exit_refok tegra_pcie_exit_driver(void)
4261 platform_driver_unregister(&tegra_pcie_driver);
4264 module_init(tegra_pcie_init_driver);
4265 module_exit(tegra_pcie_exit_driver);
4266 MODULE_LICENSE("GPL v2");