// SPDX-License-Identifier: GPL-2.0+
2/*
3 * PCIe host controller driver for Tegra194 SoC
4 *
5 * Copyright (C) 2019 NVIDIA Corporation.
6 *
7 * Author: Vidya Sagar <vidyas@nvidia.com>
8 */
9
10#include <linux/bitfield.h>
11#include <linux/clk.h>
12#include <linux/debugfs.h>
13#include <linux/delay.h>
14#include <linux/gpio.h>
15#include <linux/interrupt.h>
16#include <linux/iopoll.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/of.h>
20#include <linux/of_device.h>
21#include <linux/of_gpio.h>
22#include <linux/of_irq.h>
23#include <linux/of_pci.h>
24#include <linux/pci.h>
25#include <linux/phy/phy.h>
26#include <linux/pinctrl/consumer.h>
27#include <linux/platform_device.h>
28#include <linux/pm_runtime.h>
29#include <linux/random.h>
30#include <linux/reset.h>
31#include <linux/resource.h>
32#include <linux/types.h>
33#include "pcie-designware.h"
34#include <soc/tegra/bpmp.h>
35#include <soc/tegra/bpmp-abi.h>
36#include "../../pci.h"
37
38#define APPL_PINMUX 0x0
39#define APPL_PINMUX_PEX_RST BIT(0)
40#define APPL_PINMUX_CLKREQ_OVERRIDE_EN BIT(2)
41#define APPL_PINMUX_CLKREQ_OVERRIDE BIT(3)
42#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN BIT(4)
43#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE BIT(5)
44#define APPL_PINMUX_CLKREQ_OUT_OVRD_EN BIT(9)
45#define APPL_PINMUX_CLKREQ_OUT_OVRD BIT(10)
46
47#define APPL_CTRL 0x4
48#define APPL_CTRL_SYS_PRE_DET_STATE BIT(6)
49#define APPL_CTRL_LTSSM_EN BIT(7)
50#define APPL_CTRL_HW_HOT_RST_EN BIT(20)
51#define APPL_CTRL_HW_HOT_RST_MODE_MASK GENMASK(1, 0)
52#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT 22
53#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST 0x1
54
55#define APPL_INTR_EN_L0_0 0x8
56#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN BIT(0)
57#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN BIT(4)
58#define APPL_INTR_EN_L0_0_INT_INT_EN BIT(8)
59#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN BIT(19)
60#define APPL_INTR_EN_L0_0_SYS_INTR_EN BIT(30)
61#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN BIT(31)
62
63#define APPL_INTR_STATUS_L0 0xC
64#define APPL_INTR_STATUS_L0_LINK_STATE_INT BIT(0)
65#define APPL_INTR_STATUS_L0_INT_INT BIT(8)
66#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT BIT(18)
67
68#define APPL_INTR_EN_L1_0_0 0x1C
69#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN BIT(1)
70
71#define APPL_INTR_STATUS_L1_0_0 0x20
72#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED BIT(1)
73
74#define APPL_INTR_STATUS_L1_1 0x2C
75#define APPL_INTR_STATUS_L1_2 0x30
76#define APPL_INTR_STATUS_L1_3 0x34
77#define APPL_INTR_STATUS_L1_6 0x3C
78#define APPL_INTR_STATUS_L1_7 0x40
79
80#define APPL_INTR_EN_L1_8_0 0x44
81#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN BIT(2)
82#define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN BIT(3)
83#define APPL_INTR_EN_L1_8_INTX_EN BIT(11)
84#define APPL_INTR_EN_L1_8_AER_INT_EN BIT(15)
85
86#define APPL_INTR_STATUS_L1_8_0 0x4C
87#define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK GENMASK(11, 6)
88#define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS BIT(2)
89#define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS BIT(3)
90
91#define APPL_INTR_STATUS_L1_9 0x54
92#define APPL_INTR_STATUS_L1_10 0x58
93#define APPL_INTR_STATUS_L1_11 0x64
94#define APPL_INTR_STATUS_L1_13 0x74
95#define APPL_INTR_STATUS_L1_14 0x78
96#define APPL_INTR_STATUS_L1_15 0x7C
97#define APPL_INTR_STATUS_L1_17 0x88
98
99#define APPL_INTR_EN_L1_18 0x90
100#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT BIT(2)
101#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR BIT(1)
102#define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0)
103
104#define APPL_INTR_STATUS_L1_18 0x94
105#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT BIT(2)
106#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR BIT(1)
107#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR BIT(0)
108
109#define APPL_MSI_CTRL_2 0xB0
110
111#define APPL_LTR_MSG_1 0xC4
112#define LTR_MSG_REQ BIT(15)
113#define LTR_MST_NO_SNOOP_SHIFT 16
114
115#define APPL_LTR_MSG_2 0xC8
116#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE BIT(3)
117
118#define APPL_LINK_STATUS 0xCC
119#define APPL_LINK_STATUS_RDLH_LINK_UP BIT(0)
120
121#define APPL_DEBUG 0xD0
122#define APPL_DEBUG_PM_LINKST_IN_L2_LAT BIT(21)
123#define APPL_DEBUG_PM_LINKST_IN_L0 0x11
124#define APPL_DEBUG_LTSSM_STATE_MASK GENMASK(8, 3)
125#define APPL_DEBUG_LTSSM_STATE_SHIFT 3
126#define LTSSM_STATE_PRE_DETECT 5
127
128#define APPL_RADM_STATUS 0xE4
129#define APPL_PM_XMT_TURNOFF_STATE BIT(0)
130
131#define APPL_DM_TYPE 0x100
132#define APPL_DM_TYPE_MASK GENMASK(3, 0)
133#define APPL_DM_TYPE_RP 0x4
134#define APPL_DM_TYPE_EP 0x0
135
136#define APPL_CFG_BASE_ADDR 0x104
137#define APPL_CFG_BASE_ADDR_MASK GENMASK(31, 12)
138
139#define APPL_CFG_IATU_DMA_BASE_ADDR 0x108
140#define APPL_CFG_IATU_DMA_BASE_ADDR_MASK GENMASK(31, 18)
141
142#define APPL_CFG_MISC 0x110
143#define APPL_CFG_MISC_SLV_EP_MODE BIT(14)
144#define APPL_CFG_MISC_ARCACHE_MASK GENMASK(13, 10)
145#define APPL_CFG_MISC_ARCACHE_SHIFT 10
146#define APPL_CFG_MISC_ARCACHE_VAL 3
147
148#define APPL_CFG_SLCG_OVERRIDE 0x114
149#define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER BIT(0)
150
151#define APPL_CAR_RESET_OVRD 0x12C
152#define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N BIT(0)
153
154#define IO_BASE_IO_DECODE BIT(0)
155#define IO_BASE_IO_DECODE_BIT8 BIT(8)
156
157#define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE BIT(0)
158#define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE BIT(16)
159
160#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF 0x718
161#define CFG_TIMER_CTRL_ACK_NAK_SHIFT (19)
162
163#define EVENT_COUNTER_ALL_CLEAR 0x3
164#define EVENT_COUNTER_ENABLE_ALL 0x7
165#define EVENT_COUNTER_ENABLE_SHIFT 2
166#define EVENT_COUNTER_EVENT_SEL_MASK GENMASK(7, 0)
167#define EVENT_COUNTER_EVENT_SEL_SHIFT 16
168#define EVENT_COUNTER_EVENT_Tx_L0S 0x2
169#define EVENT_COUNTER_EVENT_Rx_L0S 0x3
170#define EVENT_COUNTER_EVENT_L1 0x5
171#define EVENT_COUNTER_EVENT_L1_1 0x7
172#define EVENT_COUNTER_EVENT_L1_2 0x8
173#define EVENT_COUNTER_GROUP_SEL_SHIFT 24
174#define EVENT_COUNTER_GROUP_5 0x5
175
176#define PORT_LOGIC_ACK_F_ASPM_CTRL 0x70C
177#define ENTER_ASPM BIT(30)
178#define L0S_ENTRANCE_LAT_SHIFT 24
179#define L0S_ENTRANCE_LAT_MASK GENMASK(26, 24)
180#define L1_ENTRANCE_LAT_SHIFT 27
181#define L1_ENTRANCE_LAT_MASK GENMASK(29, 27)
182#define N_FTS_SHIFT 8
183#define N_FTS_MASK GENMASK(7, 0)
184#define N_FTS_VAL 52
185
186#define PORT_LOGIC_GEN2_CTRL 0x80C
187#define PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE BIT(17)
188#define FTS_MASK GENMASK(7, 0)
189#define FTS_VAL 52
190
191#define PORT_LOGIC_MSI_CTRL_INT_0_EN 0x828
192
193#define GEN3_EQ_CONTROL_OFF 0x8a8
194#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT 8
195#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK GENMASK(23, 8)
196#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK GENMASK(3, 0)
197
198#define GEN3_RELATED_OFF 0x890
199#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL BIT(0)
200#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
201#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
202#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
203
204#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT 0x8D0
205#define AMBA_ERROR_RESPONSE_CRS_SHIFT 3
206#define AMBA_ERROR_RESPONSE_CRS_MASK GENMASK(1, 0)
207#define AMBA_ERROR_RESPONSE_CRS_OKAY 0
208#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF 1
209#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 2
210
211#define PORT_LOGIC_MSIX_DOORBELL 0x948
212
213#define CAP_SPCIE_CAP_OFF 0x154
214#define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK GENMASK(3, 0)
215#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK GENMASK(11, 8)
216#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT 8
217
218#define PME_ACK_TIMEOUT 10000
219
220#define LTSSM_TIMEOUT 50000 /* 50ms */
221
222#define GEN3_GEN4_EQ_PRESET_INIT 5
223
224#define GEN1_CORE_CLK_FREQ 62500000
225#define GEN2_CORE_CLK_FREQ 125000000
226#define GEN3_CORE_CLK_FREQ 250000000
227#define GEN4_CORE_CLK_FREQ 500000000
228
/* Core clock rates (Hz) indexed by (Current Link Speed - 1): Gen-1..Gen-4 */
static const unsigned int pcie_gen_freq[] = {
	GEN1_CORE_CLK_FREQ,
	GEN2_CORE_CLK_FREQ,
	GEN3_CORE_CLK_FREQ,
	GEN4_CORE_CLK_FREQ
};
235
/*
 * DBI offset of the event counter control register for each controller,
 * indexed by controller ID (pcie->cid). Pairs with event_cntr_data_offset[].
 */
static const u32 event_cntr_ctrl_offset[] = {
	0x1d8,
	0x1a8,
	0x1a8,
	0x1a8,
	0x1c4,
	0x1d8
};
244
/*
 * DBI offset of the event counter data register for each controller,
 * indexed by controller ID (pcie->cid). Pairs with event_cntr_ctrl_offset[].
 */
static const u32 event_cntr_data_offset[] = {
	0x1dc,
	0x1ac,
	0x1ac,
	0x1ac,
	0x1c8,
	0x1dc
};
253
/*
 * struct tegra_pcie_dw - per-controller state for the Tegra194 PCIe driver
 *
 * Wraps the common DesignWare core state (@pci) with the Tegra-specific
 * application (APPL) register space, clocks, resets, regulators, PHYs and
 * the BPMP firmware handle used for UPHY/controller power control.
 */
struct tegra_pcie_dw {
	struct device *dev;
	struct resource *appl_res;	/* APPL register space */
	struct resource *dbi_res;	/* DBI register space */
	struct resource *atu_dma_res;	/* iATU/DMA register space */
	void __iomem *appl_base;	/* mapped APPL registers */
	struct clk *core_clk;		/* core clock; rate tracks link speed */
	struct reset_control *core_apb_rst;
	struct reset_control *core_rst;
	struct dw_pcie pci;		/* embedded DesignWare core state */
	struct tegra_bpmp *bpmp;	/* BPMP-FW handle (MRQ_UPHY requests) */

	bool supports_clkreq;		/* DT "supports-clkreq" */
	bool enable_cdm_check;		/* DT "snps,enable-cdm-check" */
	bool link_state;
	bool update_fc_fixup;		/* DT "nvidia,update-fc-fixup" */
	u8 init_link_width;		/* link width recorded at initial link-up */
	u32 msi_ctrl_int;
	u32 num_lanes;			/* DT "num-lanes" */
	u32 max_speed;			/* from of_pci_get_max_link_speed() */
	u32 cid;			/* controller ID (2nd cell of "nvidia,bpmp") */
	u32 cfg_link_cap_l1sub;		/* DBI offset of L1SS cap + PCI_L1SS_CAP */
	u32 pcie_cap_base;		/* DBI offset of the PCIe capability */
	u32 aspm_cmrt;			/* DT "nvidia,aspm-cmrt-us" */
	u32 aspm_pwr_on_t;		/* DT "nvidia,aspm-pwr-on-t-us" */
	u32 aspm_l0s_enter_lat;		/* DT "nvidia,aspm-l0s-entrance-latency-us" */

	struct regulator *pex_ctl_supply;
	struct regulator *slot_ctl_3v3;
	struct regulator *slot_ctl_12v;

	unsigned int phy_count;		/* number of DT "phy-names" entries */
	struct phy **phys;

	struct dentry *debugfs;
};
290
/* Convert an embedded dw_pcie pointer back to its tegra_pcie_dw container */
static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
{
	return container_of(pci, struct tegra_pcie_dw, pci);
}
295
/* Write a 32-bit value to an APPL register (relaxed, no barrier implied) */
static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
			       const u32 reg)
{
	writel_relaxed(value, pcie->appl_base + reg);
}
301
/* Read a 32-bit value from an APPL register (relaxed, no barrier implied) */
static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
{
	return readl_relaxed(pcie->appl_base + reg);
}
306
/* Per-SoC match data: the mode (root port/endpoint) the controller runs in */
struct tegra_pcie_soc {
	enum dw_pcie_device_mode mode;
};
310
/*
 * apply_bad_link_workaround() - recover from a degraded link width
 * @pp: DesignWare host port
 *
 * If Link Bandwidth Management status (LBMS) is set and the currently
 * negotiated link width is narrower than the width recorded at initial
 * link-up, force the target link speed down to 2.5 GT/s and retrain the
 * link instead of staying at the reduced width.
 */
static void apply_bad_link_workaround(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 current_link_width;
	u16 val;

	/*
	 * NOTE:- Since this scenario is uncommon and link as such is not
	 * stable anyway, not waiting to confirm if link is really
	 * transitioning to Gen-2 speed
	 */
	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
	if (val & PCI_EXP_LNKSTA_LBMS) {
		current_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
		if (pcie->init_link_width > current_link_width) {
			dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
			/* Cap the target link speed at 2.5 GT/s */
			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL2);
			val &= ~PCI_EXP_LNKCTL2_TLS;
			val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL2, val);

			/* Trigger link retraining */
			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL);
			val |= PCI_EXP_LNKCTL_RL;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL, val);
		}
	}
}
343
/*
 * tegra_pcie_rp_irq_handler() - root-port mode interrupt service routine
 * @pcie: controller instance
 *
 * Dispatches on the top-level APPL_INTR_STATUS_L0 register:
 *  - link state change: applies the Secondary Bus Reset / surprise link
 *    down workaround (pulse the core reset override, re-arm the direct
 *    speed change),
 *  - INTx group: bandwidth management / autonomous bandwidth events,
 *  - CDM register check completion and error reporting.
 *
 * Always returns IRQ_HANDLED.
 */
static irqreturn_t tegra_pcie_rp_irq_handler(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	struct pcie_port *pp = &pci->pp;
	u32 val, status_l0, status_l1;
	u16 val_w;

	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		/* Ack the reported level-1 events by writing the status back */
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
		if (status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
			/* SBR & Surprise Link Down WAR */
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
			udelay(1);
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);

			/* Re-arm direct speed change for the retrained link */
			val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
			val |= PORT_LOGIC_GEN2_CTRL_DIRECT_SPEED_CHANGE;
			dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);
		}
	}

	if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
		if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);
			/* Width may have dropped autonomously; try to recover */
			apply_bad_link_workaround(pp);
		}
		if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);

			/* Log the newly negotiated link speed */
			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						  PCI_EXP_LNKSTA);
			dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w &
				PCI_EXP_LNKSTA_CLS);
		}
	}

	if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
			dev_info(pci->dev, "CDM check complete\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
		}
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
			dev_err(pci->dev, "CDM comparison mismatch\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
		}
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
			dev_err(pci->dev, "CDM Logic error\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
		}
		/* Write results back to the DWC CDM check status register */
		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
		dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val);
	}

	return IRQ_HANDLED;
}
413
414static irqreturn_t tegra_pcie_irq_handler(int irq, void *arg)
415{
416 struct tegra_pcie_dw *pcie = arg;
417
418 return tegra_pcie_rp_irq_handler(pcie);
419}
420
421static int tegra_pcie_dw_rd_own_conf(struct pcie_port *pp, int where, int size,
422 u32 *val)
423{
424 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
425
426 /*
427 * This is an endpoint mode specific register happen to appear even
428 * when controller is operating in root port mode and system hangs
429 * when it is accessed with link being in ASPM-L1 state.
430 * So skip accessing it altogether
431 */
432 if (where == PORT_LOGIC_MSIX_DOORBELL) {
433 *val = 0x00000000;
434 return PCIBIOS_SUCCESSFUL;
435 }
436
437 return dw_pcie_read(pci->dbi_base + where, size, val);
438}
439
440static int tegra_pcie_dw_wr_own_conf(struct pcie_port *pp, int where, int size,
441 u32 val)
442{
443 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
444
445 /*
446 * This is an endpoint mode specific register happen to appear even
447 * when controller is operating in root port mode and system hangs
448 * when it is accessed with link being in ASPM-L1 state.
449 * So skip accessing it altogether
450 */
451 if (where == PORT_LOGIC_MSIX_DOORBELL)
452 return PCIBIOS_SUCCESSFUL;
453
454 return dw_pcie_write(pci->dbi_base + where, size, val);
455}
456
457#if defined(CONFIG_PCIEASPM)
458static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
459{
460 u32 val;
461
462 val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
463 val &= ~PCI_L1SS_CAP_ASPM_L1_1;
464 dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
465}
466
467static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
468{
469 u32 val;
470
471 val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
472 val &= ~PCI_L1SS_CAP_ASPM_L1_2;
473 dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
474}
475
/*
 * event_counter_prog() - select a Group-5 event and read its counter
 * @pcie: controller instance
 * @event: event selector (EVENT_COUNTER_EVENT_*)
 *
 * Programs the per-controller event counter control register to select
 * @event from group 5 with all counters enabled, then returns the value
 * read from the matching data register.
 */
static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid]);
	val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
	val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
	val = dw_pcie_readl_dbi(&pcie->pci, event_cntr_data_offset[pcie->cid]);

	return val;
}
490
491static int aspm_state_cnt(struct seq_file *s, void *data)
492{
493 struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
494 dev_get_drvdata(s->private);
495 u32 val;
496
497 seq_printf(s, "Tx L0s entry count : %u\n",
498 event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));
499
500 seq_printf(s, "Rx L0s entry count : %u\n",
501 event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));
502
503 seq_printf(s, "Link L1 entry count : %u\n",
504 event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));
505
506 seq_printf(s, "Link L1.1 entry count : %u\n",
507 event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));
508
509 seq_printf(s, "Link L1.2 entry count : %u\n",
510 event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));
511
512 /* Clear all counters */
513 dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid],
514 EVENT_COUNTER_ALL_CLEAR);
515
516 /* Re-enable counting */
517 val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
518 val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
519 dw_pcie_writel_dbi(&pcie->pci, event_cntr_ctrl_offset[pcie->cid], val);
520
521 return 0;
522}
523
/*
 * init_host_aspm() - program ASPM-related capabilities and counters
 * @pcie: controller instance
 *
 * Caches the L1SS capability offset, enables the Group-5 event counters,
 * programs the DT-provided T_cmrt / T_pwr_on values into the L1SS
 * capability and the L0s entrance latency into the DWC ASPM control
 * register.
 */
static void init_host_aspm(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val;

	/* Locate L1 PM Substates capability; cache the CAP register offset */
	val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
	pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;

	/* Enable ASPM counters */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(pci, event_cntr_ctrl_offset[pcie->cid], val);

	/* Program T_cmrt and T_pwr_on values */
	val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
	val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
	val |= (pcie->aspm_cmrt << 8);		/* CM_RESTORE_TIME field (bit 8) */
	val |= (pcie->aspm_pwr_on_t << 19);	/* P_PWR_ON_VALUE field (bit 19) */
	dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);

	/* Program L0s and L1 entrance latencies */
	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
	val &= ~L0S_ENTRANCE_LAT_MASK;
	val |= (pcie->aspm_l0s_enter_lat << L0S_ENTRANCE_LAT_SHIFT);
	val |= ENTER_ASPM;
	dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);
}
551
552static int init_debugfs(struct tegra_pcie_dw *pcie)
553{
554 struct dentry *d;
555
556 d = debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt",
557 pcie->debugfs, aspm_state_cnt);
558 if (IS_ERR_OR_NULL(d))
559 dev_err(pcie->dev,
560 "Failed to create debugfs file \"aspm_state_cnt\"\n");
561
562 return 0;
563}
564#else
/* No-op stubs used when CONFIG_PCIEASPM is disabled */
static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
static inline int init_debugfs(struct tegra_pcie_dw *pcie) { return 0; }
569#endif
570
/*
 * tegra_pcie_enable_system_interrupts() - enable link/system event IRQs
 * @pp: DesignWare host port
 *
 * Enables the link-state interrupt (and CDM-check interrupts when
 * requested via DT), records the initial negotiated link width used by
 * apply_bad_link_workaround(), and enables Link Bandwidth Management
 * interrupt generation (LBMIE).
 */
static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;
	u16 val_w;

	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
	val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);

	if (pcie->enable_cdm_check) {
		val = appl_readl(pcie, APPL_INTR_EN_L0_0);
		val |= APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN;
		appl_writel(pcie, val, APPL_INTR_EN_L0_0);

		val = appl_readl(pcie, APPL_INTR_EN_L1_18);
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
		appl_writel(pcie, val, APPL_INTR_EN_L1_18);
	}

	/* Remember the width negotiated at link-up for the bad-link WAR */
	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKSTA);
	pcie->init_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val_w);

	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKCTL);
	val_w |= PCI_EXP_LNKCTL_LBMIE;
	dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
			   val_w);
}
607
/*
 * tegra_pcie_enable_legacy_interrupts() - enable INTx and related events
 * @pp: DesignWare host port
 *
 * Routes INTx, bandwidth-change and (when CONFIG_PCIEAER is enabled)
 * AER events to the system interrupt line.
 */
static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;

	/* Enable legacy interrupt generation */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
	val |= APPL_INTR_EN_L0_0_INT_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
	val |= APPL_INTR_EN_L1_8_INTX_EN;
	val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
	val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
	if (IS_ENABLED(CONFIG_PCIEAER))
		val |= APPL_INTR_EN_L1_8_AER_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
}
628
/*
 * tegra_pcie_enable_msi_interrupts() - initialize and enable MSI reception
 * @pp: DesignWare host port
 */
static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;

	dw_pcie_msi_init(pp);

	/* Enable MSI interrupt generation */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
	val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
}
643
644static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
645{
646 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
647 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
648
649 /* Clear interrupt statuses before enabling interrupts */
650 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
651 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
652 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
653 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
654 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
655 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
656 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
657 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
658 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
659 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
660 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
661 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
662 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
663 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
664 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
665
666 tegra_pcie_enable_system_interrupts(pp);
667 tegra_pcie_enable_legacy_interrupts(pp);
668 if (IS_ENABLED(CONFIG_PCI_MSI))
669 tegra_pcie_enable_msi_interrupts(pp);
670}
671
/*
 * config_gen3_gen4_eq_presets() - program lane equalization presets
 * @pcie: controller instance
 *
 * For every lane, initializes the downstream- and upstream-port TX
 * presets to GEN3_GEN4_EQ_PRESET_INIT in both the Secondary PCIe
 * capability (8 GT/s) and the Physical Layer 16 GT/s capability. Then
 * programs the GEN3_EQ_CONTROL preset request vector separately per
 * data rate by toggling the GEN3_RELATED rate shadow select
 * (presumably sel 0 => 8 GT/s, sel 1 => 16 GT/s — per DWC databook).
 */
static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val, offset, i;

	/* Program init preset */
	for (i = 0; i < pcie->num_lanes; i++) {
		/* 8 GT/s presets in the Secondary PCIe capability */
		dw_pcie_read(pci->dbi_base + CAP_SPCIE_CAP_OFF
				 + (i * 2), 2, &val);
		val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			   CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
		dw_pcie_write(pci->dbi_base + CAP_SPCIE_CAP_OFF
				 + (i * 2), 2, val);

		/* 16 GT/s presets in the Physical Layer 16 GT/s capability */
		offset = dw_pcie_find_ext_capability(pci,
						     PCI_EXT_CAP_ID_PL_16GT) +
				PCI_PL_16GT_LE_CTRL;
		dw_pcie_read(pci->dbi_base + offset + i, 1, &val);
		val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
		dw_pcie_write(pci->dbi_base + offset + i, 1, val);
	}

	/* Select the first rate shadow register set */
	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
	val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

	/* Select the second rate shadow register set */
	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
	val |= (0x360 << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

	/* Restore the shadow select to its default */
	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
}
726
/*
 * tegra_pcie_prepare_host() - program the root port and start link training
 * @pp: DesignWare host port
 *
 * Configures decode enables, FTS counts, CRS response, link speed/width
 * limits from DT, equalization presets and ASPM settings in the DBI
 * space, then asserts PERST#, enables the LTSSM and de-asserts PERST#
 * so link training can begin. Sleeps 100 ms at the end to give the
 * link time to come up (callers then poll with dw_pcie_wait_for_link()).
 */
static void tegra_pcie_prepare_host(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;

	/* Disable I/O space decoding */
	val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
	val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
	dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);

	/* Enable prefetchable memory base/limit decoding */
	val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE);
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE;
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE;
	dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val);

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Configure FTS */
	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL);
	val &= ~(N_FTS_MASK << N_FTS_SHIFT);
	val |= N_FTS_VAL << N_FTS_SHIFT;
	dw_pcie_writel_dbi(pci, PORT_LOGIC_ACK_F_ASPM_CTRL, val);

	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_GEN2_CTRL);
	val &= ~FTS_MASK;
	val |= FTS_VAL;
	dw_pcie_writel_dbi(pci, PORT_LOGIC_GEN2_CTRL, val);

	/* Enable as 0xFFFF0001 response for CRS */
	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
	val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
	val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
		AMBA_ERROR_RESPONSE_CRS_SHIFT);
	dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);

	/* Configure Max Speed from DT */
	if (pcie->max_speed && pcie->max_speed != -EINVAL) {
		val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base +
					PCI_EXP_LNKCAP);
		val &= ~PCI_EXP_LNKCAP_SLS;
		val |= pcie->max_speed;
		dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP,
				   val);
	}

	/* Configure Max lane width from DT */
	val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_MLW;
	val |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, pcie->num_lanes);
	dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);

	config_gen3_gen4_eq_presets(pcie);

	init_host_aspm(pcie);

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	/* Optional ACK/NAK timer tweak requested via DT */
	if (pcie->update_fc_fixup) {
		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
	}

	dw_pcie_setup_rc(pp);

	/* Pre-set core clock to Gen-4 rate; retuned after link-up */
	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);

	/* Assert RST */
	val = appl_readl(pcie, APPL_PINMUX);
	val &= ~APPL_PINMUX_PEX_RST;
	appl_writel(pcie, val, APPL_PINMUX);

	usleep_range(100, 200);

	/* Enable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	/* De-assert RST */
	val = appl_readl(pcie, APPL_PINMUX);
	val |= APPL_PINMUX_PEX_RST;
	appl_writel(pcie, val, APPL_PINMUX);

	msleep(100);
}
815
816static int tegra_pcie_dw_host_init(struct pcie_port *pp)
817{
818 struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
819 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
820 u32 val, tmp, offset, speed;
821
822 tegra_pcie_prepare_host(pp);
823
824 if (dw_pcie_wait_for_link(pci)) {
825 /*
826 * There are some endpoints which can't get the link up if
827 * root port has Data Link Feature (DLF) enabled.
828 * Refer Spec rev 4.0 ver 1.0 sec 3.4.2 & 7.7.4 for more info
829 * on Scaled Flow Control and DLF.
830 * So, need to confirm that is indeed the case here and attempt
831 * link up once again with DLF disabled.
832 */
833 val = appl_readl(pcie, APPL_DEBUG);
834 val &= APPL_DEBUG_LTSSM_STATE_MASK;
835 val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
836 tmp = appl_readl(pcie, APPL_LINK_STATUS);
837 tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
838 if (!(val == 0x11 && !tmp)) {
839 /* Link is down for all good reasons */
840 return 0;
841 }
842
843 dev_info(pci->dev, "Link is down in DLL");
844 dev_info(pci->dev, "Trying again with DLFE disabled\n");
845 /* Disable LTSSM */
846 val = appl_readl(pcie, APPL_CTRL);
847 val &= ~APPL_CTRL_LTSSM_EN;
848 appl_writel(pcie, val, APPL_CTRL);
849
850 reset_control_assert(pcie->core_rst);
851 reset_control_deassert(pcie->core_rst);
852
853 offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
854 val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
855 val &= ~PCI_DLF_EXCHANGE_ENABLE;
856 dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);
857
858 tegra_pcie_prepare_host(pp);
859
860 if (dw_pcie_wait_for_link(pci))
861 return 0;
862 }
863
864 speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
865 PCI_EXP_LNKSTA_CLS;
866 clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
867
868 tegra_pcie_enable_interrupts(pp);
869
870 return 0;
871}
872
873static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
874{
875 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
876 u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
877
878 return !!(val & PCI_EXP_LNKSTA_DLLLA);
879}
880
/* Advertise the maximum number of MSI vectors supported by the DWC core */
static void tegra_pcie_set_msi_vec_num(struct pcie_port *pp)
{
	pp->num_vectors = MAX_MSI_IRQS;
}
885
/* Core DesignWare callbacks: only a custom link-up check is needed */
static const struct dw_pcie_ops tegra_dw_pcie_ops = {
	.link_up = tegra_pcie_dw_link_up,
};
889
/* Host callbacks; own-config accessors filter the MSI-X doorbell register */
static struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
	.rd_own_conf = tegra_pcie_dw_rd_own_conf,
	.wr_own_conf = tegra_pcie_dw_wr_own_conf,
	.host_init = tegra_pcie_dw_host_init,
	.set_num_vectors = tegra_pcie_set_msi_vec_num,
};
896
897static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
898{
899 unsigned int phy_count = pcie->phy_count;
900
901 while (phy_count--) {
902 phy_power_off(pcie->phys[phy_count]);
903 phy_exit(pcie->phys[phy_count]);
904 }
905}
906
907static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
908{
909 unsigned int i;
910 int ret;
911
912 for (i = 0; i < pcie->phy_count; i++) {
913 ret = phy_init(pcie->phys[i]);
914 if (ret < 0)
915 goto phy_power_off;
916
917 ret = phy_power_on(pcie->phys[i]);
918 if (ret < 0)
919 goto phy_exit;
920 }
921
922 return 0;
923
924phy_power_off:
925 while (i--) {
926 phy_power_off(pcie->phys[i]);
927phy_exit:
928 phy_exit(pcie->phys[i]);
929 }
930
931 return ret;
932}
933
934static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
935{
936 struct device_node *np = pcie->dev->of_node;
937 int ret;
938
939 ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
940 if (ret < 0) {
941 dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
942 return ret;
943 }
944
945 ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us",
946 &pcie->aspm_pwr_on_t);
947 if (ret < 0)
948 dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n",
949 ret);
950
951 ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us",
952 &pcie->aspm_l0s_enter_lat);
953 if (ret < 0)
954 dev_info(pcie->dev,
955 "Failed to read ASPM L0s Entrance latency: %d\n", ret);
956
957 ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
958 if (ret < 0) {
959 dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret);
960 return ret;
961 }
962
963 pcie->max_speed = of_pci_get_max_link_speed(np);
964
965 ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
966 if (ret) {
967 dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
968 return ret;
969 }
970
971 ret = of_property_count_strings(np, "phy-names");
972 if (ret < 0) {
973 dev_err(pcie->dev, "Failed to find PHY entries: %d\n",
974 ret);
975 return ret;
976 }
977 pcie->phy_count = ret;
978
979 if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
980 pcie->update_fc_fixup = true;
981
982 pcie->supports_clkreq =
983 of_property_read_bool(pcie->dev->of_node, "supports-clkreq");
984
985 pcie->enable_cdm_check =
986 of_property_read_bool(np, "snps,enable-cdm-check");
987
988 return 0;
989}
990
991static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
992 bool enable)
993{
994 struct mrq_uphy_response resp;
995 struct tegra_bpmp_message msg;
996 struct mrq_uphy_request req;
997
998 /* Controller-5 doesn't need to have its state set by BPMP-FW */
999 if (pcie->cid == 5)
1000 return 0;
1001
1002 memset(&req, 0, sizeof(req));
1003 memset(&resp, 0, sizeof(resp));
1004
1005 req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
1006 req.controller_state.pcie_controller = pcie->cid;
1007 req.controller_state.enable = enable;
1008
1009 memset(&msg, 0, sizeof(msg));
1010 msg.mrq = MRQ_UPHY;
1011 msg.tx.data = &req;
1012 msg.tx.size = sizeof(req);
1013 msg.rx.data = &resp;
1014 msg.rx.size = sizeof(resp);
1015
1016 return tegra_bpmp_transfer(pcie->bpmp, &msg);
1017}
1018
/*
 * tegra_pcie_downstream_dev_to_D0() - Force immediate downstream devices
 * into D0
 * @pcie: Tegra PCIe controller context
 */
static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
	struct pcie_port *pp = &pcie->pci.pp;
	struct pci_bus *child, *root_bus = NULL;
	struct pci_dev *pdev;

	/*
	 * link doesn't go into L2 state with some of the endpoints with Tegra
	 * if they are not in D0 state. So, need to make sure that immediate
	 * downstream devices are in D0 state before sending PME_TurnOff to put
	 * link into L2 state.
	 * This is as per PCI Express Base r4.0 v1.0 September 27-2017,
	 * 5.2 Link State Power Management (Page #428).
	 */

	/* Find the single bus directly below the Root Port */
	list_for_each_entry(child, &pp->root_bus->children, node) {
		/* Bring downstream devices to D0 if they are not already in */
		if (child->parent == pp->root_bus) {
			root_bus = child;
			break;
		}
	}

	if (!root_bus) {
		dev_err(pcie->dev, "Failed to find downstream devices\n");
		return;
	}

	/* Only slot-0 functions sit immediately below the Root Port */
	list_for_each_entry(pdev, &root_bus->devices, bus_list) {
		if (PCI_SLOT(pdev->devfn) == 0) {
			if (pci_set_power_state(pdev, PCI_D0))
				dev_err(pcie->dev,
					"Failed to transition %s to D0 state\n",
					dev_name(&pdev->dev));
		}
	}
}
1056
1057static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
1058{
1059 pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
1060 if (IS_ERR(pcie->slot_ctl_3v3)) {
1061 if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV)
1062 return PTR_ERR(pcie->slot_ctl_3v3);
1063
1064 pcie->slot_ctl_3v3 = NULL;
1065 }
1066
1067 pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v");
1068 if (IS_ERR(pcie->slot_ctl_12v)) {
1069 if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV)
1070 return PTR_ERR(pcie->slot_ctl_12v);
1071
1072 pcie->slot_ctl_12v = NULL;
1073 }
1074
1075 return 0;
1076}
1077
1078static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
1079{
1080 int ret;
1081
1082 if (pcie->slot_ctl_3v3) {
1083 ret = regulator_enable(pcie->slot_ctl_3v3);
1084 if (ret < 0) {
1085 dev_err(pcie->dev,
1086 "Failed to enable 3.3V slot supply: %d\n", ret);
1087 return ret;
1088 }
1089 }
1090
1091 if (pcie->slot_ctl_12v) {
1092 ret = regulator_enable(pcie->slot_ctl_12v);
1093 if (ret < 0) {
1094 dev_err(pcie->dev,
1095 "Failed to enable 12V slot supply: %d\n", ret);
1096 goto fail_12v_enable;
1097 }
1098 }
1099
1100 /*
1101 * According to PCI Express Card Electromechanical Specification
1102 * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive)
1103 * should be a minimum of 100ms.
1104 */
1105 if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v)
1106 msleep(100);
1107
1108 return 0;
1109
1110fail_12v_enable:
1111 if (pcie->slot_ctl_3v3)
1112 regulator_disable(pcie->slot_ctl_3v3);
1113 return ret;
1114}
1115
/* Disable slot supplies in reverse order of tegra_pcie_enable_slot_regulators() */
static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
{
	if (pcie->slot_ctl_12v)
		regulator_disable(pcie->slot_ctl_12v);
	if (pcie->slot_ctl_3v3)
		regulator_disable(pcie->slot_ctl_3v3);
}
1123
/*
 * tegra_pcie_config_controller() - Power up and configure the controller
 * for Root Port operation
 * @pcie: Tegra PCIe controller context
 * @en_hw_hot_rst: whether to enable the hardware hot-reset mode in APPL_CTRL
 *
 * The bring-up order (BPMP power-up -> slot supplies -> pex-ctl supply ->
 * core clock -> APB reset release -> PHY -> APPL programming -> core reset
 * release) is order-dependent; failures unwind in reverse through the goto
 * ladder at the bottom.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
					bool en_hw_hot_rst)
{
	int ret;
	u32 val;

	/* Ask BPMP firmware to power up this controller partition */
	ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
	if (ret) {
		dev_err(pcie->dev,
			"Failed to enable controller %u: %d\n", pcie->cid, ret);
		return ret;
	}

	ret = tegra_pcie_enable_slot_regulators(pcie);
	if (ret < 0)
		goto fail_slot_reg_en;

	ret = regulator_enable(pcie->pex_ctl_supply);
	if (ret < 0) {
		dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret);
		goto fail_reg_en;
	}

	ret = clk_prepare_enable(pcie->core_clk);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret);
		goto fail_core_clk;
	}

	/* APB access to the APPL registers requires the APB reset released */
	ret = reset_control_deassert(pcie->core_apb_rst);
	if (ret) {
		dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n",
			ret);
		goto fail_core_apb_rst;
	}

	if (en_hw_hot_rst) {
		/* Enable HW_HOT_RST mode */
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= APPL_CTRL_HW_HOT_RST_EN;
		appl_writel(pcie, val, APPL_CTRL);
	}

	ret = tegra_pcie_enable_phy(pcie);
	if (ret) {
		dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret);
		goto fail_phy;
	}

	/* Update CFG base address */
	appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
		    APPL_CFG_BASE_ADDR);

	/* Configure this core for RP mode operation */
	appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE);

	/* Disable second-level clock gating overrides */
	appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

	val = appl_readl(pcie, APPL_CTRL);
	appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL);

	val = appl_readl(pcie, APPL_CFG_MISC);
	val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
	appl_writel(pcie, val, APPL_CFG_MISC);

	/* No CLKREQ routing: force the CLKREQ output override on */
	if (!pcie->supports_clkreq) {
		val = appl_readl(pcie, APPL_PINMUX);
		val |= APPL_PINMUX_CLKREQ_OUT_OVRD_EN;
		val |= APPL_PINMUX_CLKREQ_OUT_OVRD;
		appl_writel(pcie, val, APPL_PINMUX);
	}

	/* Update iATU_DMA base address */
	appl_writel(pcie,
		    pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
		    APPL_CFG_IATU_DMA_BASE_ADDR);

	/* DBI space becomes accessible only after the core reset is released */
	reset_control_deassert(pcie->core_rst);

	pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
						      PCI_CAP_ID_EXP);

	/* Disable ASPM-L1SS advertisement as there is no CLKREQ routing */
	if (!pcie->supports_clkreq) {
		disable_aspm_l11(pcie);
		disable_aspm_l12(pcie);
	}

	return ret;

fail_phy:
	reset_control_assert(pcie->core_apb_rst);
fail_core_apb_rst:
	clk_disable_unprepare(pcie->core_clk);
fail_core_clk:
	regulator_disable(pcie->pex_ctl_supply);
fail_reg_en:
	tegra_pcie_disable_slot_regulators(pcie);
fail_slot_reg_en:
	tegra_pcie_bpmp_set_ctrl_state(pcie, false);

	return ret;
}
1229
1230static int __deinit_controller(struct tegra_pcie_dw *pcie)
1231{
1232 int ret;
1233
1234 ret = reset_control_assert(pcie->core_rst);
1235 if (ret) {
1236 dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n",
1237 ret);
1238 return ret;
1239 }
1240
1241 tegra_pcie_disable_phy(pcie);
1242
1243 ret = reset_control_assert(pcie->core_apb_rst);
1244 if (ret) {
1245 dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);
1246 return ret;
1247 }
1248
1249 clk_disable_unprepare(pcie->core_clk);
1250
1251 ret = regulator_disable(pcie->pex_ctl_supply);
1252 if (ret) {
1253 dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);
1254 return ret;
1255 }
1256
1257 tegra_pcie_disable_slot_regulators(pcie);
1258
1259 ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
1260 if (ret) {
1261 dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
1262 pcie->cid, ret);
1263 return ret;
1264 }
1265
1266 return ret;
1267}
1268
1269static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
1270{
1271 struct dw_pcie *pci = &pcie->pci;
1272 struct pcie_port *pp = &pci->pp;
1273 int ret;
1274
1275 ret = tegra_pcie_config_controller(pcie, false);
1276 if (ret < 0)
1277 return ret;
1278
1279 pp->ops = &tegra_pcie_dw_host_ops;
1280
1281 ret = dw_pcie_host_init(pp);
1282 if (ret < 0) {
1283 dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret);
1284 goto fail_host_init;
1285 }
1286
1287 return 0;
1288
1289fail_host_init:
1290 return __deinit_controller(pcie);
1291}
1292
/*
 * tegra_pcie_try_link_l2() - Request link transition to the L2 state
 * @pcie: Tegra PCIe controller context
 *
 * Triggers transmission of the PME_Turn_Off message via APPL_RADM_STATUS
 * and polls APPL_DEBUG for the link-in-L2 indication.
 *
 * Return: 0 if the link is already down or reached L2 within
 * PME_ACK_TIMEOUT, otherwise the (negative) poll-timeout error.
 */
static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
{
	u32 val;

	if (!tegra_pcie_dw_link_up(&pcie->pci))
		return 0;

	val = appl_readl(pcie, APPL_RADM_STATUS);
	val |= APPL_PM_XMT_TURNOFF_STATE;
	appl_writel(pcie, val, APPL_RADM_STATUS);

	return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
					 val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
					 1, PME_ACK_TIMEOUT);
}
1308
/*
 * tegra_pcie_dw_pme_turnoff() - Put the link into L2 before powering down
 * @pcie: Tegra PCIe controller context
 *
 * Attempts a graceful L2 entry via PME_Turn_Off; if that fails, forces the
 * Root Port back to the LTSSM detect state by asserting PERST# to the
 * endpoint. Finally cuts REFCLK to the slot via the CLKREQ/CLK output
 * overrides.
 */
static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
{
	u32 data;
	int err;

	if (!tegra_pcie_dw_link_up(&pcie->pci)) {
		dev_dbg(pcie->dev, "PCIe link is not up...!\n");
		return;
	}

	if (tegra_pcie_try_link_l2(pcie)) {
		dev_info(pcie->dev, "Link didn't transition to L2 state\n");
		/*
		 * TX lane clock freq will reset to Gen1 only if link is in L2
		 * or detect state.
		 * So apply pex_rst to end point to force RP to go into detect
		 * state
		 */
		data = appl_readl(pcie, APPL_PINMUX);
		data &= ~APPL_PINMUX_PEX_RST;
		appl_writel(pcie, data, APPL_PINMUX);

		/* Wait for the LTSSM to reach the pre-detect state */
		err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
						data,
						((data &
						APPL_DEBUG_LTSSM_STATE_MASK) >>
						APPL_DEBUG_LTSSM_STATE_SHIFT) ==
						LTSSM_STATE_PRE_DETECT,
						1, LTSSM_TIMEOUT);
		if (err) {
			dev_info(pcie->dev, "Link didn't go to detect state\n");
		} else {
			/* Disable LTSSM after link is in detect state */
			data = appl_readl(pcie, APPL_CTRL);
			data &= ~APPL_CTRL_LTSSM_EN;
			appl_writel(pcie, data, APPL_CTRL);
		}
	}
	/*
	 * DBI registers may not be accessible after this as PLL-E would be
	 * down depending on how CLKREQ is pulled by end point
	 */
	data = appl_readl(pcie, APPL_PINMUX);
	data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE);
	/* Cut REFCLK to slot */
	data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
	data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
	appl_writel(pcie, data, APPL_PINMUX);
}
1358
/*
 * tegra_pcie_deinit_controller() - Tear down the host and power the
 * controller partition down
 * @pcie: Tegra PCIe controller context
 *
 * Return: 0 on success, negative error code from __deinit_controller().
 */
static int tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
{
	/* Endpoints must be in D0 before PME_Turn_Off for L2 entry */
	tegra_pcie_downstream_dev_to_D0(pcie);
	dw_pcie_host_deinit(&pcie->pci.pp);
	tegra_pcie_dw_pme_turnoff(pcie);

	return __deinit_controller(pcie);
}
1367
1368static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
1369{
1370 struct pcie_port *pp = &pcie->pci.pp;
1371 struct device *dev = pcie->dev;
1372 char *name;
1373 int ret;
1374
1375 if (IS_ENABLED(CONFIG_PCI_MSI)) {
1376 pp->msi_irq = of_irq_get_byname(dev->of_node, "msi");
1377 if (!pp->msi_irq) {
1378 dev_err(dev, "Failed to get MSI interrupt\n");
1379 return -ENODEV;
1380 }
1381 }
1382
1383 pm_runtime_enable(dev);
1384
1385 ret = pm_runtime_get_sync(dev);
1386 if (ret < 0) {
1387 dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
1388 ret);
1389 goto fail_pm_get_sync;
1390 }
1391
1392 ret = pinctrl_pm_select_default_state(dev);
1393 if (ret < 0) {
1394 dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
1395 goto fail_pm_get_sync;
1396 }
1397
1398 tegra_pcie_init_controller(pcie);
1399
1400 pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
1401 if (!pcie->link_state) {
1402 ret = -ENOMEDIUM;
1403 goto fail_host_init;
1404 }
1405
1406 name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
1407 if (!name) {
1408 ret = -ENOMEM;
1409 goto fail_host_init;
1410 }
1411
1412 pcie->debugfs = debugfs_create_dir(name, NULL);
1413 if (!pcie->debugfs)
1414 dev_err(dev, "Failed to create debugfs\n");
1415 else
1416 init_debugfs(pcie);
1417
1418 return ret;
1419
1420fail_host_init:
1421 tegra_pcie_deinit_controller(pcie);
1422fail_pm_get_sync:
1423 pm_runtime_put_sync(dev);
1424 pm_runtime_disable(dev);
1425 return ret;
1426}
1427
1428static int tegra_pcie_dw_probe(struct platform_device *pdev)
1429{
1430 struct device *dev = &pdev->dev;
1431 struct resource *atu_dma_res;
1432 struct tegra_pcie_dw *pcie;
1433 struct resource *dbi_res;
1434 struct pcie_port *pp;
1435 struct dw_pcie *pci;
1436 struct phy **phys;
1437 char *name;
1438 int ret;
1439 u32 i;
1440
1441 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
1442 if (!pcie)
1443 return -ENOMEM;
1444
1445 pci = &pcie->pci;
1446 pci->dev = &pdev->dev;
1447 pci->ops = &tegra_dw_pcie_ops;
1448 pp = &pci->pp;
1449 pcie->dev = &pdev->dev;
1450
1451 ret = tegra_pcie_dw_parse_dt(pcie);
1452 if (ret < 0) {
1453 dev_err(dev, "Failed to parse device tree: %d\n", ret);
1454 return ret;
1455 }
1456
1457 ret = tegra_pcie_get_slot_regulators(pcie);
1458 if (ret < 0) {
1459 dev_err(dev, "Failed to get slot regulators: %d\n", ret);
1460 return ret;
1461 }
1462
1463 pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
1464 if (IS_ERR(pcie->pex_ctl_supply)) {
1465 ret = PTR_ERR(pcie->pex_ctl_supply);
1466 if (ret != -EPROBE_DEFER)
1467 dev_err(dev, "Failed to get regulator: %ld\n",
1468 PTR_ERR(pcie->pex_ctl_supply));
1469 return ret;
1470 }
1471
1472 pcie->core_clk = devm_clk_get(dev, "core");
1473 if (IS_ERR(pcie->core_clk)) {
1474 dev_err(dev, "Failed to get core clock: %ld\n",
1475 PTR_ERR(pcie->core_clk));
1476 return PTR_ERR(pcie->core_clk);
1477 }
1478
1479 pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1480 "appl");
1481 if (!pcie->appl_res) {
1482 dev_err(dev, "Failed to find \"appl\" region\n");
1483 return -ENODEV;
1484 }
1485
1486 pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res);
1487 if (IS_ERR(pcie->appl_base))
1488 return PTR_ERR(pcie->appl_base);
1489
1490 pcie->core_apb_rst = devm_reset_control_get(dev, "apb");
1491 if (IS_ERR(pcie->core_apb_rst)) {
1492 dev_err(dev, "Failed to get APB reset: %ld\n",
1493 PTR_ERR(pcie->core_apb_rst));
1494 return PTR_ERR(pcie->core_apb_rst);
1495 }
1496
1497 phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL);
1498 if (!phys)
1499 return -ENOMEM;
1500
1501 for (i = 0; i < pcie->phy_count; i++) {
1502 name = kasprintf(GFP_KERNEL, "p2u-%u", i);
1503 if (!name) {
1504 dev_err(dev, "Failed to create P2U string\n");
1505 return -ENOMEM;
1506 }
1507 phys[i] = devm_phy_get(dev, name);
1508 kfree(name);
1509 if (IS_ERR(phys[i])) {
1510 ret = PTR_ERR(phys[i]);
1511 if (ret != -EPROBE_DEFER)
1512 dev_err(dev, "Failed to get PHY: %d\n", ret);
1513 return ret;
1514 }
1515 }
1516
1517 pcie->phys = phys;
1518
1519 dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
1520 if (!dbi_res) {
1521 dev_err(dev, "Failed to find \"dbi\" region\n");
1522 return -ENODEV;
1523 }
1524 pcie->dbi_res = dbi_res;
1525
1526 pci->dbi_base = devm_ioremap_resource(dev, dbi_res);
1527 if (IS_ERR(pci->dbi_base))
1528 return PTR_ERR(pci->dbi_base);
1529
1530 /* Tegra HW locates DBI2 at a fixed offset from DBI */
1531 pci->dbi_base2 = pci->dbi_base + 0x1000;
1532
1533 atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
1534 "atu_dma");
1535 if (!atu_dma_res) {
1536 dev_err(dev, "Failed to find \"atu_dma\" region\n");
1537 return -ENODEV;
1538 }
1539 pcie->atu_dma_res = atu_dma_res;
1540
1541 pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
1542 if (IS_ERR(pci->atu_base))
1543 return PTR_ERR(pci->atu_base);
1544
1545 pcie->core_rst = devm_reset_control_get(dev, "core");
1546 if (IS_ERR(pcie->core_rst)) {
1547 dev_err(dev, "Failed to get core reset: %ld\n",
1548 PTR_ERR(pcie->core_rst));
1549 return PTR_ERR(pcie->core_rst);
1550 }
1551
1552 pp->irq = platform_get_irq_byname(pdev, "intr");
1553 if (!pp->irq) {
1554 dev_err(dev, "Failed to get \"intr\" interrupt\n");
1555 return -ENODEV;
1556 }
1557
1558 ret = devm_request_irq(dev, pp->irq, tegra_pcie_irq_handler,
1559 IRQF_SHARED, "tegra-pcie-intr", pcie);
1560 if (ret) {
1561 dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, ret);
1562 return ret;
1563 }
1564
1565 pcie->bpmp = tegra_bpmp_get(dev);
1566 if (IS_ERR(pcie->bpmp))
1567 return PTR_ERR(pcie->bpmp);
1568
1569 platform_set_drvdata(pdev, pcie);
1570
1571 ret = tegra_pcie_config_rp(pcie);
1572 if (ret && ret != -ENOMEDIUM)
1573 goto fail;
1574 else
1575 return 0;
1576
1577fail:
1578 tegra_bpmp_put(pcie->bpmp);
1579 return ret;
1580}
1581
/*
 * tegra_pcie_dw_remove() - Unbind the driver
 * @pdev: platform device for the controller
 *
 * When the link never came up, probe already unwound the controller and
 * runtime PM, so there is nothing left to tear down here.
 * NOTE(review): in that early-return path the BPMP reference taken in
 * probe is never released with tegra_bpmp_put() — looks like a reference
 * leak; confirm against the probe/config_rp flow.
 */
static int tegra_pcie_dw_remove(struct platform_device *pdev)
{
	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);

	if (!pcie->link_state)
		return 0;

	debugfs_remove_recursive(pcie->debugfs);
	tegra_pcie_deinit_controller(pcie);
	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);
	tegra_bpmp_put(pcie->bpmp);

	return 0;
}
1597
/*
 * tegra_pcie_dw_suspend_late() - Late suspend hook
 * @dev: controller device
 *
 * Arms hardware-controlled hot reset so the controller handles a hot reset
 * on its own while the CPU is suspended; undone by
 * tegra_pcie_dw_resume_early().
 *
 * Return: always 0.
 */
static int tegra_pcie_dw_suspend_late(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
	u32 val;

	if (!pcie->link_state)
		return 0;

	/* Enable HW_HOT_RST mode */
	val = appl_readl(pcie, APPL_CTRL);
	val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
		 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
	val |= APPL_CTRL_HW_HOT_RST_EN;
	appl_writel(pcie, val, APPL_CTRL);

	return 0;
}
1615
/*
 * tegra_pcie_dw_suspend_noirq() - Noirq suspend hook
 * @dev: controller device
 *
 * Saves the MSI enable state (restored in tegra_pcie_dw_resume_noirq()),
 * puts the link into L2 and powers the controller partition down.
 *
 * Return: 0 on success, negative error code from __deinit_controller().
 */
static int tegra_pcie_dw_suspend_noirq(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);

	if (!pcie->link_state)
		return 0;

	/* Save MSI interrupt vector */
	pcie->msi_ctrl_int = dw_pcie_readl_dbi(&pcie->pci,
					       PORT_LOGIC_MSI_CTRL_INT_0_EN);
	tegra_pcie_downstream_dev_to_D0(pcie);
	tegra_pcie_dw_pme_turnoff(pcie);

	return __deinit_controller(pcie);
}
1631
/*
 * tegra_pcie_dw_resume_noirq() - Noirq resume hook
 * @dev: controller device
 *
 * Re-runs the full controller bring-up (with HW hot reset enabled), redoes
 * the host initialization and restores the MSI enable state saved in
 * tegra_pcie_dw_suspend_noirq().
 *
 * Return: 0 on success, negative error code on failure.
 */
static int tegra_pcie_dw_resume_noirq(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
	int ret;

	if (!pcie->link_state)
		return 0;

	ret = tegra_pcie_config_controller(pcie, true);
	if (ret < 0)
		return ret;

	ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
	if (ret < 0) {
		dev_err(dev, "Failed to init host: %d\n", ret);
		goto fail_host_init;
	}

	/* Restore MSI interrupt vector */
	dw_pcie_writel_dbi(&pcie->pci, PORT_LOGIC_MSI_CTRL_INT_0_EN,
			   pcie->msi_ctrl_int);

	return 0;

fail_host_init:
	return __deinit_controller(pcie);
}
1659
/*
 * tegra_pcie_dw_resume_early() - Early resume hook
 * @dev: controller device
 *
 * Disables the hardware-controlled hot reset armed by
 * tegra_pcie_dw_suspend_late() and selects the immediate-reset mode for
 * subsequent software-handled hot resets.
 *
 * Return: always 0.
 */
static int tegra_pcie_dw_resume_early(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
	u32 val;

	if (!pcie->link_state)
		return 0;

	/* Disable HW_HOT_RST mode */
	val = appl_readl(pcie, APPL_CTRL);
	val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
		 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
	val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
	       APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
	val &= ~APPL_CTRL_HW_HOT_RST_EN;
	appl_writel(pcie, val, APPL_CTRL);

	return 0;
}
1679
/*
 * tegra_pcie_dw_shutdown() - System shutdown hook
 * @pdev: platform device for the controller
 *
 * Quiesces the link and powers the controller down without unregistering
 * the host bridge. IRQs are disabled first so no handler races the
 * power-down sequence.
 */
static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
{
	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);

	if (!pcie->link_state)
		return;

	debugfs_remove_recursive(pcie->debugfs);
	tegra_pcie_downstream_dev_to_D0(pcie);

	disable_irq(pcie->pci.pp.irq);
	if (IS_ENABLED(CONFIG_PCI_MSI))
		disable_irq(pcie->pci.pp.msi_irq);

	tegra_pcie_dw_pme_turnoff(pcie);
	__deinit_controller(pcie);
}
1697
/* Device-tree compatibles handled by this driver */
static const struct of_device_id tegra_pcie_dw_of_match[] = {
	{
		.compatible = "nvidia,tegra194-pcie",
	},
	{},
};
1704
/*
 * System-sleep callbacks: the late/early pair toggles HW-controlled hot
 * reset; the noirq pair fully powers the controller down and back up.
 */
static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
	.suspend_late = tegra_pcie_dw_suspend_late,
	.suspend_noirq = tegra_pcie_dw_suspend_noirq,
	.resume_noirq = tegra_pcie_dw_resume_noirq,
	.resume_early = tegra_pcie_dw_resume_early,
};
1711
/* Platform-driver glue; matched against DT via tegra_pcie_dw_of_match */
static struct platform_driver tegra_pcie_dw_driver = {
	.probe = tegra_pcie_dw_probe,
	.remove = tegra_pcie_dw_remove,
	.shutdown = tegra_pcie_dw_shutdown,
	.driver = {
		.name = "tegra194-pcie",
		.pm = &tegra_pcie_dw_pm_ops,
		.of_match_table = tegra_pcie_dw_of_match,
	},
};
module_platform_driver(tegra_pcie_dw_driver);
1723
/* Export the OF match table so the module can be autoloaded */
MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);

MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
MODULE_LICENSE("GPL v2");