// SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * Enable PCIe link L0s/L1 state and Clock Power Management |
| 4 | * |
| 5 | * Copyright (C) 2007 Intel |
| 6 | * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com) |
| 7 | * Copyright (C) Shaohua Li (shaohua.li@intel.com) |
| 8 | */ |
| 9 | |
| 10 | #include <linux/kernel.h> |
| 11 | #include <linux/module.h> |
| 12 | #include <linux/moduleparam.h> |
| 13 | #include <linux/pci.h> |
| 14 | #include <linux/pci_regs.h> |
| 15 | #include <linux/errno.h> |
| 16 | #include <linux/pm.h> |
| 17 | #include <linux/init.h> |
| 18 | #include <linux/slab.h> |
| 19 | #include <linux/jiffies.h> |
| 20 | #include <linux/delay.h> |
| 21 | #include "../pci.h" |
| 22 | |
| 23 | #ifdef MODULE_PARAM_PREFIX |
| 24 | #undef MODULE_PARAM_PREFIX |
| 25 | #endif |
| 26 | #define MODULE_PARAM_PREFIX "pcie_aspm." |
| 27 | |
/*
 * Note: those are not register definitions.  They are internal bitmask
 * values used to track per-link ASPM state in struct pcie_link_state;
 * they are translated to/from the LNKCTL / L1SS register encodings by
 * the configuration helpers below.
 */
#define ASPM_STATE_L0S_UP	(1)	/* Upstream direction L0s state */
#define ASPM_STATE_L0S_DW	(2)	/* Downstream direction L0s state */
#define ASPM_STATE_L1		(4)	/* L1 state */
#define ASPM_STATE_L1_1		(8)	/* ASPM L1.1 state */
#define ASPM_STATE_L1_2		(0x10)	/* ASPM L1.2 state */
#define ASPM_STATE_L1_1_PCIPM	(0x20)	/* PCI PM L1.1 state */
#define ASPM_STATE_L1_2_PCIPM	(0x40)	/* PCI PM L1.2 state */
#define ASPM_STATE_L1_SS_PCIPM	(ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1_2_PCIPM)
#define ASPM_STATE_L1_2_MASK	(ASPM_STATE_L1_2 | ASPM_STATE_L1_2_PCIPM)
#define ASPM_STATE_L1SS		(ASPM_STATE_L1_1 | ASPM_STATE_L1_1_PCIPM |\
				 ASPM_STATE_L1_2_MASK)
#define ASPM_STATE_L0S		(ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
#define ASPM_STATE_ALL		(ASPM_STATE_L0S | ASPM_STATE_L1 |	\
				 ASPM_STATE_L1SS)
| 43 | |
/* Exit latencies for one direction of a link (see calc_l0s/l1_latency()) */
struct aspm_latency {
	u32 l0s;			/* L0s latency (nsec) */
	u32 l1;				/* L1 latency (nsec) */
};
| 48 | |
/*
 * Per-link ASPM/Clock-PM bookkeeping, attached to the upstream (Downstream
 * Port) end of each PCIe link.  The aspm_* bitfields hold ASPM_STATE_*
 * masks, not raw register values.
 */
struct pcie_link_state {
	struct pci_dev *pdev;		/* Upstream component of the Link */
	struct pci_dev *downstream;	/* Downstream component, function 0 */
	struct pcie_link_state *root;	/* pointer to the root port link */
	struct pcie_link_state *parent;	/* pointer to the parent Link state */
	struct list_head sibling;	/* node in link_list */

	/* ASPM state */
	u32 aspm_support:7;		/* Supported ASPM state */
	u32 aspm_enabled:7;		/* Enabled ASPM state */
	u32 aspm_capable:7;		/* Capable ASPM state with latency */
	u32 aspm_default:7;		/* Default ASPM state by BIOS */
	u32 aspm_disable:7;		/* Disabled ASPM state */

	/* Clock PM state */
	u32 clkpm_capable:1;		/* Clock PM capable? */
	u32 clkpm_enabled:1;		/* Current Clock PM state */
	u32 clkpm_default:1;		/* Default Clock PM state by BIOS */
	u32 clkpm_disable:1;		/* Clock PM disabled */

	/* Exit latencies */
	struct aspm_latency latency_up;	/* Upstream direction exit latency */
	struct aspm_latency latency_dw;	/* Downstream direction exit latency */
	/*
	 * Endpoint acceptable latencies. A pcie downstream port only
	 * has one slot under it, so at most there are 8 functions.
	 * Indexed by PCI_FUNC() of the endpoint.
	 */
	struct aspm_latency acceptable[8];

	/* L1 PM Substate info */
	struct {
		u32 up_cap_ptr;		/* L1SS cap ptr in upstream dev */
		u32 dw_cap_ptr;		/* L1SS cap ptr in downstream dev */
		u32 ctl1;		/* value to be programmed in ctl1 */
		u32 ctl2;		/* value to be programmed in ctl2 */
	} l1ss;
};
| 86 | |
/* Module-wide ASPM state and the list of all tracked links */
static int aspm_disabled, aspm_force;
static bool aspm_support_enabled = true;
static DEFINE_MUTEX(aspm_lock);
static LIST_HEAD(link_list);

/* Link power-management policies (see policy_to_aspm/clkpm_state()) */
#define POLICY_DEFAULT 0	/* BIOS default setting */
#define POLICY_PERFORMANCE 1	/* high performance */
#define POLICY_POWERSAVE 2	/* high power saving */
#define POLICY_POWER_SUPERSAVE 3 /* possibly even more power saving */

/* Build-time default policy, selectable via Kconfig */
#ifdef CONFIG_PCIEASPM_PERFORMANCE
static int aspm_policy = POLICY_PERFORMANCE;
#elif defined CONFIG_PCIEASPM_POWERSAVE
static int aspm_policy = POLICY_POWERSAVE;
#elif defined CONFIG_PCIEASPM_POWER_SUPERSAVE
static int aspm_policy = POLICY_POWER_SUPERSAVE;
#else
static int aspm_policy;
#endif

static const char *policy_str[] = {
	[POLICY_DEFAULT] = "default",
	[POLICY_PERFORMANCE] = "performance",
	[POLICY_POWERSAVE] = "powersave",
	[POLICY_POWER_SUPERSAVE] = "powersupersave"
};

/* One second's worth of jiffies for the link-retrain poll loop */
#define LINK_RETRAIN_TIMEOUT HZ
| 115 | |
| 116 | static int policy_to_aspm_state(struct pcie_link_state *link) |
| 117 | { |
| 118 | switch (aspm_policy) { |
| 119 | case POLICY_PERFORMANCE: |
| 120 | /* Disable ASPM and Clock PM */ |
| 121 | return 0; |
| 122 | case POLICY_POWERSAVE: |
| 123 | /* Enable ASPM L0s/L1 */ |
| 124 | return (ASPM_STATE_L0S | ASPM_STATE_L1); |
| 125 | case POLICY_POWER_SUPERSAVE: |
| 126 | /* Enable Everything */ |
| 127 | return ASPM_STATE_ALL; |
| 128 | case POLICY_DEFAULT: |
| 129 | return link->aspm_default; |
| 130 | } |
| 131 | return 0; |
| 132 | } |
| 133 | |
| 134 | static int policy_to_clkpm_state(struct pcie_link_state *link) |
| 135 | { |
| 136 | switch (aspm_policy) { |
| 137 | case POLICY_PERFORMANCE: |
| 138 | /* Disable ASPM and Clock PM */ |
| 139 | return 0; |
| 140 | case POLICY_POWERSAVE: |
| 141 | case POLICY_POWER_SUPERSAVE: |
| 142 | /* Enable Clock PM */ |
| 143 | return 1; |
| 144 | case POLICY_DEFAULT: |
| 145 | return link->clkpm_default; |
| 146 | } |
| 147 | return 0; |
| 148 | } |
| 149 | |
| 150 | static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable) |
| 151 | { |
| 152 | struct pci_dev *child; |
| 153 | struct pci_bus *linkbus = link->pdev->subordinate; |
| 154 | u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0; |
| 155 | |
| 156 | list_for_each_entry(child, &linkbus->devices, bus_list) |
| 157 | pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL, |
| 158 | PCI_EXP_LNKCTL_CLKREQ_EN, |
| 159 | val); |
| 160 | link->clkpm_enabled = !!enable; |
| 161 | } |
| 162 | |
| 163 | static void pcie_set_clkpm(struct pcie_link_state *link, int enable) |
| 164 | { |
| 165 | /* |
| 166 | * Don't enable Clock PM if the link is not Clock PM capable |
| 167 | * or Clock PM is disabled |
| 168 | */ |
| 169 | if (!link->clkpm_capable || link->clkpm_disable) |
| 170 | enable = 0; |
| 171 | /* Need nothing if the specified equals to current state */ |
| 172 | if (link->clkpm_enabled == enable) |
| 173 | return; |
| 174 | pcie_set_clkpm_nocheck(link, enable); |
| 175 | } |
| 176 | |
| 177 | static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) |
| 178 | { |
| 179 | int capable = 1, enabled = 1; |
| 180 | u32 reg32; |
| 181 | u16 reg16; |
| 182 | struct pci_dev *child; |
| 183 | struct pci_bus *linkbus = link->pdev->subordinate; |
| 184 | |
| 185 | /* All functions should have the same cap and state, take the worst */ |
| 186 | list_for_each_entry(child, &linkbus->devices, bus_list) { |
| 187 | pcie_capability_read_dword(child, PCI_EXP_LNKCAP, ®32); |
| 188 | if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) { |
| 189 | capable = 0; |
| 190 | enabled = 0; |
| 191 | break; |
| 192 | } |
| 193 | pcie_capability_read_word(child, PCI_EXP_LNKCTL, ®16); |
| 194 | if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN)) |
| 195 | enabled = 0; |
| 196 | } |
| 197 | link->clkpm_enabled = enabled; |
| 198 | link->clkpm_default = enabled; |
| 199 | link->clkpm_capable = capable; |
| 200 | link->clkpm_disable = blacklist ? 1 : 0; |
| 201 | } |
| 202 | |
| 203 | static int pcie_wait_for_retrain(struct pci_dev *pdev) |
| 204 | { |
| 205 | unsigned long end_jiffies; |
| 206 | u16 reg16; |
| 207 | |
| 208 | /* Wait for Link Training to be cleared by hardware */ |
| 209 | end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT; |
| 210 | do { |
| 211 | pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, ®16); |
| 212 | if (!(reg16 & PCI_EXP_LNKSTA_LT)) |
| 213 | return 0; |
| 214 | msleep(1); |
| 215 | } while (time_before(jiffies, end_jiffies)); |
| 216 | |
| 217 | return -ETIMEDOUT; |
| 218 | } |
| 219 | |
| 220 | static int pcie_retrain_link(struct pcie_link_state *link) |
| 221 | { |
| 222 | struct pci_dev *parent = link->pdev; |
| 223 | int rc; |
| 224 | u16 reg16; |
| 225 | |
| 226 | /* |
| 227 | * Ensure the updated LNKCTL parameters are used during link |
| 228 | * training by checking that there is no ongoing link training to |
| 229 | * avoid LTSSM race as recommended in Implementation Note at the |
| 230 | * end of PCIe r6.0.1 sec 7.5.3.7. |
| 231 | */ |
| 232 | rc = pcie_wait_for_retrain(parent); |
| 233 | if (rc) |
| 234 | return rc; |
| 235 | |
| 236 | pcie_capability_read_word(parent, PCI_EXP_LNKCTL, ®16); |
| 237 | reg16 |= PCI_EXP_LNKCTL_RL; |
| 238 | pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); |
| 239 | if (parent->clear_retrain_link) { |
| 240 | /* |
| 241 | * Due to an erratum in some devices the Retrain Link bit |
| 242 | * needs to be cleared again manually to allow the link |
| 243 | * training to succeed. |
| 244 | */ |
| 245 | reg16 &= ~PCI_EXP_LNKCTL_RL; |
| 246 | pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); |
| 247 | } |
| 248 | |
| 249 | return pcie_wait_for_retrain(parent); |
| 250 | } |
| 251 | |
/*
 * pcie_aspm_configure_common_clock: check if the 2 ends of a link
 * could use common clock. If they are, configure them to use the
 * common clock. That will reduce the ASPM state exit latency.
 *
 * On failure to retrain after reprogramming, the original Common Clock
 * Configuration bits of every function are restored.
 */
static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
	int same_clock = 1;
	u16 reg16, ccc, parent_old_ccc, child_old_ccc[8];
	struct pci_dev *child, *parent = link->pdev;
	struct pci_bus *linkbus = parent->subordinate;
	/*
	 * All functions of a slot should have the same Slot Clock
	 * Configuration, so just check one function
	 */
	child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
	BUG_ON(!pci_is_pcie(child));

	/* Check downstream component if bit Slot Clock Configuration is 1 */
	pcie_capability_read_word(child, PCI_EXP_LNKSTA, &reg16);
	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
		same_clock = 0;

	/* Check upstream component if bit Slot Clock Configuration is 1 */
	pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
		same_clock = 0;

	/* Port might be already in common clock mode */
	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
	parent_old_ccc = reg16 & PCI_EXP_LNKCTL_CCC;
	if (same_clock && (reg16 & PCI_EXP_LNKCTL_CCC)) {
		bool consistent = true;

		/* Only consistent if every function also has CCC set */
		list_for_each_entry(child, &linkbus->devices, bus_list) {
			pcie_capability_read_word(child, PCI_EXP_LNKCTL,
						  &reg16);
			if (!(reg16 & PCI_EXP_LNKCTL_CCC)) {
				consistent = false;
				break;
			}
		}
		if (consistent)
			return;
		pci_warn(parent, "ASPM: current common clock configuration is broken, reconfiguring\n");
	}

	ccc = same_clock ? PCI_EXP_LNKCTL_CCC : 0;
	/* Configure downstream component, all functions */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
		/* Save per-function CCC so it can be restored on failure */
		child_old_ccc[PCI_FUNC(child->devfn)] = reg16 & PCI_EXP_LNKCTL_CCC;
		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CCC, ccc);
	}

	/* Configure upstream component */
	pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_CCC, ccc);

	if (pcie_retrain_link(link)) {

		/* Training failed. Restore common clock configurations */
		pci_err(parent, "ASPM: Could not configure common clock\n");
		list_for_each_entry(child, &linkbus->devices, bus_list)
			pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
							   PCI_EXP_LNKCTL_CCC,
							   child_old_ccc[PCI_FUNC(child->devfn)]);
		pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CCC, parent_old_ccc);
	}
}
| 324 | |
| 325 | /* Convert L0s latency encoding to ns */ |
| 326 | static u32 calc_l0s_latency(u32 encoding) |
| 327 | { |
| 328 | if (encoding == 0x7) |
| 329 | return (5 * 1000); /* > 4us */ |
| 330 | return (64 << encoding); |
| 331 | } |
| 332 | |
| 333 | /* Convert L0s acceptable latency encoding to ns */ |
| 334 | static u32 calc_l0s_acceptable(u32 encoding) |
| 335 | { |
| 336 | if (encoding == 0x7) |
| 337 | return -1U; |
| 338 | return (64 << encoding); |
| 339 | } |
| 340 | |
| 341 | /* Convert L1 latency encoding to ns */ |
| 342 | static u32 calc_l1_latency(u32 encoding) |
| 343 | { |
| 344 | if (encoding == 0x7) |
| 345 | return (65 * 1000); /* > 64us */ |
| 346 | return (1000 << encoding); |
| 347 | } |
| 348 | |
| 349 | /* Convert L1 acceptable latency encoding to ns */ |
| 350 | static u32 calc_l1_acceptable(u32 encoding) |
| 351 | { |
| 352 | if (encoding == 0x7) |
| 353 | return -1U; |
| 354 | return (1000 << encoding); |
| 355 | } |
| 356 | |
| 357 | /* Convert L1SS T_pwr encoding to usec */ |
| 358 | static u32 calc_l1ss_pwron(struct pci_dev *pdev, u32 scale, u32 val) |
| 359 | { |
| 360 | switch (scale) { |
| 361 | case 0: |
| 362 | return val * 2; |
| 363 | case 1: |
| 364 | return val * 10; |
| 365 | case 2: |
| 366 | return val * 100; |
| 367 | } |
| 368 | pci_err(pdev, "%s: Invalid T_PwrOn scale: %u\n", __func__, scale); |
| 369 | return 0; |
| 370 | } |
| 371 | |
| 372 | static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value) |
| 373 | { |
| 374 | u32 threshold_ns = threshold_us * 1000; |
| 375 | |
| 376 | /* See PCIe r3.1, sec 7.33.3 and sec 6.18 */ |
| 377 | if (threshold_ns < 32) { |
| 378 | *scale = 0; |
| 379 | *value = threshold_ns; |
| 380 | } else if (threshold_ns < 1024) { |
| 381 | *scale = 1; |
| 382 | *value = threshold_ns >> 5; |
| 383 | } else if (threshold_ns < 32768) { |
| 384 | *scale = 2; |
| 385 | *value = threshold_ns >> 10; |
| 386 | } else if (threshold_ns < 1048576) { |
| 387 | *scale = 3; |
| 388 | *value = threshold_ns >> 15; |
| 389 | } else if (threshold_ns < 33554432) { |
| 390 | *scale = 4; |
| 391 | *value = threshold_ns >> 20; |
| 392 | } else { |
| 393 | *scale = 5; |
| 394 | *value = threshold_ns >> 25; |
| 395 | } |
| 396 | } |
| 397 | |
/*
 * Snapshot of one component's ASPM-related config space, filled in by
 * pcie_get_aspm_reg().
 */
struct aspm_register_info {
	u32 support:2;			/* LNKCAP ASPM Support field */
	u32 enabled:2;			/* LNKCTL ASPM Control field */
	u32 latency_encoding_l0s;	/* LNKCAP L0s Exit Latency encoding */
	u32 latency_encoding_l1;	/* LNKCAP L1 Exit Latency encoding */

	/* L1 substates */
	u32 l1ss_cap_ptr;		/* offset of L1SS ext cap, 0 if absent */
	u32 l1ss_cap;			/* L1SS Capabilities register */
	u32 l1ss_ctl1;			/* L1SS Control 1 register */
	u32 l1ss_ctl2;			/* L1SS Control 2 register */
};
| 410 | |
/*
 * Read @pdev's ASPM capability/control state and its L1 PM Substates
 * extended capability (if present) into @info.
 */
static void pcie_get_aspm_reg(struct pci_dev *pdev,
			      struct aspm_register_info *info)
{
	u16 reg16;
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &reg32);
	/* Shifts align the masked LNKCAP fields down to bit 0 */
	info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
	info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
	info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &reg16);
	info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC;

	/* Read L1 PM substate capabilities */
	info->l1ss_cap = info->l1ss_ctl1 = info->l1ss_ctl2 = 0;
	info->l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
	if (!info->l1ss_cap_ptr)
		return;
	pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CAP,
			      &info->l1ss_cap);
	/* Capability present but no substate supported: treat as absent */
	if (!(info->l1ss_cap & PCI_L1SS_CAP_L1_PM_SS)) {
		info->l1ss_cap = 0;
		return;
	}

	/*
	 * If we don't have LTR for the entire path from the Root Complex
	 * to this device, we can't use ASPM L1.2 because it relies on the
	 * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
	 */
	if (!pdev->ltr_path)
		info->l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;

	pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL1,
			      &info->l1ss_ctl1);
	pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL2,
			      &info->l1ss_ctl2);
}
| 449 | |
/*
 * Walk from @endpoint's upstream link toward the root, clearing any
 * aspm_capable bit whose exit latency would exceed the endpoint's
 * acceptable latency (from its DEVCAP, stored in link->acceptable[]).
 */
static void pcie_aspm_check_latency(struct pci_dev *endpoint)
{
	u32 latency, l1_switch_latency = 0;
	struct aspm_latency *acceptable;
	struct pcie_link_state *link;

	/* Device not in D0 doesn't need latency check */
	if ((endpoint->current_state != PCI_D0) &&
	    (endpoint->current_state != PCI_UNKNOWN))
		return;

	link = endpoint->bus->self->link_state;
	acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];

	while (link) {
		/* Check upstream direction L0s latency */
		if ((link->aspm_capable & ASPM_STATE_L0S_UP) &&
		    (link->latency_up.l0s > acceptable->l0s))
			link->aspm_capable &= ~ASPM_STATE_L0S_UP;

		/* Check downstream direction L0s latency */
		if ((link->aspm_capable & ASPM_STATE_L0S_DW) &&
		    (link->latency_dw.l0s > acceptable->l0s))
			link->aspm_capable &= ~ASPM_STATE_L0S_DW;
		/*
		 * Check L1 latency.
		 * Every switch on the path to root complex need 1
		 * more microsecond for L1. Spec doesn't mention L0s.
		 *
		 * The exit latencies for L1 substates are not advertised
		 * by a device. Since the spec also doesn't mention a way
		 * to determine max latencies introduced by enabling L1
		 * substates on the components, it is not clear how to do
		 * a L1 substate exit latency check. We assume that the
		 * L1 exit latencies advertised by a device include L1
		 * substate latencies (and hence do not do any check).
		 */
		latency = max_t(u32, link->latency_up.l1, link->latency_dw.l1);
		if ((link->aspm_capable & ASPM_STATE_L1) &&
		    (latency + l1_switch_latency > acceptable->l1))
			link->aspm_capable &= ~ASPM_STATE_L1;
		/* 1us (in ns) per traversed switch, added after its check */
		l1_switch_latency += 1000;

		link = link->parent;
	}
}
| 496 | |
| 497 | /* |
| 498 | * The L1 PM substate capability is only implemented in function 0 in a |
| 499 | * multi function device. |
| 500 | */ |
| 501 | static struct pci_dev *pci_function_0(struct pci_bus *linkbus) |
| 502 | { |
| 503 | struct pci_dev *child; |
| 504 | |
| 505 | list_for_each_entry(child, &linkbus->devices, bus_list) |
| 506 | if (PCI_FUNC(child->devfn) == 0) |
| 507 | return child; |
| 508 | return NULL; |
| 509 | } |
| 510 | |
/*
 * Calculate L1.2 PM substate timing parameters.
 *
 * Fills link->l1ss with the capability pointers and the CTL1/CTL2
 * values to be programmed later by pcie_config_aspm_l1ss().
 */
static void aspm_calc_l1ss_info(struct pcie_link_state *link,
				struct aspm_register_info *upreg,
				struct aspm_register_info *dwreg)
{
	u32 val1, val2, scale1, scale2;
	u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;

	link->l1ss.up_cap_ptr = upreg->l1ss_cap_ptr;
	link->l1ss.dw_cap_ptr = dwreg->l1ss_cap_ptr;
	link->l1ss.ctl1 = link->l1ss.ctl2 = 0;

	/* Timing parameters only matter if some form of L1.2 is supported */
	if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
		return;

	/* Choose the greater of the two Port Common_Mode_Restore_Times */
	val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
	val2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
	t_common_mode = max(val1, val2);

	/* Choose the greater of the two Port T_POWER_ON times */
	val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
	scale1 = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
	val2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
	scale2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;

	/* ctl2 gets the winner's raw scale/value pair (value at bit 3) */
	if (calc_l1ss_pwron(link->pdev, scale1, val1) >
	    calc_l1ss_pwron(link->downstream, scale2, val2)) {
		link->l1ss.ctl2 |= scale1 | (val1 << 3);
		t_power_on = calc_l1ss_pwron(link->pdev, scale1, val1);
	} else {
		link->l1ss.ctl2 |= scale2 | (val2 << 3);
		t_power_on = calc_l1ss_pwron(link->downstream, scale2, val2);
	}

	/*
	 * Set LTR_L1.2_THRESHOLD to the time required to transition the
	 * Link from L0 to L1.2 and back to L0 so we enter L1.2 only if
	 * downstream devices report (via LTR) that they can tolerate at
	 * least that much latency.
	 *
	 * Based on PCIe r3.1, sec 5.5.3.3.1, Figures 5-16 and 5-17, and
	 * Table 5-11. T(POWER_OFF) is at most 2us and T(L1.2) is at
	 * least 4us.
	 */
	l1_2_threshold = 2 + 4 + t_common_mode + t_power_on;
	encode_l12_threshold(l1_2_threshold, &scale, &value);
	link->l1ss.ctl1 |= t_common_mode << 8 | scale << 29 | value << 16;
}
| 560 | |
/*
 * Probe both ends of @link and initialize its ASPM support/enabled/
 * capable state, exit latencies, L1 substate parameters, and the
 * endpoints' acceptable latencies.
 *
 * @blacklist: nonzero if ASPM must be forced off for this link.
 */
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	struct pci_bus *linkbus = parent->subordinate;
	struct aspm_register_info upreg, dwreg;

	if (blacklist) {
		/* Set enabled/disable so that we will disable ASPM later */
		link->aspm_enabled = ASPM_STATE_ALL;
		link->aspm_disable = ASPM_STATE_ALL;
		return;
	}

	/* Get upstream/downstream components' register state */
	pcie_get_aspm_reg(parent, &upreg);
	pcie_get_aspm_reg(child, &dwreg);

	/*
	 * If ASPM not supported, don't mess with the clocks and link,
	 * bail out now.
	 */
	if (!(upreg.support & dwreg.support))
		return;

	/* Configure common clock before checking latencies */
	pcie_aspm_configure_common_clock(link);

	/*
	 * Re-read upstream/downstream components' register state
	 * after clock configuration
	 */
	pcie_get_aspm_reg(parent, &upreg);
	pcie_get_aspm_reg(child, &dwreg);

	/*
	 * Setup L0s state
	 *
	 * Note that we must not enable L0s in either direction on a
	 * given link unless components on both sides of the link each
	 * support L0s.
	 */
	if (dwreg.support & upreg.support & PCIE_LINK_STATE_L0S)
		link->aspm_support |= ASPM_STATE_L0S;
	if (dwreg.enabled & PCIE_LINK_STATE_L0S)
		link->aspm_enabled |= ASPM_STATE_L0S_UP;
	if (upreg.enabled & PCIE_LINK_STATE_L0S)
		link->aspm_enabled |= ASPM_STATE_L0S_DW;
	link->latency_up.l0s = calc_l0s_latency(upreg.latency_encoding_l0s);
	link->latency_dw.l0s = calc_l0s_latency(dwreg.latency_encoding_l0s);

	/* Setup L1 state */
	if (upreg.support & dwreg.support & PCIE_LINK_STATE_L1)
		link->aspm_support |= ASPM_STATE_L1;
	if (upreg.enabled & dwreg.enabled & PCIE_LINK_STATE_L1)
		link->aspm_enabled |= ASPM_STATE_L1;
	link->latency_up.l1 = calc_l1_latency(upreg.latency_encoding_l1);
	link->latency_dw.l1 = calc_l1_latency(dwreg.latency_encoding_l1);

	/* Setup L1 substate: supported only when both ends advertise it */
	if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
		link->aspm_support |= ASPM_STATE_L1_1;
	if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
		link->aspm_support |= ASPM_STATE_L1_2;
	if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
		link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
	if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
		link->aspm_support |= ASPM_STATE_L1_2_PCIPM;

	/* An L1 substate counts as enabled only when enabled at both ends */
	if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
		link->aspm_enabled |= ASPM_STATE_L1_1;
	if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
		link->aspm_enabled |= ASPM_STATE_L1_2;
	if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
		link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
	if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
		link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;

	if (link->aspm_support & ASPM_STATE_L1SS)
		aspm_calc_l1ss_info(link, &upreg, &dwreg);

	/* Save default state */
	link->aspm_default = link->aspm_enabled;

	/* Setup initial capable state. Will be updated later */
	link->aspm_capable = link->aspm_support;

	/* Get and check endpoint acceptable latencies */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		u32 reg32, encoding;
		struct aspm_latency *acceptable =
			&link->acceptable[PCI_FUNC(child->devfn)];

		/* Only endpoints advertise acceptable latencies */
		if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT &&
		    pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)
			continue;

		pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
		/* Calculate endpoint L0s acceptable latency */
		encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
		acceptable->l0s = calc_l0s_acceptable(encoding);
		/* Calculate endpoint L1 acceptable latency */
		encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
		acceptable->l1 = calc_l1_acceptable(encoding);

		pcie_aspm_check_latency(child);
	}
}
| 668 | |
| 669 | static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos, |
| 670 | u32 clear, u32 set) |
| 671 | { |
| 672 | u32 val; |
| 673 | |
| 674 | pci_read_config_dword(pdev, pos, &val); |
| 675 | val &= ~clear; |
| 676 | val |= set; |
| 677 | pci_write_config_dword(pdev, pos, val); |
| 678 | } |
| 679 | |
/*
 * Configure the ASPM L1 substates.
 *
 * @state: ASPM_STATE_* mask of the substates to end up enabled.  The
 * sequencing below follows the spec-mandated ordering; the statement
 * order must not be changed.
 */
static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
{
	u32 val, enable_req;
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	u32 up_cap_ptr = link->l1ss.up_cap_ptr;
	u32 dw_cap_ptr = link->l1ss.dw_cap_ptr;

	/* Bits requested in @state that are not currently enabled */
	enable_req = (link->aspm_enabled ^ state) & state;

	/*
	 * Here are the rules specified in the PCIe spec for enabling L1SS:
	 * - When enabling L1.x, enable bit at parent first, then at child
	 * - When disabling L1.x, disable bit at child first, then at parent
	 * - When enabling ASPM L1.x, need to disable L1
	 *   (at child followed by parent).
	 * - The ASPM/PCIPM L1.2 must be disabled while programming timing
	 *   parameters
	 *
	 * To keep it simple, disable all L1SS bits first, and later enable
	 * what is needed.
	 */

	/* Disable all L1 substates */
	pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, 0);
	pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, 0);
	/*
	 * If needed, disable L1, and it gets enabled later
	 * in pcie_config_aspm_link().
	 */
	if (enable_req & (ASPM_STATE_L1_1 | ASPM_STATE_L1_2)) {
		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPM_L1, 0);
		pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPM_L1, 0);
	}

	if (enable_req & ASPM_STATE_L1_2_MASK) {

		/* Program T_POWER_ON times in both ports */
		pci_write_config_dword(parent, up_cap_ptr + PCI_L1SS_CTL2,
				       link->l1ss.ctl2);
		pci_write_config_dword(child, dw_cap_ptr + PCI_L1SS_CTL2,
				       link->l1ss.ctl2);

		/* Program Common_Mode_Restore_Time in upstream device */
		pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
					PCI_L1SS_CTL1_CM_RESTORE_TIME,
					link->l1ss.ctl1);

		/* Program LTR_L1.2_THRESHOLD time in both ports */
		pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
					PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
					PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
					link->l1ss.ctl1);
		pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
					PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
					PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
					link->l1ss.ctl1);
	}

	/* Translate the requested ASPM_STATE_* bits to CTL1 enable bits */
	val = 0;
	if (state & ASPM_STATE_L1_1)
		val |= PCI_L1SS_CTL1_ASPM_L1_1;
	if (state & ASPM_STATE_L1_2)
		val |= PCI_L1SS_CTL1_ASPM_L1_2;
	if (state & ASPM_STATE_L1_1_PCIPM)
		val |= PCI_L1SS_CTL1_PCIPM_L1_1;
	if (state & ASPM_STATE_L1_2_PCIPM)
		val |= PCI_L1SS_CTL1_PCIPM_L1_2;

	/* Enable what we need to enable */
	pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, val);
	pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, val);
}
| 759 | |
/* Program the ASPM Control field of @pdev's Link Control register. */
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPMC, val);
}
| 765 | |
/*
 * Apply the requested ASPM_STATE_* mask to @link, filtering it against
 * the link's capable/disable masks and the spec's preconditions, then
 * programming both ends in the spec-mandated order.
 */
static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
{
	u32 upstream = 0, dwstream = 0;
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	struct pci_bus *linkbus = parent->subordinate;

	/* Enable only the states that were not explicitly disabled */
	state &= (link->aspm_capable & ~link->aspm_disable);

	/* Can't enable any substates if L1 is not enabled */
	if (!(state & ASPM_STATE_L1))
		state &= ~ASPM_STATE_L1SS;

	/* Spec says both ports must be in D0 before enabling PCI PM substates*/
	if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) {
		state &= ~ASPM_STATE_L1_SS_PCIPM;
		/* Keep whatever PCI PM substates are already enabled */
		state |= (link->aspm_enabled & ASPM_STATE_L1_SS_PCIPM);
	}

	/* Nothing to do if the link is already in the requested state */
	if (link->aspm_enabled == state)
		return;
	/* Convert ASPM state to upstream/downstream ASPM register state */
	if (state & ASPM_STATE_L0S_UP)
		dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
	if (state & ASPM_STATE_L0S_DW)
		upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
	if (state & ASPM_STATE_L1) {
		upstream |= PCI_EXP_LNKCTL_ASPM_L1;
		dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
	}

	if (link->aspm_capable & ASPM_STATE_L1SS)
		pcie_config_aspm_l1ss(link, state);

	/*
	 * Spec 2.0 suggests all functions should be configured the
	 * same setting for ASPM. Enabling ASPM L1 should be done in
	 * upstream component first and then downstream, and vice
	 * versa for disabling ASPM L1. Spec doesn't mention L0S.
	 */
	if (state & ASPM_STATE_L1)
		pcie_config_aspm_dev(parent, upstream);
	list_for_each_entry(child, &linkbus->devices, bus_list)
		pcie_config_aspm_dev(child, dwstream);
	if (!(state & ASPM_STATE_L1))
		pcie_config_aspm_dev(parent, upstream);

	link->aspm_enabled = state;
}
| 816 | |
| 817 | static void pcie_config_aspm_path(struct pcie_link_state *link) |
| 818 | { |
| 819 | while (link) { |
| 820 | pcie_config_aspm_link(link, policy_to_aspm_state(link)); |
| 821 | link = link->parent; |
| 822 | } |
| 823 | } |
| 824 | |
/*
 * Detach @link from its pci_dev and release it.  link->pdev must be read
 * before the kfree(), hence the ordering.  Callers remove @link from
 * link_list before calling this.
 */
static void free_link_state(struct pcie_link_state *link)
{
	link->pdev->link_state = NULL;
	kfree(link);
}
| 830 | |
/*
 * pcie_aspm_sanity_check - check whether ASPM may be handled for @pdev's link.
 * @pdev: the downstream port at the upstream end of the link
 *
 * Returns 0 when every function below @pdev is PCIe and safe for ASPM,
 * -EINVAL otherwise (callers then blacklist ASPM for the whole slot).
 */
static int pcie_aspm_sanity_check(struct pci_dev *pdev)
{
	struct pci_dev *child;
	u32 reg32;

	/*
	 * Some functions in a slot might not all be PCIe functions,
	 * very strange. Disable ASPM for the whole slot
	 */
	list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
		if (!pci_is_pcie(child))
			return -EINVAL;

		/*
		 * If ASPM is disabled then we're not going to change
		 * the BIOS state. It's safe to continue even if it's a
		 * pre-1.1 device
		 */

		if (aspm_disabled)
			continue;

		/*
		 * Disable ASPM for pre-1.1 PCIe device, we follow MS to use
		 * RBER bit to determine if a function is 1.1 version device
		 */
		pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
		if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
			pci_info(child, "disabling ASPM on pre-1.1 PCIe device. You can enable it with 'pcie_aspm=force'\n");
			return -EINVAL;
		}
	}
	return 0;
}
| 865 | |
/*
 * alloc_pcie_link_state - allocate and register a pcie_link_state for @pdev.
 * @pdev: the downstream port at the upstream end of the link
 *
 * Returns the new link state, or NULL on allocation failure or when the
 * upstream bridge exists but has no link state of its own.  On success
 * the link is added to link_list and pdev->link_state is set.  Called
 * with aspm_lock held (see pcie_aspm_init_link_state()).
 */
static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
{
	struct pcie_link_state *link;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return NULL;

	INIT_LIST_HEAD(&link->sibling);
	link->pdev = pdev;
	/* Downstream end of the link is represented by its function 0 */
	link->downstream = pci_function_0(pdev->subordinate);

	/*
	 * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
	 * hierarchies.  Note that some PCIe host implementations omit
	 * the root ports entirely, in which case a downstream port on
	 * a switch may become the root of the link state chain for all
	 * its subordinate endpoints.
	 */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
	    pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
	    !pdev->bus->parent->self) {
		link->root = link;
	} else {
		/* Chain to the upstream bridge's link state, if it has one */
		struct pcie_link_state *parent;

		parent = pdev->bus->parent->self->link_state;
		if (!parent) {
			kfree(link);
			return NULL;
		}

		link->parent = parent;
		link->root = link->parent->root;
	}

	list_add(&link->sibling, &link_list);
	pdev->link_state = link;
	return link;
}
| 906 | |
| 907 | /* |
| 908 | * pcie_aspm_init_link_state: Initiate PCI express link state. |
| 909 | * It is called after the pcie and its children devices are scanned. |
| 910 | * @pdev: the root port or switch downstream port |
| 911 | */ |
| 912 | void pcie_aspm_init_link_state(struct pci_dev *pdev) |
| 913 | { |
| 914 | struct pcie_link_state *link; |
| 915 | int blacklist = !!pcie_aspm_sanity_check(pdev); |
| 916 | |
| 917 | if (!aspm_support_enabled) |
| 918 | return; |
| 919 | |
| 920 | if (pdev->link_state) |
| 921 | return; |
| 922 | |
| 923 | /* |
| 924 | * We allocate pcie_link_state for the component on the upstream |
| 925 | * end of a Link, so there's nothing to do unless this device is |
| 926 | * downstream port. |
| 927 | */ |
| 928 | if (!pcie_downstream_port(pdev)) |
| 929 | return; |
| 930 | |
| 931 | /* VIA has a strange chipset, root port is under a bridge */ |
| 932 | if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT && |
| 933 | pdev->bus->self) |
| 934 | return; |
| 935 | |
| 936 | down_read(&pci_bus_sem); |
| 937 | if (list_empty(&pdev->subordinate->devices)) |
| 938 | goto out; |
| 939 | |
| 940 | mutex_lock(&aspm_lock); |
| 941 | link = alloc_pcie_link_state(pdev); |
| 942 | if (!link) |
| 943 | goto unlock; |
| 944 | /* |
| 945 | * Setup initial ASPM state. Note that we need to configure |
| 946 | * upstream links also because capable state of them can be |
| 947 | * update through pcie_aspm_cap_init(). |
| 948 | */ |
| 949 | pcie_aspm_cap_init(link, blacklist); |
| 950 | |
| 951 | /* Setup initial Clock PM state */ |
| 952 | pcie_clkpm_cap_init(link, blacklist); |
| 953 | |
| 954 | /* |
| 955 | * At this stage drivers haven't had an opportunity to change the |
| 956 | * link policy setting. Enabling ASPM on broken hardware can cripple |
| 957 | * it even before the driver has had a chance to disable ASPM, so |
| 958 | * default to a safe level right now. If we're enabling ASPM beyond |
| 959 | * the BIOS's expectation, we'll do so once pci_enable_device() is |
| 960 | * called. |
| 961 | */ |
| 962 | if (aspm_policy != POLICY_POWERSAVE && |
| 963 | aspm_policy != POLICY_POWER_SUPERSAVE) { |
| 964 | pcie_config_aspm_path(link); |
| 965 | pcie_set_clkpm(link, policy_to_clkpm_state(link)); |
| 966 | } |
| 967 | |
| 968 | unlock: |
| 969 | mutex_unlock(&aspm_lock); |
| 970 | out: |
| 971 | up_read(&pci_bus_sem); |
| 972 | } |
| 973 | |
/* Recheck latencies and update aspm_capable for links under the root */
static void pcie_update_aspm_capable(struct pcie_link_state *root)
{
	struct pcie_link_state *link;
	/* Must be called with the root of a link state chain */
	BUG_ON(root->parent);
	/* Pass 1: reset each link's capable mask to everything it supports */
	list_for_each_entry(link, &link_list, sibling) {
		if (link->root != root)
			continue;
		link->aspm_capable = link->aspm_support;
	}
	/*
	 * Pass 2: recheck exit latencies for every (legacy) endpoint below
	 * each link in the hierarchy.
	 */
	list_for_each_entry(link, &link_list, sibling) {
		struct pci_dev *child;
		struct pci_bus *linkbus = link->pdev->subordinate;
		if (link->root != root)
			continue;
		list_for_each_entry(child, &linkbus->devices, bus_list) {
			if ((pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT) &&
			    (pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END))
				continue;
			pcie_aspm_check_latency(child);
		}
	}
}
| 997 | |
/*
 * pcie_aspm_exit_link_state - tear down ASPM state when a device is removed.
 * @pdev: the endpoint device
 *
 * Disables ASPM on the link above @pdev, frees that link's state, and
 * reconfigures the remaining upstream links.
 */
void pcie_aspm_exit_link_state(struct pci_dev *pdev)
{
	struct pci_dev *parent = pdev->bus->self;
	struct pcie_link_state *link, *root, *parent_link;

	if (!parent || !parent->link_state)
		return;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);

	link = parent->link_state;
	root = link->root;
	/* Remember the upstream chain before the link state is freed */
	parent_link = link->parent;

	/*
	 * link->downstream is a pointer to the pci_dev of function 0.  If
	 * we remove that function, the pci_dev is about to be deallocated,
	 * so we can't use link->downstream again.  Free the link state to
	 * avoid this.
	 *
	 * If we're removing a non-0 function, it's possible we could
	 * retain the link state, but PCIe r6.0, sec 7.5.3.7, recommends
	 * programming the same ASPM Control value for all functions of
	 * multi-function devices, so disable ASPM for all of them.
	 */
	pcie_config_aspm_link(link, 0);
	list_del(&link->sibling);
	free_link_state(link);

	/* Recheck latencies and configure upstream links */
	if (parent_link) {
		pcie_update_aspm_capable(root);
		pcie_config_aspm_path(parent_link);
	}

	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
}
| 1038 | |
/*
 * pcie_aspm_pm_state_change - re-evaluate ASPM after a device PM transition.
 * @pdev: the root port or switch downstream port
 */
void pcie_aspm_pm_state_change(struct pci_dev *pdev)
{
	struct pci_dev *link = pdev->link_state;

	if (aspm_disabled || !link)
		return;
	/*
	 * Devices changed PM state, we should recheck if latency
	 * meets all functions' requirement
	 */
	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	pcie_update_aspm_capable(link->root);
	pcie_config_aspm_path(link);
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
}
| 1057 | |
/*
 * pcie_aspm_powersave_config_link - apply an aggressive ASPM policy to a link.
 * @pdev: the root port or switch downstream port
 *
 * No-op unless the global policy is POWERSAVE or POWER_SUPERSAVE;
 * pcie_aspm_init_link_state() defers enabling those levels until this
 * is called (from the pci_enable_device() path — see the comment there).
 */
void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
{
	struct pcie_link_state *link = pdev->link_state;

	if (aspm_disabled || !link)
		return;

	if (aspm_policy != POLICY_POWERSAVE &&
	    aspm_policy != POLICY_POWER_SUPERSAVE)
		return;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	pcie_config_aspm_path(link);
	pcie_set_clkpm(link, policy_to_clkpm_state(link));
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
}
| 1076 | |
/*
 * __pci_disable_link_state - honor a driver request to disable ASPM states.
 * @pdev: device whose link should be restricted
 * @state: PCIE_LINK_STATE_* bits to disable
 * @sem: take pci_bus_sem here (false when the caller already holds it)
 *
 * Returns 0 on success, -EINVAL when there is no link state for the
 * relevant port, -EPERM when the OS doesn't control ASPM.
 */
static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
{
	struct pci_dev *parent = pdev->bus->self;
	struct pcie_link_state *link;

	if (!pci_is_pcie(pdev))
		return 0;

	/* Link state lives on the downstream port at the upstream end */
	if (pcie_downstream_port(pdev))
		parent = pdev;
	if (!parent || !parent->link_state)
		return -EINVAL;

	/*
	 * A driver requested that ASPM be disabled on this device, but
	 * if we don't have permission to manage ASPM (e.g., on ACPI
	 * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
	 * the _OSC method), we can't honor that request.  Windows has
	 * a similar mechanism using "PciASPMOptOut", which is also
	 * ignored in this situation.
	 */
	if (aspm_disabled) {
		pci_warn(pdev, "can't disable ASPM; OS doesn't have ASPM control\n");
		return -EPERM;
	}

	if (sem)
		down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	link = parent->link_state;
	if (state & PCIE_LINK_STATE_L0S)
		link->aspm_disable |= ASPM_STATE_L0S;
	if (state & PCIE_LINK_STATE_L1)
		link->aspm_disable |= ASPM_STATE_L1;
	pcie_config_aspm_link(link, policy_to_aspm_state(link));

	if (state & PCIE_LINK_STATE_CLKPM)
		link->clkpm_disable = 1;
	pcie_set_clkpm(link, policy_to_clkpm_state(link));
	mutex_unlock(&aspm_lock);
	if (sem)
		up_read(&pci_bus_sem);

	return 0;
}
| 1122 | |
/*
 * pci_disable_link_state_locked - like pci_disable_link_state(), but the
 * caller is responsible for holding pci_bus_sem (read).
 */
int pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{
	return __pci_disable_link_state(pdev, state, false);
}
EXPORT_SYMBOL(pci_disable_link_state_locked);
| 1128 | |
| 1129 | /** |
| 1130 | * pci_disable_link_state - Disable device's link state, so the link will |
| 1131 | * never enter specific states. Note that if the BIOS didn't grant ASPM |
| 1132 | * control to the OS, this does nothing because we can't touch the LNKCTL |
| 1133 | * register. Returns 0 or a negative errno. |
| 1134 | * |
| 1135 | * @pdev: PCI device |
| 1136 | * @state: ASPM link state to disable |
| 1137 | */ |
| 1138 | int pci_disable_link_state(struct pci_dev *pdev, int state) |
| 1139 | { |
| 1140 | return __pci_disable_link_state(pdev, state, true); |
| 1141 | } |
| 1142 | EXPORT_SYMBOL(pci_disable_link_state); |
| 1143 | |
/*
 * pcie_aspm_set_policy - "policy" module parameter setter.
 *
 * Switches the global ASPM policy and immediately reconfigures every
 * known link (ASPM and Clock PM) to match.  Returns -EPERM when the OS
 * doesn't control ASPM, or a sysfs_match_string() error for bad input.
 */
static int pcie_aspm_set_policy(const char *val,
				const struct kernel_param *kp)
{
	int i;
	struct pcie_link_state *link;

	if (aspm_disabled)
		return -EPERM;
	i = sysfs_match_string(policy_str, val);
	if (i < 0)
		return i;
	if (i == aspm_policy)
		return 0;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	aspm_policy = i;
	list_for_each_entry(link, &link_list, sibling) {
		pcie_config_aspm_link(link, policy_to_aspm_state(link));
		pcie_set_clkpm(link, policy_to_clkpm_state(link));
	}
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
	return 0;
}
| 1169 | |
| 1170 | static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp) |
| 1171 | { |
| 1172 | int i, cnt = 0; |
| 1173 | for (i = 0; i < ARRAY_SIZE(policy_str); i++) |
| 1174 | if (i == aspm_policy) |
| 1175 | cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]); |
| 1176 | else |
| 1177 | cnt += sprintf(buffer + cnt, "%s ", policy_str[i]); |
| 1178 | cnt += sprintf(buffer + cnt, "\n"); |
| 1179 | return cnt; |
| 1180 | } |
| 1181 | |
/* Expose the ASPM policy as module parameter "pcie_aspm.policy" */
module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
	NULL, 0644);
| 1184 | |
| 1185 | /** |
| 1186 | * pcie_aspm_enabled - Check if PCIe ASPM has been enabled for a device. |
| 1187 | * @pdev: Target device. |
| 1188 | */ |
| 1189 | bool pcie_aspm_enabled(struct pci_dev *pdev) |
| 1190 | { |
| 1191 | struct pci_dev *bridge = pci_upstream_bridge(pdev); |
| 1192 | bool ret; |
| 1193 | |
| 1194 | if (!bridge) |
| 1195 | return false; |
| 1196 | |
| 1197 | mutex_lock(&aspm_lock); |
| 1198 | ret = bridge->link_state ? !!bridge->link_state->aspm_enabled : false; |
| 1199 | mutex_unlock(&aspm_lock); |
| 1200 | |
| 1201 | return ret; |
| 1202 | } |
| 1203 | EXPORT_SYMBOL_GPL(pcie_aspm_enabled); |
| 1204 | |
| 1205 | #ifdef CONFIG_PCIEASPM_DEBUG |
| 1206 | static ssize_t link_state_show(struct device *dev, |
| 1207 | struct device_attribute *attr, |
| 1208 | char *buf) |
| 1209 | { |
| 1210 | struct pci_dev *pci_device = to_pci_dev(dev); |
| 1211 | struct pcie_link_state *link_state = pci_device->link_state; |
| 1212 | |
| 1213 | return sprintf(buf, "%d\n", link_state->aspm_enabled); |
| 1214 | } |
| 1215 | |
/*
 * sysfs writer: force an ASPM_STATE_* mask onto every link sharing this
 * device's root.  Debug interface only (CONFIG_PCIEASPM_DEBUG); bypasses
 * the policy setting.  NOTE(review): pdev->link_state is dereferenced
 * unconditionally — presumably safe because the attribute is only created
 * for devices with link state (see pcie_aspm_create_sysfs_dev_files).
 */
static ssize_t link_state_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t n)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct pcie_link_state *link, *root = pdev->link_state->root;
	u32 state;

	if (aspm_disabled)
		return -EPERM;

	if (kstrtouint(buf, 10, &state))
		return -EINVAL;
	/* Reject bits outside the known ASPM state mask */
	if ((state & ~ASPM_STATE_ALL) != 0)
		return -EINVAL;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	list_for_each_entry(link, &link_list, sibling) {
		if (link->root != root)
			continue;
		pcie_config_aspm_link(link, state);
	}
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
	return n;
}
| 1244 | |
| 1245 | static ssize_t clk_ctl_show(struct device *dev, |
| 1246 | struct device_attribute *attr, |
| 1247 | char *buf) |
| 1248 | { |
| 1249 | struct pci_dev *pci_device = to_pci_dev(dev); |
| 1250 | struct pcie_link_state *link_state = pci_device->link_state; |
| 1251 | |
| 1252 | return sprintf(buf, "%d\n", link_state->clkpm_enabled); |
| 1253 | } |
| 1254 | |
/*
 * sysfs writer: enable/disable Clock PM on this device's link.  Uses the
 * "nocheck" setter, i.e. the clkpm capable/disable bookkeeping is
 * bypassed (debug interface).
 */
static ssize_t clk_ctl_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t n)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	bool state;

	if (strtobool(buf, &state))
		return -EINVAL;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	pcie_set_clkpm_nocheck(pdev->link_state, state);
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);

	return n;
}
| 1274 | |
static DEVICE_ATTR_RW(link_state);
static DEVICE_ATTR_RW(clk_ctl);

/* sysfs group under which the debug ASPM attributes are placed */
static char power_group[] = "power";
/*
 * Add the "link_state"/"clk_ctl" attributes to @pdev's "power" sysfs
 * group, for links that support ASPM / Clock PM respectively.  Errors
 * from sysfs_add_file_to_group() are ignored (best effort).
 */
void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
{
	struct pcie_link_state *link_state = pdev->link_state;

	if (!link_state)
		return;

	if (link_state->aspm_support)
		sysfs_add_file_to_group(&pdev->dev.kobj,
			&dev_attr_link_state.attr, power_group);
	if (link_state->clkpm_capable)
		sysfs_add_file_to_group(&pdev->dev.kobj,
			&dev_attr_clk_ctl.attr, power_group);
}
| 1293 | |
/*
 * Remove the attributes added by pcie_aspm_create_sysfs_dev_files(),
 * using the same conditions so only files that were created are removed.
 */
void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
{
	struct pcie_link_state *link_state = pdev->link_state;

	if (!link_state)
		return;

	if (link_state->aspm_support)
		sysfs_remove_file_from_group(&pdev->dev.kobj,
			&dev_attr_link_state.attr, power_group);
	if (link_state->clkpm_capable)
		sysfs_remove_file_from_group(&pdev->dev.kobj,
			&dev_attr_clk_ctl.attr, power_group);
}
| 1308 | #endif |
| 1309 | |
| 1310 | static int __init pcie_aspm_disable(char *str) |
| 1311 | { |
| 1312 | if (!strcmp(str, "off")) { |
| 1313 | aspm_policy = POLICY_DEFAULT; |
| 1314 | aspm_disabled = 1; |
| 1315 | aspm_support_enabled = false; |
| 1316 | printk(KERN_INFO "PCIe ASPM is disabled\n"); |
| 1317 | } else if (!strcmp(str, "force")) { |
| 1318 | aspm_force = 1; |
| 1319 | printk(KERN_INFO "PCIe ASPM is forcibly enabled\n"); |
| 1320 | } |
| 1321 | return 1; |
| 1322 | } |
| 1323 | |
| 1324 | __setup("pcie_aspm=", pcie_aspm_disable); |
| 1325 | |
| 1326 | void pcie_no_aspm(void) |
| 1327 | { |
| 1328 | /* |
| 1329 | * Disabling ASPM is intended to prevent the kernel from modifying |
| 1330 | * existing hardware state, not to clear existing state. To that end: |
| 1331 | * (a) set policy to POLICY_DEFAULT in order to avoid changing state |
| 1332 | * (b) prevent userspace from changing policy |
| 1333 | */ |
| 1334 | if (!aspm_force) { |
| 1335 | aspm_policy = POLICY_DEFAULT; |
| 1336 | aspm_disabled = 1; |
| 1337 | } |
| 1338 | } |
| 1339 | |
/*
 * pcie_aspm_support_enabled - report whether ASPM support is active,
 * i.e. it was not turned off with "pcie_aspm=off".
 */
bool pcie_aspm_support_enabled(void)
{
	return aspm_support_enabled;
}
EXPORT_SYMBOL(pcie_aspm_support_enabled);