rjw | 1f88458 | 2022-01-06 17:20:42 +0800 | [diff] [blame^] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * File: drivers/pci/pcie/aspm.c |
| 4 | * Enabling PCIe link L0s/L1 state and Clock Power Management |
| 5 | * |
| 6 | * Copyright (C) 2007 Intel |
| 7 | * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com) |
| 8 | * Copyright (C) Shaohua Li (shaohua.li@intel.com) |
| 9 | */ |
| 10 | |
| 11 | #include <linux/kernel.h> |
| 12 | #include <linux/module.h> |
| 13 | #include <linux/moduleparam.h> |
| 14 | #include <linux/pci.h> |
| 15 | #include <linux/pci_regs.h> |
| 16 | #include <linux/errno.h> |
| 17 | #include <linux/pm.h> |
| 18 | #include <linux/init.h> |
| 19 | #include <linux/slab.h> |
| 20 | #include <linux/jiffies.h> |
| 21 | #include <linux/delay.h> |
| 22 | #include <linux/pci-aspm.h> |
| 23 | #include "../pci.h" |
| 24 | |
| 25 | #ifdef MODULE_PARAM_PREFIX |
| 26 | #undef MODULE_PARAM_PREFIX |
| 27 | #endif |
| 28 | #define MODULE_PARAM_PREFIX "pcie_aspm." |
| 29 | |
/*
 * Note: these are not register definitions.  They are internal bit flags
 * describing which ASPM/L1SS states a link supports, has enabled, etc.
 * L0s is tracked per direction; L1 substates distinguish ASPM- vs
 * PCI-PM-initiated entry.
 */
#define ASPM_STATE_L0S_UP	(1)	/* Upstream direction L0s state */
#define ASPM_STATE_L0S_DW	(2)	/* Downstream direction L0s state */
#define ASPM_STATE_L1		(4)	/* L1 state */
#define ASPM_STATE_L1_1		(8)	/* ASPM L1.1 state */
#define ASPM_STATE_L1_2		(0x10)	/* ASPM L1.2 state */
#define ASPM_STATE_L1_1_PCIPM	(0x20)	/* PCI PM L1.1 state */
#define ASPM_STATE_L1_2_PCIPM	(0x40)	/* PCI PM L1.2 state */
#define ASPM_STATE_L1_SS_PCIPM	(ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1_2_PCIPM)
#define ASPM_STATE_L1_2_MASK	(ASPM_STATE_L1_2 | ASPM_STATE_L1_2_PCIPM)
#define ASPM_STATE_L1SS		(ASPM_STATE_L1_1 | ASPM_STATE_L1_1_PCIPM |\
				 ASPM_STATE_L1_2_MASK)
#define ASPM_STATE_L0S		(ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
#define ASPM_STATE_ALL		(ASPM_STATE_L0S | ASPM_STATE_L1 |	\
				 ASPM_STATE_L1SS)

/*
 * When L1 substates are enabled, the LTR L1.2 threshold is a timing parameter
 * that decides whether L1.1 or L1.2 is entered (Refer PCIe spec for details).
 * Not sure if there is a way to "calculate" this on the fly, but maybe we
 * could turn it into a parameter in future.  This value has been taken from
 * the following files from Intel's coreboot (which is the only code found
 * to have used this):
 * https://www.coreboot.org/pipermail/coreboot-gerrit/2015-March/021134.html
 * https://review.coreboot.org/#/c/8832/
 */
#define LTR_L1_2_THRESHOLD_BITS	((1 << 21) | (1 << 23) | (1 << 30))
| 57 | |
/* Exit latencies for one direction of a link, in nanoseconds */
struct aspm_latency {
	u32 l0s;			/* L0s latency (nsec) */
	u32 l1;				/* L1 latency (nsec) */
};
| 62 | |
/*
 * Per-link ASPM bookkeeping.  One of these is allocated for the upstream
 * component (root/downstream port) of every PCIe link, and the instances
 * are chained together so the whole path to the root can be walked via
 * ->parent/->root.
 */
struct pcie_link_state {
	struct pci_dev *pdev;		/* Upstream component of the Link */
	struct pci_dev *downstream;	/* Downstream component, function 0 */
	struct pcie_link_state *root;	/* pointer to the root port link */
	struct pcie_link_state *parent;	/* pointer to the parent Link state */
	struct list_head sibling;	/* node in link_list */
	struct list_head children;	/* list of child link states */
	struct list_head link;		/* node in parent's children list */

	/* ASPM state: each field is a mask of ASPM_STATE_* bits */
	u32 aspm_support:7;		/* Supported ASPM state */
	u32 aspm_enabled:7;		/* Enabled ASPM state */
	u32 aspm_capable:7;		/* Capable ASPM state with latency */
	u32 aspm_default:7;		/* Default ASPM state by BIOS */
	u32 aspm_disable:7;		/* Disabled ASPM state */

	/* Clock PM state */
	u32 clkpm_capable:1;		/* Clock PM capable? */
	u32 clkpm_enabled:1;		/* Current Clock PM state */
	u32 clkpm_default:1;		/* Default Clock PM state by BIOS */
	u32 clkpm_disable:1;		/* Clock PM disabled */

	/* Exit latencies */
	struct aspm_latency latency_up;	/* Upstream direction exit latency */
	struct aspm_latency latency_dw;	/* Downstream direction exit latency */
	/*
	 * Endpoint acceptable latencies.  A pcie downstream port only
	 * has one slot under it, so at most there are 8 functions.
	 */
	struct aspm_latency acceptable[8];

	/* L1 PM Substate info */
	struct {
		u32 up_cap_ptr;		/* L1SS cap ptr in upstream dev */
		u32 dw_cap_ptr;		/* L1SS cap ptr in downstream dev */
		u32 ctl1;		/* value to be programmed in ctl1 */
		u32 ctl2;		/* value to be programmed in ctl2 */
	} l1ss;
};
| 102 | |
/* Module state: set from the "pcie_aspm=" kernel parameter elsewhere */
static int aspm_disabled, aspm_force;
static bool aspm_support_enabled = true;
static DEFINE_MUTEX(aspm_lock);		/* protects link_list and link state */
static LIST_HEAD(link_list);		/* all allocated pcie_link_state */

/* Global ASPM policy selector values */
#define POLICY_DEFAULT 0	/* BIOS default setting */
#define POLICY_PERFORMANCE 1	/* high performance */
#define POLICY_POWERSAVE 2	/* high power saving */
#define POLICY_POWER_SUPERSAVE 3	/* possibly even more power saving */

/* Compile-time default policy, overridable by Kconfig */
#ifdef CONFIG_PCIEASPM_PERFORMANCE
static int aspm_policy = POLICY_PERFORMANCE;
#elif defined CONFIG_PCIEASPM_POWERSAVE
static int aspm_policy = POLICY_POWERSAVE;
#elif defined CONFIG_PCIEASPM_POWER_SUPERSAVE
static int aspm_policy = POLICY_POWER_SUPERSAVE;
#else
static int aspm_policy;
#endif

/* Human-readable names for the policies, indexed by POLICY_* */
static const char *policy_str[] = {
	[POLICY_DEFAULT] = "default",
	[POLICY_PERFORMANCE] = "performance",
	[POLICY_POWERSAVE] = "powersave",
	[POLICY_POWER_SUPERSAVE] = "powersupersave"
};

/* Maximum time to wait for a link retrain to finish */
#define LINK_RETRAIN_TIMEOUT HZ
| 131 | |
| 132 | static int policy_to_aspm_state(struct pcie_link_state *link) |
| 133 | { |
| 134 | switch (aspm_policy) { |
| 135 | case POLICY_PERFORMANCE: |
| 136 | /* Disable ASPM and Clock PM */ |
| 137 | return 0; |
| 138 | case POLICY_POWERSAVE: |
| 139 | /* Enable ASPM L0s/L1 */ |
| 140 | return (ASPM_STATE_L0S | ASPM_STATE_L1); |
| 141 | case POLICY_POWER_SUPERSAVE: |
| 142 | /* Enable Everything */ |
| 143 | return ASPM_STATE_ALL; |
| 144 | case POLICY_DEFAULT: |
| 145 | return link->aspm_default; |
| 146 | } |
| 147 | return 0; |
| 148 | } |
| 149 | |
| 150 | static int policy_to_clkpm_state(struct pcie_link_state *link) |
| 151 | { |
| 152 | switch (aspm_policy) { |
| 153 | case POLICY_PERFORMANCE: |
| 154 | /* Disable ASPM and Clock PM */ |
| 155 | return 0; |
| 156 | case POLICY_POWERSAVE: |
| 157 | case POLICY_POWER_SUPERSAVE: |
| 158 | /* Enable Clock PM */ |
| 159 | return 1; |
| 160 | case POLICY_DEFAULT: |
| 161 | return link->clkpm_default; |
| 162 | } |
| 163 | return 0; |
| 164 | } |
| 165 | |
/*
 * Write the CLKREQ# enable bit in Link Control on every function on the
 * bus below this link, then cache the new state in @link.  No capability
 * or policy checks are made here; callers use pcie_set_clkpm() for that.
 */
static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
{
	struct pci_dev *child;
	struct pci_bus *linkbus = link->pdev->subordinate;
	u32 val = enable ? PCI_EXP_LNKCTL_CLKREQ_EN : 0;

	list_for_each_entry(child, &linkbus->devices, bus_list)
		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_CLKREQ_EN,
						   val);
	link->clkpm_enabled = !!enable;
}
| 178 | |
| 179 | static void pcie_set_clkpm(struct pcie_link_state *link, int enable) |
| 180 | { |
| 181 | /* |
| 182 | * Don't enable Clock PM if the link is not Clock PM capable |
| 183 | * or Clock PM is disabled |
| 184 | */ |
| 185 | if (!link->clkpm_capable || link->clkpm_disable) |
| 186 | enable = 0; |
| 187 | /* Need nothing if the specified equals to current state */ |
| 188 | if (link->clkpm_enabled == enable) |
| 189 | return; |
| 190 | pcie_set_clkpm_nocheck(link, enable); |
| 191 | } |
| 192 | |
/*
 * Probe Clock PM capability and current state for @link by reading every
 * function below it, and record the results.  @blacklist forces Clock PM
 * to stay disabled regardless of capability.
 */
static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
{
	int capable = 1, enabled = 1;
	u32 reg32;
	u16 reg16;
	struct pci_dev *child;
	struct pci_bus *linkbus = link->pdev->subordinate;

	/* All functions should have the same cap and state, take the worst */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &reg32);
		if (!(reg32 & PCI_EXP_LNKCAP_CLKPM)) {
			/* one incapable function makes the whole link incapable */
			capable = 0;
			enabled = 0;
			break;
		}
		/* capable; now check whether CLKREQ# is currently enabled */
		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
		if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
			enabled = 0;
	}
	link->clkpm_enabled = enabled;
	link->clkpm_default = enabled;	/* what BIOS left us with */
	link->clkpm_capable = capable;
	link->clkpm_disable = blacklist ? 1 : 0;
}
| 218 | |
/*
 * Kick off link retraining on @link's upstream port and poll Link Status
 * until training completes or LINK_RETRAIN_TIMEOUT elapses.
 *
 * Returns true if the Link Training bit cleared in time, false on timeout.
 */
static bool pcie_retrain_link(struct pcie_link_state *link)
{
	struct pci_dev *parent = link->pdev;
	unsigned long start_jiffies;
	u16 reg16;

	/* Set the Retrain Link bit to start training */
	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
	reg16 |= PCI_EXP_LNKCTL_RL;
	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
	if (parent->clear_retrain_link) {
		/*
		 * Due to an erratum in some devices the Retrain Link bit
		 * needs to be cleared again manually to allow the link
		 * training to succeed.
		 */
		reg16 &= ~PCI_EXP_LNKCTL_RL;
		pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
	}

	/* Wait for link training end. Break out after waiting for timeout */
	start_jiffies = jiffies;
	for (;;) {
		pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
		if (!(reg16 & PCI_EXP_LNKSTA_LT))
			break;		/* training finished */
		if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT))
			break;		/* give up; reg16 still has LT set */
		msleep(1);
	}
	return !(reg16 & PCI_EXP_LNKSTA_LT);
}
| 250 | |
/*
 * pcie_aspm_configure_common_clock: check if the 2 ends of a link
 * could use common clock. If they are, configure them to use the
 * common clock. That will reduce the ASPM state exit latency.
 *
 * Old Link Control values are saved so the configuration can be rolled
 * back if the subsequent link retraining fails.
 */
static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
{
	int same_clock = 1;
	/* child_reg is indexed by PCI function number, which is 0..7 */
	u16 reg16, parent_reg, child_reg[8];
	struct pci_dev *child, *parent = link->pdev;
	struct pci_bus *linkbus = parent->subordinate;
	/*
	 * All functions of a slot should have the same Slot Clock
	 * Configuration, so just check one function
	 */
	child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
	BUG_ON(!pci_is_pcie(child));

	/* Check downstream component if bit Slot Clock Configuration is 1 */
	pcie_capability_read_word(child, PCI_EXP_LNKSTA, &reg16);
	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
		same_clock = 0;

	/* Check upstream component if bit Slot Clock Configuration is 1 */
	pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &reg16);
	if (!(reg16 & PCI_EXP_LNKSTA_SLC))
		same_clock = 0;

	/* Configure downstream component, all functions */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		pcie_capability_read_word(child, PCI_EXP_LNKCTL, &reg16);
		child_reg[PCI_FUNC(child->devfn)] = reg16; /* save for rollback */
		if (same_clock)
			reg16 |= PCI_EXP_LNKCTL_CCC;
		else
			reg16 &= ~PCI_EXP_LNKCTL_CCC;
		pcie_capability_write_word(child, PCI_EXP_LNKCTL, reg16);
	}

	/* Configure upstream component */
	pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
	parent_reg = reg16;			/* save for rollback */
	if (same_clock)
		reg16 |= PCI_EXP_LNKCTL_CCC;
	else
		reg16 &= ~PCI_EXP_LNKCTL_CCC;
	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);

	/* Retrain so the new clock configuration takes effect */
	if (pcie_retrain_link(link))
		return;

	/* Training failed. Restore common clock configurations */
	dev_err(&parent->dev, "ASPM: Could not configure common clock\n");
	list_for_each_entry(child, &linkbus->devices, bus_list)
		pcie_capability_write_word(child, PCI_EXP_LNKCTL,
					   child_reg[PCI_FUNC(child->devfn)]);
	pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg);
}
| 309 | |
| 310 | /* Convert L0s latency encoding to ns */ |
| 311 | static u32 calc_l0s_latency(u32 encoding) |
| 312 | { |
| 313 | if (encoding == 0x7) |
| 314 | return (5 * 1000); /* > 4us */ |
| 315 | return (64 << encoding); |
| 316 | } |
| 317 | |
| 318 | /* Convert L0s acceptable latency encoding to ns */ |
| 319 | static u32 calc_l0s_acceptable(u32 encoding) |
| 320 | { |
| 321 | if (encoding == 0x7) |
| 322 | return -1U; |
| 323 | return (64 << encoding); |
| 324 | } |
| 325 | |
| 326 | /* Convert L1 latency encoding to ns */ |
| 327 | static u32 calc_l1_latency(u32 encoding) |
| 328 | { |
| 329 | if (encoding == 0x7) |
| 330 | return (65 * 1000); /* > 64us */ |
| 331 | return (1000 << encoding); |
| 332 | } |
| 333 | |
| 334 | /* Convert L1 acceptable latency encoding to ns */ |
| 335 | static u32 calc_l1_acceptable(u32 encoding) |
| 336 | { |
| 337 | if (encoding == 0x7) |
| 338 | return -1U; |
| 339 | return (1000 << encoding); |
| 340 | } |
| 341 | |
| 342 | /* Convert L1SS T_pwr encoding to usec */ |
| 343 | static u32 calc_l1ss_pwron(struct pci_dev *pdev, u32 scale, u32 val) |
| 344 | { |
| 345 | switch (scale) { |
| 346 | case 0: |
| 347 | return val * 2; |
| 348 | case 1: |
| 349 | return val * 10; |
| 350 | case 2: |
| 351 | return val * 100; |
| 352 | } |
| 353 | dev_err(&pdev->dev, "%s: Invalid T_PwrOn scale: %u\n", |
| 354 | __func__, scale); |
| 355 | return 0; |
| 356 | } |
| 357 | |
/* Snapshot of one component's ASPM-related register state */
struct aspm_register_info {
	u32 support:2;			/* LNKCAP ASPM Support field */
	u32 enabled:2;			/* LNKCTL ASPM Control field */
	u32 latency_encoding_l0s;	/* raw L0s exit latency encoding */
	u32 latency_encoding_l1;	/* raw L1 exit latency encoding */

	/* L1 substates */
	u32 l1ss_cap_ptr;		/* offset of L1SS ext capability, 0 if none */
	u32 l1ss_cap;			/* L1SS Capabilities register */
	u32 l1ss_ctl1;			/* L1SS Control 1 register */
	u32 l1ss_ctl2;			/* L1SS Control 2 register */
};
| 370 | |
/*
 * Fill @info with @pdev's ASPM register state: Link Capabilities/Control
 * fields and, when present and usable, the L1 PM Substates extended
 * capability registers.
 */
static void pcie_get_aspm_reg(struct pci_dev *pdev,
			      struct aspm_register_info *info)
{
	u16 reg16;
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &reg32);
	/* shifts align the masked fields down to bit 0 */
	info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
	info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
	info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &reg16);
	info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC;

	/* Read L1 PM substate capabilities */
	info->l1ss_cap = info->l1ss_ctl1 = info->l1ss_ctl2 = 0;
	info->l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
	if (!info->l1ss_cap_ptr)
		return;			/* no L1SS capability at all */
	pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CAP,
			      &info->l1ss_cap);
	if (!(info->l1ss_cap & PCI_L1SS_CAP_L1_PM_SS)) {
		/* capability present but L1 PM substates not supported */
		info->l1ss_cap = 0;
		return;
	}
	pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL1,
			      &info->l1ss_ctl1);
	pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL2,
			      &info->l1ss_ctl2);
}
| 400 | |
/*
 * Walk the path from @endpoint up to the root, clearing L0s/L1 from each
 * link's aspm_capable mask whenever the accumulated exit latency would
 * exceed what the endpoint declares acceptable.
 */
static void pcie_aspm_check_latency(struct pci_dev *endpoint)
{
	u32 latency, l1_switch_latency = 0;
	struct aspm_latency *acceptable;
	struct pcie_link_state *link;

	/* Device not in D0 doesn't need latency check */
	if ((endpoint->current_state != PCI_D0) &&
	    (endpoint->current_state != PCI_UNKNOWN))
		return;

	link = endpoint->bus->self->link_state;
	acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];

	while (link) {
		/* Check upstream direction L0s latency */
		if ((link->aspm_capable & ASPM_STATE_L0S_UP) &&
		    (link->latency_up.l0s > acceptable->l0s))
			link->aspm_capable &= ~ASPM_STATE_L0S_UP;

		/* Check downstream direction L0s latency */
		if ((link->aspm_capable & ASPM_STATE_L0S_DW) &&
		    (link->latency_dw.l0s > acceptable->l0s))
			link->aspm_capable &= ~ASPM_STATE_L0S_DW;
		/*
		 * Check L1 latency.
		 * Every switch on the path to root complex need 1
		 * more microsecond for L1. Spec doesn't mention L0s.
		 *
		 * The exit latencies for L1 substates are not advertised
		 * by a device. Since the spec also doesn't mention a way
		 * to determine max latencies introduced by enabling L1
		 * substates on the components, it is not clear how to do
		 * a L1 substate exit latency check. We assume that the
		 * L1 exit latencies advertised by a device include L1
		 * substate latencies (and hence do not do any check).
		 */
		latency = max_t(u32, link->latency_up.l1, link->latency_dw.l1);
		if ((link->aspm_capable & ASPM_STATE_L1) &&
		    (latency + l1_switch_latency > acceptable->l1))
			link->aspm_capable &= ~ASPM_STATE_L1;
		/* each traversed switch adds 1 us (1000 ns) of L1 exit time */
		l1_switch_latency += 1000;

		link = link->parent;
	}
}
| 447 | |
| 448 | /* |
| 449 | * The L1 PM substate capability is only implemented in function 0 in a |
| 450 | * multi function device. |
| 451 | */ |
| 452 | static struct pci_dev *pci_function_0(struct pci_bus *linkbus) |
| 453 | { |
| 454 | struct pci_dev *child; |
| 455 | |
| 456 | list_for_each_entry(child, &linkbus->devices, bus_list) |
| 457 | if (PCI_FUNC(child->devfn) == 0) |
| 458 | return child; |
| 459 | return NULL; |
| 460 | } |
| 461 | |
/*
 * Calculate L1.2 PM substate timing parameters and cache the values to
 * be written to L1SS CTL1/CTL2 later.  Both ends of the link must use
 * the larger of the two advertised T_cmn_mode and T_pwr_on values.
 */
static void aspm_calc_l1ss_info(struct pcie_link_state *link,
				struct aspm_register_info *upreg,
				struct aspm_register_info *dwreg)
{
	u32 val1, val2, scale1, scale2;

	link->l1ss.up_cap_ptr = upreg->l1ss_cap_ptr;
	link->l1ss.dw_cap_ptr = dwreg->l1ss_cap_ptr;
	link->l1ss.ctl1 = link->l1ss.ctl2 = 0;

	/* only L1.2 needs the timing parameters below */
	if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
		return;

	/* Choose the greater of the two Port Common_Mode_Restore_Times */
	val1 = (upreg->l1ss_cap >> 8) & 0xFF;	/* CAP bits 15:8 */
	val2 = (dwreg->l1ss_cap >> 8) & 0xFF;
	if (val1 > val2)
		link->l1ss.ctl1 |= val1 << 8;	/* CTL1 bits 15:8 */
	else
		link->l1ss.ctl1 |= val2 << 8;
	/*
	 * We currently use LTR L1.2 threshold to be fixed constant picked from
	 * Intel's coreboot.
	 */
	link->l1ss.ctl1 |= LTR_L1_2_THRESHOLD_BITS;

	/* Choose the greater of the two T_pwr_on */
	val1 = (upreg->l1ss_cap >> 19) & 0x1F;	/* T_PwrOn value, CAP 23:19 */
	scale1 = (upreg->l1ss_cap >> 16) & 0x03; /* T_PwrOn scale, CAP 17:16 */
	val2 = (dwreg->l1ss_cap >> 19) & 0x1F;
	scale2 = (dwreg->l1ss_cap >> 16) & 0x03;

	/* CTL2 layout: scale in bits 1:0, value in bits 7:3 */
	if (calc_l1ss_pwron(link->pdev, scale1, val1) >
	    calc_l1ss_pwron(link->downstream, scale2, val2))
		link->l1ss.ctl2 |= scale1 | (val1 << 3);
	else
		link->l1ss.ctl2 |= scale2 | (val2 << 3);
}
| 501 | |
/*
 * Initialize @link's ASPM capability/enabled/default state by probing
 * both ends of the link, configuring common clock, decoding L0s/L1/L1SS
 * support and latencies, and then pruning states whose exit latency
 * exceeds the endpoints' acceptable latencies.  @blacklist marks the
 * link so that ASPM gets disabled later.
 */
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	struct pci_bus *linkbus = parent->subordinate;
	struct aspm_register_info upreg, dwreg;

	if (blacklist) {
		/* Set enabled/disable so that we will disable ASPM later */
		link->aspm_enabled = ASPM_STATE_ALL;
		link->aspm_disable = ASPM_STATE_ALL;
		return;
	}

	/* Get upstream/downstream components' register state */
	pcie_get_aspm_reg(parent, &upreg);
	pcie_get_aspm_reg(child, &dwreg);

	/*
	 * If ASPM not supported, don't mess with the clocks and link,
	 * bail out now.
	 */
	if (!(upreg.support & dwreg.support))
		return;

	/* Configure common clock before checking latencies */
	pcie_aspm_configure_common_clock(link);

	/*
	 * Re-read upstream/downstream components' register state
	 * after clock configuration; common clock changes the
	 * advertised exit latencies.
	 */
	pcie_get_aspm_reg(parent, &upreg);
	pcie_get_aspm_reg(child, &dwreg);

	/*
	 * Setup L0s state
	 *
	 * Note that we must not enable L0s in either direction on a
	 * given link unless components on both sides of the link each
	 * support L0s.
	 */
	if (dwreg.support & upreg.support & PCIE_LINK_STATE_L0S)
		link->aspm_support |= ASPM_STATE_L0S;
	if (dwreg.enabled & PCIE_LINK_STATE_L0S)
		link->aspm_enabled |= ASPM_STATE_L0S_UP;
	if (upreg.enabled & PCIE_LINK_STATE_L0S)
		link->aspm_enabled |= ASPM_STATE_L0S_DW;
	link->latency_up.l0s = calc_l0s_latency(upreg.latency_encoding_l0s);
	link->latency_dw.l0s = calc_l0s_latency(dwreg.latency_encoding_l0s);

	/* Setup L1 state */
	if (upreg.support & dwreg.support & PCIE_LINK_STATE_L1)
		link->aspm_support |= ASPM_STATE_L1;
	if (upreg.enabled & dwreg.enabled & PCIE_LINK_STATE_L1)
		link->aspm_enabled |= ASPM_STATE_L1;
	link->latency_up.l1 = calc_l1_latency(upreg.latency_encoding_l1);
	link->latency_dw.l1 = calc_l1_latency(dwreg.latency_encoding_l1);

	/* Setup L1 substate: supported only if both ends support it */
	if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
		link->aspm_support |= ASPM_STATE_L1_1;
	if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
		link->aspm_support |= ASPM_STATE_L1_2;
	if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
		link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
	if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
		link->aspm_support |= ASPM_STATE_L1_2_PCIPM;

	/* ... and enabled only if both ends have it enabled */
	if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
		link->aspm_enabled |= ASPM_STATE_L1_1;
	if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
		link->aspm_enabled |= ASPM_STATE_L1_2;
	if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
		link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
	if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
		link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;

	if (link->aspm_support & ASPM_STATE_L1SS)
		aspm_calc_l1ss_info(link, &upreg, &dwreg);

	/* Save default state */
	link->aspm_default = link->aspm_enabled;

	/* Setup initial capable state. Will be updated later */
	link->aspm_capable = link->aspm_support;

	/* Get and check endpoint acceptable latencies */
	list_for_each_entry(child, &linkbus->devices, bus_list) {
		u32 reg32, encoding;
		struct aspm_latency *acceptable =
			&link->acceptable[PCI_FUNC(child->devfn)];

		/* only endpoints declare acceptable latencies */
		if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT &&
		    pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)
			continue;

		pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
		/* Calculate endpoint L0s acceptable latency */
		encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
		acceptable->l0s = calc_l0s_acceptable(encoding);
		/* Calculate endpoint L1 acceptable latency */
		encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
		acceptable->l1 = calc_l1_acceptable(encoding);

		/* prune aspm_capable along the path for this endpoint */
		pcie_aspm_check_latency(child);
	}
}
| 609 | |
| 610 | static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos, |
| 611 | u32 clear, u32 set) |
| 612 | { |
| 613 | u32 val; |
| 614 | |
| 615 | pci_read_config_dword(pdev, pos, &val); |
| 616 | val &= ~clear; |
| 617 | val |= set; |
| 618 | pci_write_config_dword(pdev, pos, val); |
| 619 | } |
| 620 | |
/*
 * Configure the ASPM L1 substates on both ends of @link so that exactly
 * the substate bits in @state are enabled, following the ordering rules
 * the PCIe spec imposes (see the comment below).
 */
static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
{
	u32 val, enable_req;
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	u32 up_cap_ptr = link->l1ss.up_cap_ptr;
	u32 dw_cap_ptr = link->l1ss.dw_cap_ptr;

	/* substates requested in @state that are not currently enabled */
	enable_req = (link->aspm_enabled ^ state) & state;

	/*
	 * Here are the rules specified in the PCIe spec for enabling L1SS:
	 * - When enabling L1.x, enable bit at parent first, then at child
	 * - When disabling L1.x, disable bit at child first, then at parent
	 * - When enabling ASPM L1.x, need to disable L1
	 *   (at child followed by parent).
	 * - The ASPM/PCIPM L1.2 must be disabled while programming timing
	 *   parameters
	 *
	 * To keep it simple, disable all L1SS bits first, and later enable
	 * what is needed.
	 */

	/* Disable all L1 substates */
	pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, 0);
	pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, 0);
	/*
	 * If needed, disable L1, and it gets enabled later
	 * in pcie_config_aspm_link().
	 */
	if (enable_req & (ASPM_STATE_L1_1 | ASPM_STATE_L1_2)) {
		pcie_capability_clear_and_set_word(child, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPM_L1, 0);
		pcie_capability_clear_and_set_word(parent, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPM_L1, 0);
	}

	if (enable_req & ASPM_STATE_L1_2_MASK) {

		/* Program T_pwr_on in both ports (L1.2 is disabled above) */
		pci_write_config_dword(parent, up_cap_ptr + PCI_L1SS_CTL2,
				       link->l1ss.ctl2);
		pci_write_config_dword(child, dw_cap_ptr + PCI_L1SS_CTL2,
				       link->l1ss.ctl2);

		/* Program T_cmn_mode in parent (CTL1 bits 15:8) */
		pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
					0xFF00, link->l1ss.ctl1);

		/* Program LTR L1.2 threshold in both ports (CTL1 bits 31:29, 25:16) */
		pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
					0xE3FF0000, link->l1ss.ctl1);
		pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
					0xE3FF0000, link->l1ss.ctl1);
	}

	/* Translate the requested ASPM_STATE_* bits to CTL1 enable bits */
	val = 0;
	if (state & ASPM_STATE_L1_1)
		val |= PCI_L1SS_CTL1_ASPM_L1_1;
	if (state & ASPM_STATE_L1_2)
		val |= PCI_L1SS_CTL1_ASPM_L1_2;
	if (state & ASPM_STATE_L1_1_PCIPM)
		val |= PCI_L1SS_CTL1_PCIPM_L1_1;
	if (state & ASPM_STATE_L1_2_PCIPM)
		val |= PCI_L1SS_CTL1_PCIPM_L1_2;

	/* Enable what we need to enable: parent first, then child */
	pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, val);
	pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
				PCI_L1SS_CTL1_L1SS_MASK, val);
}
| 695 | |
/* Write the ASPM Control field of @pdev's Link Control register to @val */
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
	pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
					   PCI_EXP_LNKCTL_ASPMC, val);
}
| 701 | |
/*
 * Program @link so that the ASPM states in @state (filtered by what the
 * link is capable of and not explicitly disabled) are enabled on both
 * ends, honouring the spec's upstream-first/downstream-first ordering
 * for enabling/disabling L1.
 */
static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
{
	u32 upstream = 0, dwstream = 0;
	struct pci_dev *child = link->downstream, *parent = link->pdev;
	struct pci_bus *linkbus = parent->subordinate;

	/* Enable only the states that were not explicitly disabled */
	state &= (link->aspm_capable & ~link->aspm_disable);

	/* Can't enable any substates if L1 is not enabled */
	if (!(state & ASPM_STATE_L1))
		state &= ~ASPM_STATE_L1SS;

	/* Spec says both ports must be in D0 before enabling PCI PM substates*/
	if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) {
		/* keep the current PCI PM substate bits unchanged instead */
		state &= ~ASPM_STATE_L1_SS_PCIPM;
		state |= (link->aspm_enabled & ASPM_STATE_L1_SS_PCIPM);
	}

	/* Nothing to do if the link is already in the requested state */
	if (link->aspm_enabled == state)
		return;
	/* Convert ASPM state to upstream/downstream ASPM register state */
	if (state & ASPM_STATE_L0S_UP)
		dwstream |= PCI_EXP_LNKCTL_ASPM_L0S;
	if (state & ASPM_STATE_L0S_DW)
		upstream |= PCI_EXP_LNKCTL_ASPM_L0S;
	if (state & ASPM_STATE_L1) {
		upstream |= PCI_EXP_LNKCTL_ASPM_L1;
		dwstream |= PCI_EXP_LNKCTL_ASPM_L1;
	}

	if (link->aspm_capable & ASPM_STATE_L1SS)
		pcie_config_aspm_l1ss(link, state);

	/*
	 * Spec 2.0 suggests all functions should be configured the
	 * same setting for ASPM. Enabling ASPM L1 should be done in
	 * upstream component first and then downstream, and vice
	 * versa for disabling ASPM L1. Spec doesn't mention L0S.
	 */
	if (state & ASPM_STATE_L1)
		pcie_config_aspm_dev(parent, upstream);
	list_for_each_entry(child, &linkbus->devices, bus_list)
		pcie_config_aspm_dev(child, dwstream);
	if (!(state & ASPM_STATE_L1))
		pcie_config_aspm_dev(parent, upstream);

	link->aspm_enabled = state;
}
| 752 | |
| 753 | static void pcie_config_aspm_path(struct pcie_link_state *link) |
| 754 | { |
| 755 | while (link) { |
| 756 | pcie_config_aspm_link(link, policy_to_aspm_state(link)); |
| 757 | link = link->parent; |
| 758 | } |
| 759 | } |
| 760 | |
/* Detach @link from its device and free it */
static void free_link_state(struct pcie_link_state *link)
{
	link->pdev->link_state = NULL;	/* device no longer owns a link state */
	kfree(link);
}
| 766 | |
/*
 * Check whether ASPM may safely be managed on the link below @pdev.
 * Returns 0 if OK, -EINVAL if any function in the slot is not PCIe or is
 * a pre-1.1 device (unless forced); a non-zero result blacklists the
 * whole slot.
 */
static int pcie_aspm_sanity_check(struct pci_dev *pdev)
{
	struct pci_dev *child;
	u32 reg32;

	/*
	 * Some functions in a slot might not all be PCIe functions,
	 * very strange. Disable ASPM for the whole slot
	 */
	list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
		if (!pci_is_pcie(child))
			return -EINVAL;

		/*
		 * If ASPM is disabled then we're not going to change
		 * the BIOS state. It's safe to continue even if it's a
		 * pre-1.1 device
		 */

		if (aspm_disabled)
			continue;

		/*
		 * Disable ASPM for pre-1.1 PCIe device, we follow MS to use
		 * RBER bit to determine if a function is 1.1 version device
		 */
		pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
		if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) {
			dev_info(&child->dev, "disabling ASPM on pre-1.1 PCIe device. You can enable it with 'pcie_aspm=force'\n");
			return -EINVAL;
		}
	}
	return 0;
}
| 801 | |
/*
 * Allocate and initialize a pcie_link_state for the link below @pdev,
 * wire it into the parent/root chain and the global link_list, and hang
 * it off @pdev.  Returns NULL on allocation failure or if the parent
 * link has no state of its own.
 */
static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
{
	struct pcie_link_state *link;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return NULL;

	INIT_LIST_HEAD(&link->sibling);
	INIT_LIST_HEAD(&link->children);
	INIT_LIST_HEAD(&link->link);
	link->pdev = pdev;
	link->downstream = pci_function_0(pdev->subordinate);

	/*
	 * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
	 * hierarchies.  Note that some PCIe host implementations omit
	 * the root ports entirely, in which case a downstream port on
	 * a switch may become the root of the link state chain for all
	 * its subordinate endpoints.
	 */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
	    pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
	    !pdev->bus->parent->self) {
		link->root = link;
	} else {
		struct pcie_link_state *parent;

		parent = pdev->bus->parent->self->link_state;
		if (!parent) {
			/* parent opted out of ASPM; can't manage this link */
			kfree(link);
			return NULL;
		}

		link->parent = parent;
		link->root = link->parent->root;
		list_add(&link->link, &parent->children);
	}

	list_add(&link->sibling, &link_list);
	pdev->link_state = link;
	return link;
}
| 845 | |
| 846 | /* |
| 847 | * pcie_aspm_init_link_state: Initiate PCI express link state. |
| 848 | * It is called after the pcie and its children devices are scanned. |
| 849 | * @pdev: the root port or switch downstream port |
| 850 | */ |
| 851 | void pcie_aspm_init_link_state(struct pci_dev *pdev) |
| 852 | { |
| 853 | struct pcie_link_state *link; |
| 854 | int blacklist = !!pcie_aspm_sanity_check(pdev); |
| 855 | |
| 856 | if (!aspm_support_enabled) |
| 857 | return; |
| 858 | |
| 859 | if (pdev->link_state) |
| 860 | return; |
| 861 | |
| 862 | /* |
| 863 | * We allocate pcie_link_state for the component on the upstream |
| 864 | * end of a Link, so there's nothing to do unless this device has a |
| 865 | * Link on its secondary side. |
| 866 | */ |
| 867 | if (!pdev->has_secondary_link) |
| 868 | return; |
| 869 | |
| 870 | /* VIA has a strange chipset, root port is under a bridge */ |
| 871 | if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT && |
| 872 | pdev->bus->self) |
| 873 | return; |
| 874 | |
| 875 | down_read(&pci_bus_sem); |
| 876 | if (list_empty(&pdev->subordinate->devices)) |
| 877 | goto out; |
| 878 | |
| 879 | mutex_lock(&aspm_lock); |
| 880 | link = alloc_pcie_link_state(pdev); |
| 881 | if (!link) |
| 882 | goto unlock; |
| 883 | /* |
| 884 | * Setup initial ASPM state. Note that we need to configure |
| 885 | * upstream links also because capable state of them can be |
| 886 | * update through pcie_aspm_cap_init(). |
| 887 | */ |
| 888 | pcie_aspm_cap_init(link, blacklist); |
| 889 | |
| 890 | /* Setup initial Clock PM state */ |
| 891 | pcie_clkpm_cap_init(link, blacklist); |
| 892 | |
| 893 | /* |
| 894 | * At this stage drivers haven't had an opportunity to change the |
| 895 | * link policy setting. Enabling ASPM on broken hardware can cripple |
| 896 | * it even before the driver has had a chance to disable ASPM, so |
| 897 | * default to a safe level right now. If we're enabling ASPM beyond |
| 898 | * the BIOS's expectation, we'll do so once pci_enable_device() is |
| 899 | * called. |
| 900 | */ |
| 901 | if (aspm_policy != POLICY_POWERSAVE && |
| 902 | aspm_policy != POLICY_POWER_SUPERSAVE) { |
| 903 | pcie_config_aspm_path(link); |
| 904 | pcie_set_clkpm(link, policy_to_clkpm_state(link)); |
| 905 | } |
| 906 | |
| 907 | unlock: |
| 908 | mutex_unlock(&aspm_lock); |
| 909 | out: |
| 910 | up_read(&pci_bus_sem); |
| 911 | } |
| 912 | |
| 913 | /* Recheck latencies and update aspm_capable for links under the root */ |
| 914 | static void pcie_update_aspm_capable(struct pcie_link_state *root) |
| 915 | { |
| 916 | struct pcie_link_state *link; |
| 917 | BUG_ON(root->parent); |
| 918 | list_for_each_entry(link, &link_list, sibling) { |
| 919 | if (link->root != root) |
| 920 | continue; |
| 921 | link->aspm_capable = link->aspm_support; |
| 922 | } |
| 923 | list_for_each_entry(link, &link_list, sibling) { |
| 924 | struct pci_dev *child; |
| 925 | struct pci_bus *linkbus = link->pdev->subordinate; |
| 926 | if (link->root != root) |
| 927 | continue; |
| 928 | list_for_each_entry(child, &linkbus->devices, bus_list) { |
| 929 | if ((pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT) && |
| 930 | (pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)) |
| 931 | continue; |
| 932 | pcie_aspm_check_latency(child); |
| 933 | } |
| 934 | } |
| 935 | } |
| 936 | |
/*
 * pcie_aspm_exit_link_state - tear down link state when a device is removed.
 * @pdev: the endpoint device
 *
 * Takes pci_bus_sem (read) and aspm_lock; callers must hold neither.
 */
void pcie_aspm_exit_link_state(struct pci_dev *pdev)
{
	struct pci_dev *parent = pdev->bus->self;
	struct pcie_link_state *link, *root, *parent_link;

	/* Nothing to tear down if the upstream port has no link state */
	if (!parent || !parent->link_state)
		return;

	down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	/*
	 * All PCIe functions are in one slot, remove one function will remove
	 * the whole slot, so just wait until we are the last function left.
	 */
	if (!list_empty(&parent->subordinate->devices))
		goto out;

	link = parent->link_state;
	root = link->root;
	parent_link = link->parent;

	/* All functions are removed, so just disable ASPM for the link */
	pcie_config_aspm_link(link, 0);
	/* Unhook from the global list and from the parent's children */
	list_del(&link->sibling);
	list_del(&link->link);
	/* Clock PM is for endpoint device */
	free_link_state(link);

	/* Recheck latencies and configure upstream links */
	if (parent_link) {
		pcie_update_aspm_capable(root);
		pcie_config_aspm_path(parent_link);
	}
out:
	mutex_unlock(&aspm_lock);
	up_read(&pci_bus_sem);
}
| 975 | |
| 976 | /* @pdev: the root port or switch downstream port */ |
| 977 | void pcie_aspm_pm_state_change(struct pci_dev *pdev) |
| 978 | { |
| 979 | struct pcie_link_state *link = pdev->link_state; |
| 980 | |
| 981 | if (aspm_disabled || !link) |
| 982 | return; |
| 983 | /* |
| 984 | * Devices changed PM state, we should recheck if latency |
| 985 | * meets all functions' requirement |
| 986 | */ |
| 987 | down_read(&pci_bus_sem); |
| 988 | mutex_lock(&aspm_lock); |
| 989 | pcie_update_aspm_capable(link->root); |
| 990 | pcie_config_aspm_path(link); |
| 991 | mutex_unlock(&aspm_lock); |
| 992 | up_read(&pci_bus_sem); |
| 993 | } |
| 994 | |
| 995 | void pcie_aspm_powersave_config_link(struct pci_dev *pdev) |
| 996 | { |
| 997 | struct pcie_link_state *link = pdev->link_state; |
| 998 | |
| 999 | if (aspm_disabled || !link) |
| 1000 | return; |
| 1001 | |
| 1002 | if (aspm_policy != POLICY_POWERSAVE && |
| 1003 | aspm_policy != POLICY_POWER_SUPERSAVE) |
| 1004 | return; |
| 1005 | |
| 1006 | down_read(&pci_bus_sem); |
| 1007 | mutex_lock(&aspm_lock); |
| 1008 | pcie_config_aspm_path(link); |
| 1009 | pcie_set_clkpm(link, policy_to_clkpm_state(link)); |
| 1010 | mutex_unlock(&aspm_lock); |
| 1011 | up_read(&pci_bus_sem); |
| 1012 | } |
| 1013 | |
/*
 * __pci_disable_link_state - record driver-requested ASPM/Clock-PM disables.
 * @pdev: the device whose link should avoid the given states
 * @state: mask of PCIE_LINK_STATE_* bits to disable
 * @sem: take pci_bus_sem here (false when the caller already holds it)
 */
static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
{
	struct pci_dev *parent = pdev->bus->self;
	struct pcie_link_state *link;

	if (!pci_is_pcie(pdev))
		return;

	/* Link state is kept on the device at the upstream end of the link */
	if (pdev->has_secondary_link)
		parent = pdev;
	if (!parent || !parent->link_state)
		return;

	/*
	 * A driver requested that ASPM be disabled on this device, but
	 * if we don't have permission to manage ASPM (e.g., on ACPI
	 * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
	 * the _OSC method), we can't honor that request.  Windows has
	 * a similar mechanism using "PciASPMOptOut", which is also
	 * ignored in this situation.
	 */
	if (aspm_disabled) {
		dev_warn(&pdev->dev, "can't disable ASPM; OS doesn't have ASPM control\n");
		return;
	}

	if (sem)
		down_read(&pci_bus_sem);
	mutex_lock(&aspm_lock);
	link = parent->link_state;
	/* Accumulate the disabled states, then reprogram the link */
	if (state & PCIE_LINK_STATE_L0S)
		link->aspm_disable |= ASPM_STATE_L0S;
	if (state & PCIE_LINK_STATE_L1)
		link->aspm_disable |= ASPM_STATE_L1;
	pcie_config_aspm_link(link, policy_to_aspm_state(link));

	if (state & PCIE_LINK_STATE_CLKPM)
		link->clkpm_disable = 1;
	pcie_set_clkpm(link, policy_to_clkpm_state(link));
	mutex_unlock(&aspm_lock);
	if (sem)
		up_read(&pci_bus_sem);
}
| 1057 | |
/*
 * pci_disable_link_state_locked - like pci_disable_link_state(), but for
 * callers that already hold pci_bus_sem (read).
 * @pdev: PCI device
 * @state: ASPM link state to disable
 */
void pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{
	__pci_disable_link_state(pdev, state, false);
}
EXPORT_SYMBOL(pci_disable_link_state_locked);
| 1063 | |
| 1064 | /** |
| 1065 | * pci_disable_link_state - Disable device's link state, so the link will |
| 1066 | * never enter specific states. Note that if the BIOS didn't grant ASPM |
| 1067 | * control to the OS, this does nothing because we can't touch the LNKCTL |
| 1068 | * register. |
| 1069 | * |
| 1070 | * @pdev: PCI device |
| 1071 | * @state: ASPM link state to disable |
| 1072 | */ |
| 1073 | void pci_disable_link_state(struct pci_dev *pdev, int state) |
| 1074 | { |
| 1075 | __pci_disable_link_state(pdev, state, true); |
| 1076 | } |
| 1077 | EXPORT_SYMBOL(pci_disable_link_state); |
| 1078 | |
| 1079 | static int pcie_aspm_set_policy(const char *val, |
| 1080 | const struct kernel_param *kp) |
| 1081 | { |
| 1082 | int i; |
| 1083 | struct pcie_link_state *link; |
| 1084 | |
| 1085 | if (aspm_disabled) |
| 1086 | return -EPERM; |
| 1087 | for (i = 0; i < ARRAY_SIZE(policy_str); i++) |
| 1088 | if (!strncmp(val, policy_str[i], strlen(policy_str[i]))) |
| 1089 | break; |
| 1090 | if (i >= ARRAY_SIZE(policy_str)) |
| 1091 | return -EINVAL; |
| 1092 | if (i == aspm_policy) |
| 1093 | return 0; |
| 1094 | |
| 1095 | down_read(&pci_bus_sem); |
| 1096 | mutex_lock(&aspm_lock); |
| 1097 | aspm_policy = i; |
| 1098 | list_for_each_entry(link, &link_list, sibling) { |
| 1099 | pcie_config_aspm_link(link, policy_to_aspm_state(link)); |
| 1100 | pcie_set_clkpm(link, policy_to_clkpm_state(link)); |
| 1101 | } |
| 1102 | mutex_unlock(&aspm_lock); |
| 1103 | up_read(&pci_bus_sem); |
| 1104 | return 0; |
| 1105 | } |
| 1106 | |
| 1107 | static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp) |
| 1108 | { |
| 1109 | int i, cnt = 0; |
| 1110 | for (i = 0; i < ARRAY_SIZE(policy_str); i++) |
| 1111 | if (i == aspm_policy) |
| 1112 | cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]); |
| 1113 | else |
| 1114 | cnt += sprintf(buffer + cnt, "%s ", policy_str[i]); |
| 1115 | cnt += sprintf(buffer + cnt, "\n"); |
| 1116 | return cnt; |
| 1117 | } |
| 1118 | |
/* "pcie_aspm.policy" module parameter: select/report the global ASPM policy */
module_param_call(policy, pcie_aspm_set_policy, pcie_aspm_get_policy,
	NULL, 0644);
| 1121 | |
| 1122 | #ifdef CONFIG_PCIEASPM_DEBUG |
| 1123 | static ssize_t link_state_show(struct device *dev, |
| 1124 | struct device_attribute *attr, |
| 1125 | char *buf) |
| 1126 | { |
| 1127 | struct pci_dev *pci_device = to_pci_dev(dev); |
| 1128 | struct pcie_link_state *link_state = pci_device->link_state; |
| 1129 | |
| 1130 | return sprintf(buf, "%d\n", link_state->aspm_enabled); |
| 1131 | } |
| 1132 | |
| 1133 | static ssize_t link_state_store(struct device *dev, |
| 1134 | struct device_attribute *attr, |
| 1135 | const char *buf, |
| 1136 | size_t n) |
| 1137 | { |
| 1138 | struct pci_dev *pdev = to_pci_dev(dev); |
| 1139 | struct pcie_link_state *link, *root = pdev->link_state->root; |
| 1140 | u32 state; |
| 1141 | |
| 1142 | if (aspm_disabled) |
| 1143 | return -EPERM; |
| 1144 | |
| 1145 | if (kstrtouint(buf, 10, &state)) |
| 1146 | return -EINVAL; |
| 1147 | if ((state & ~ASPM_STATE_ALL) != 0) |
| 1148 | return -EINVAL; |
| 1149 | |
| 1150 | down_read(&pci_bus_sem); |
| 1151 | mutex_lock(&aspm_lock); |
| 1152 | list_for_each_entry(link, &link_list, sibling) { |
| 1153 | if (link->root != root) |
| 1154 | continue; |
| 1155 | pcie_config_aspm_link(link, state); |
| 1156 | } |
| 1157 | mutex_unlock(&aspm_lock); |
| 1158 | up_read(&pci_bus_sem); |
| 1159 | return n; |
| 1160 | } |
| 1161 | |
| 1162 | static ssize_t clk_ctl_show(struct device *dev, |
| 1163 | struct device_attribute *attr, |
| 1164 | char *buf) |
| 1165 | { |
| 1166 | struct pci_dev *pci_device = to_pci_dev(dev); |
| 1167 | struct pcie_link_state *link_state = pci_device->link_state; |
| 1168 | |
| 1169 | return sprintf(buf, "%d\n", link_state->clkpm_enabled); |
| 1170 | } |
| 1171 | |
| 1172 | static ssize_t clk_ctl_store(struct device *dev, |
| 1173 | struct device_attribute *attr, |
| 1174 | const char *buf, |
| 1175 | size_t n) |
| 1176 | { |
| 1177 | struct pci_dev *pdev = to_pci_dev(dev); |
| 1178 | bool state; |
| 1179 | |
| 1180 | if (strtobool(buf, &state)) |
| 1181 | return -EINVAL; |
| 1182 | |
| 1183 | down_read(&pci_bus_sem); |
| 1184 | mutex_lock(&aspm_lock); |
| 1185 | pcie_set_clkpm_nocheck(pdev->link_state, state); |
| 1186 | mutex_unlock(&aspm_lock); |
| 1187 | up_read(&pci_bus_sem); |
| 1188 | |
| 1189 | return n; |
| 1190 | } |
| 1191 | |
/* Debug attributes exposing per-link ASPM and Clock PM state */
static DEVICE_ATTR_RW(link_state);
static DEVICE_ATTR_RW(clk_ctl);

/* Both attributes live in the device's "power" sysfs group */
static char power_group[] = "power";
| 1196 | void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev) |
| 1197 | { |
| 1198 | struct pcie_link_state *link_state = pdev->link_state; |
| 1199 | |
| 1200 | if (!link_state) |
| 1201 | return; |
| 1202 | |
| 1203 | if (link_state->aspm_support) |
| 1204 | sysfs_add_file_to_group(&pdev->dev.kobj, |
| 1205 | &dev_attr_link_state.attr, power_group); |
| 1206 | if (link_state->clkpm_capable) |
| 1207 | sysfs_add_file_to_group(&pdev->dev.kobj, |
| 1208 | &dev_attr_clk_ctl.attr, power_group); |
| 1209 | } |
| 1210 | |
| 1211 | void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) |
| 1212 | { |
| 1213 | struct pcie_link_state *link_state = pdev->link_state; |
| 1214 | |
| 1215 | if (!link_state) |
| 1216 | return; |
| 1217 | |
| 1218 | if (link_state->aspm_support) |
| 1219 | sysfs_remove_file_from_group(&pdev->dev.kobj, |
| 1220 | &dev_attr_link_state.attr, power_group); |
| 1221 | if (link_state->clkpm_capable) |
| 1222 | sysfs_remove_file_from_group(&pdev->dev.kobj, |
| 1223 | &dev_attr_clk_ctl.attr, power_group); |
| 1224 | } |
| 1225 | #endif |
| 1226 | |
/*
 * Parse the "pcie_aspm=" kernel parameter.  "off" disables OS ASPM
 * control entirely; "force" allows ASPM even on pre-1.1 devices that
 * pcie_aspm_sanity_check() would otherwise reject.
 */
static int __init pcie_aspm_disable(char *str)
{
	if (!strcmp(str, "off")) {
		aspm_policy = POLICY_DEFAULT;
		aspm_disabled = 1;
		aspm_support_enabled = false;
		printk(KERN_INFO "PCIe ASPM is disabled\n");
	} else if (!strcmp(str, "force")) {
		aspm_force = 1;
		printk(KERN_INFO "PCIe ASPM is forcibly enabled\n");
	}
	return 1;
}
| 1240 | |
/* Handle "pcie_aspm=off" / "pcie_aspm=force" on the kernel command line */
__setup("pcie_aspm=", pcie_aspm_disable);
| 1242 | |
| 1243 | void pcie_no_aspm(void) |
| 1244 | { |
| 1245 | /* |
| 1246 | * Disabling ASPM is intended to prevent the kernel from modifying |
| 1247 | * existing hardware state, not to clear existing state. To that end: |
| 1248 | * (a) set policy to POLICY_DEFAULT in order to avoid changing state |
| 1249 | * (b) prevent userspace from changing policy |
| 1250 | */ |
| 1251 | if (!aspm_force) { |
| 1252 | aspm_policy = POLICY_DEFAULT; |
| 1253 | aspm_disabled = 1; |
| 1254 | } |
| 1255 | } |
| 1256 | |
/* Report whether ASPM support is enabled ("pcie_aspm=off" clears it at boot) */
bool pcie_aspm_support_enabled(void)
{
	return aspm_support_enabled;
}
EXPORT_SYMBOL(pcie_aspm_support_enabled);