// SPDX-License-Identifier: GPL-2.0
/*
 * Implement the AER root port service driver. The driver registers an IRQ
 * handler. When a root port triggers an AER interrupt, the IRQ handler
 * collects root port status and schedules work.
 *
 * Copyright (C) 2006 Intel Corp.
 *	Tom Long Nguyen (tom.l.nguyen@intel.com)
 *	Zhang Yanmin (yanmin.zhang@intel.com)
 *
 * (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
 *	Andrew Patterson <andrew.patterson@hp.com>
 */

#include <linux/cper.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <acpi/apei.h>
#include <ras/ras_event.h>

#include "../pci.h"
#include "portdrv.h"

#define AER_ERROR_SOURCES_MAX		100

#define AER_MAX_TYPEOF_COR_ERRS		16	/* as per PCI_ERR_COR_STATUS */
#define AER_MAX_TYPEOF_UNCOR_ERRS	26	/* as per PCI_ERR_UNCOR_STATUS */

struct aer_err_source {
	unsigned int status;
	unsigned int id;
};

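/*
 * Per Root Port service data.  aer_irq() produces entries into e_sources[]
 * at prod_idx (under e_lock) and schedules dpc_handler; aer_isr() consumes
 * them at cons_idx.  rpc_mutex ensures only one thread runs recovery below
 * a given Root Port at a time.
 */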
struct aer_rpc {
	struct pci_dev *rpd;		/* Root Port device */
	struct work_struct dpc_handler;
	struct aer_err_source e_sources[AER_ERROR_SOURCES_MAX];
	struct aer_err_info e_info;
	unsigned short prod_idx;	/* Error Producer Index */
	unsigned short cons_idx;	/* Error Consumer Index */
	int isr;
	spinlock_t e_lock;		/*
					 * Lock access to Error Status/ID Regs
					 * and error producer/consumer index
					 */
	struct mutex rpc_mutex;		/*
					 * only one thread could do
					 * recovery on the same
					 * root port hierarchy
					 */
};

/* AER stats for the device */
struct aer_stats {

	/*
	 * Fields for all AER capable devices. They indicate the errors
	 * "as seen by this device". Note that this may mean that if an
	 * end point is causing problems, the AER counters may increment
	 * at its link partner (e.g. root port) because the errors will be
	 * "seen" by the link partner and not the problematic end point
	 * itself (which may report all counters as 0 as it never saw any
	 * problems).
	 */
	/* Counters for different types of correctable errors */
	u64 dev_cor_errs[AER_MAX_TYPEOF_COR_ERRS];
	/* Counters for different types of fatal uncorrectable errors */
	u64 dev_fatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
	/* Counters for different types of nonfatal uncorrectable errors */
	u64 dev_nonfatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
	/* Total number of ERR_COR sent by this device */
	u64 dev_total_cor_errs;
	/* Total number of ERR_FATAL sent by this device */
	u64 dev_total_fatal_errs;
	/* Total number of ERR_NONFATAL sent by this device */
	u64 dev_total_nonfatal_errs;

	/*
	 * Fields for Root Ports & Root Complex Event Collectors only; these
	 * indicate the total number of ERR_COR, ERR_FATAL, and ERR_NONFATAL
	 * messages received by the root port / event collector, INCLUDING the
	 * ones that are generated internally (by the root port itself).
	 */
	u64 rootport_total_cor_errs;
	u64 rootport_total_fatal_errs;
	u64 rootport_total_nonfatal_errs;
};

#define AER_LOG_TLP_MASKS		(PCI_ERR_UNC_POISON_TLP|	\
					PCI_ERR_UNC_ECRC|		\
					PCI_ERR_UNC_UNSUP|		\
					PCI_ERR_UNC_COMP_ABORT|		\
					PCI_ERR_UNC_UNX_COMP|		\
					PCI_ERR_UNC_MALF_TLP)

#define SYSTEM_ERROR_INTR_ON_MESG_MASK	(PCI_EXP_RTCTL_SECEE|	\
					PCI_EXP_RTCTL_SENFEE|	\
					PCI_EXP_RTCTL_SEFEE)
#define ROOT_PORT_INTR_ON_MESG_MASK	(PCI_ERR_ROOT_CMD_COR_EN|	\
					PCI_ERR_ROOT_CMD_NONFATAL_EN|	\
					PCI_ERR_ROOT_CMD_FATAL_EN)
#define ERR_COR_ID(d)			(d & 0xffff)
#define ERR_UNCOR_ID(d)			(d >> 16)
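
/*
 * The Root Error Source Identification register (PCI_ERR_ROOT_ERR_SRC) packs
 * the Requester ID of the first ERR_COR message into its low 16 bits and the
 * Requester ID of the first ERR_FATAL/ERR_NONFATAL message into its high 16
 * bits; the two macros above extract those fields.
 */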

static int pcie_aer_disable;

void pci_no_aer(void)
{
	pcie_aer_disable = 1;
}

bool pci_aer_available(void)
{
	return !pcie_aer_disable && pci_msi_enabled();
}

#ifdef CONFIG_PCIE_ECRC

#define ECRC_POLICY_DEFAULT 0		/* ECRC set by BIOS */
#define ECRC_POLICY_OFF     1		/* ECRC off for performance */
#define ECRC_POLICY_ON      2		/* ECRC on for data integrity */

static int ecrc_policy = ECRC_POLICY_DEFAULT;

static const char *ecrc_policy_str[] = {
	[ECRC_POLICY_DEFAULT] = "bios",
	[ECRC_POLICY_OFF] = "off",
	[ECRC_POLICY_ON] = "on"
};

/**
 * enable_ecrc_checking - enable PCIe ECRC checking for a device
 * @dev: the PCI device
 *
 * Returns 0 on success, or negative on failure.
 */
static int enable_ecrc_checking(struct pci_dev *dev)
{
	int pos;
	u32 reg32;

	if (!pci_is_pcie(dev))
		return -ENODEV;

	pos = dev->aer_cap;
	if (!pos)
		return -ENODEV;

	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	if (reg32 & PCI_ERR_CAP_ECRC_GENC)
		reg32 |= PCI_ERR_CAP_ECRC_GENE;
	if (reg32 & PCI_ERR_CAP_ECRC_CHKC)
		reg32 |= PCI_ERR_CAP_ECRC_CHKE;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	return 0;
}

/**
 * disable_ecrc_checking - disables PCIe ECRC checking for a device
 * @dev: the PCI device
 *
 * Returns 0 on success, or negative on failure.
 */
static int disable_ecrc_checking(struct pci_dev *dev)
{
	int pos;
	u32 reg32;

	if (!pci_is_pcie(dev))
		return -ENODEV;

	pos = dev->aer_cap;
	if (!pos)
		return -ENODEV;

	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 &= ~(PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	return 0;
}

/**
 * pcie_set_ecrc_checking - set/unset PCIe ECRC checking for a device based on global policy
 * @dev: the PCI device
 */
void pcie_set_ecrc_checking(struct pci_dev *dev)
{
	switch (ecrc_policy) {
	case ECRC_POLICY_DEFAULT:
		return;
	case ECRC_POLICY_OFF:
		disable_ecrc_checking(dev);
		break;
	case ECRC_POLICY_ON:
		enable_ecrc_checking(dev);
		break;
	default:
		return;
	}
}

/**
 * pcie_ecrc_get_policy - parse kernel command-line ecrc option
 */
void pcie_ecrc_get_policy(char *str)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ecrc_policy_str); i++)
		if (!strncmp(str, ecrc_policy_str[i],
			     strlen(ecrc_policy_str[i])))
			break;
	if (i >= ARRAY_SIZE(ecrc_policy_str))
		return;

	ecrc_policy = i;
}
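
/*
 * Note: the policy string handed to pcie_ecrc_get_policy() comes from the
 * kernel command line (typically the "ecrc=" sub-option of the "pci="
 * parameter, e.g. booting with "pci=ecrc=on"); an unrecognized string leaves
 * the default "bios" policy in place.
 */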
#endif	/* CONFIG_PCIE_ECRC */

#ifdef CONFIG_ACPI_APEI
static inline int hest_match_pci(struct acpi_hest_aer_common *p,
				 struct pci_dev *pci)
{
	return   ACPI_HEST_SEGMENT(p->bus) == pci_domain_nr(pci->bus) &&
		 ACPI_HEST_BUS(p->bus)     == pci->bus->number &&
		 p->device                 == PCI_SLOT(pci->devfn) &&
		 p->function               == PCI_FUNC(pci->devfn);
}

static inline bool hest_match_type(struct acpi_hest_header *hest_hdr,
				   struct pci_dev *dev)
{
	u16 hest_type = hest_hdr->type;
	u8 pcie_type = pci_pcie_type(dev);

	if ((hest_type == ACPI_HEST_TYPE_AER_ROOT_PORT &&
	     pcie_type == PCI_EXP_TYPE_ROOT_PORT) ||
	    (hest_type == ACPI_HEST_TYPE_AER_ENDPOINT &&
	     pcie_type == PCI_EXP_TYPE_ENDPOINT) ||
	    (hest_type == ACPI_HEST_TYPE_AER_BRIDGE &&
	     (dev->class >> 16) == PCI_BASE_CLASS_BRIDGE))
		return true;
	return false;
}

struct aer_hest_parse_info {
	struct pci_dev *pci_dev;
	int firmware_first;
};

static int hest_source_is_pcie_aer(struct acpi_hest_header *hest_hdr)
{
	if (hest_hdr->type == ACPI_HEST_TYPE_AER_ROOT_PORT ||
	    hest_hdr->type == ACPI_HEST_TYPE_AER_ENDPOINT ||
	    hest_hdr->type == ACPI_HEST_TYPE_AER_BRIDGE)
		return 1;
	return 0;
}

static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)
{
	struct aer_hest_parse_info *info = data;
	struct acpi_hest_aer_common *p;
	int ff;

	if (!hest_source_is_pcie_aer(hest_hdr))
		return 0;

	p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
	ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);

	/*
	 * If no specific device is supplied, determine whether
	 * FIRMWARE_FIRST is set for *any* PCIe device.
	 */
	if (!info->pci_dev) {
		info->firmware_first |= ff;
		return 0;
	}

	/* Otherwise, check the specific device */
	if (p->flags & ACPI_HEST_GLOBAL) {
		if (hest_match_type(hest_hdr, info->pci_dev))
			info->firmware_first = ff;
	} else
		if (hest_match_pci(p, info->pci_dev))
			info->firmware_first = ff;

	return 0;
}

static void aer_set_firmware_first(struct pci_dev *pci_dev)
{
	int rc;
	struct aer_hest_parse_info info = {
		.pci_dev	= pci_dev,
		.firmware_first	= 0,
	};

	rc = apei_hest_parse(aer_hest_parse, &info);

	if (rc)
		pci_dev->__aer_firmware_first = 0;
	else
		pci_dev->__aer_firmware_first = info.firmware_first;
	pci_dev->__aer_firmware_first_valid = 1;
}

int pcie_aer_get_firmware_first(struct pci_dev *dev)
{
	if (!pci_is_pcie(dev))
		return 0;

	if (pcie_ports_native)
		return 0;

	if (!dev->__aer_firmware_first_valid)
		aer_set_firmware_first(dev);
	return dev->__aer_firmware_first;
}

static bool aer_firmware_first;

/**
 * aer_acpi_firmware_first - Check if APEI should control AER.
 */
bool aer_acpi_firmware_first(void)
{
	static bool parsed = false;
	struct aer_hest_parse_info info = {
		.pci_dev	= NULL,	/* Check all PCIe devices */
		.firmware_first	= 0,
	};

	if (pcie_ports_native)
		return false;

	if (!parsed) {
		apei_hest_parse(aer_hest_parse, &info);
		aer_firmware_first = info.firmware_first;
		parsed = true;
	}
	return aer_firmware_first;
}
#endif

#define	PCI_EXP_AER_FLAGS	(PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
				 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)

int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	if (!dev->aer_cap)
		return -EIO;

	return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
}
EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);

int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
					  PCI_EXP_AER_FLAGS);
}
EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
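
/*
 * The two helpers above are exported for endpoint drivers.  A typical
 * (illustrative) pattern is to call pci_enable_pcie_error_reporting(pdev)
 * from a driver's probe() after pci_enable_device(), and
 * pci_disable_pcie_error_reporting(pdev) from remove(), so the Device Control
 * error-reporting enable bits track the driver's lifetime.
 */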

void pci_aer_clear_device_status(struct pci_dev *dev)
{
	u16 sta;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
}

int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos;
	u32 status, sev;

	pos = dev->aer_cap;
	if (!pos)
		return -EIO;

	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	/* Clear status bits for ERR_NONFATAL errors only */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
	status &= ~sev;
	if (status)
		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);

	return 0;
}
EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);

void pci_aer_clear_fatal_status(struct pci_dev *dev)
{
	int pos;
	u32 status, sev;

	pos = dev->aer_cap;
	if (!pos)
		return;

	if (pcie_aer_get_firmware_first(dev))
		return;

	/* Clear status bits for ERR_FATAL errors only */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &sev);
	status &= sev;
	if (status)
		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
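
/*
 * The two helpers above split the Uncorrectable Error Status register by
 * severity: PCI_ERR_UNCOR_SEVER marks which bits are fatal, so "status & ~sev"
 * selects the nonfatal bits and "status & sev" the fatal ones, and only the
 * selected bits are written back (the status register is write-1-to-clear).
 */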

int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
{
	int pos;
	u32 status;
	int port_type;

	if (!pci_is_pcie(dev))
		return -ENODEV;

	pos = dev->aer_cap;
	if (!pos)
		return -EIO;

	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	port_type = pci_pcie_type(dev);
	if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status);
	}

	pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
	pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);

	return 0;
}

void pci_aer_init(struct pci_dev *dev)
{
	dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);

	if (dev->aer_cap)
		dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);

	pci_cleanup_aer_error_status_regs(dev);
}

void pci_aer_exit(struct pci_dev *dev)
{
	kfree(dev->aer_stats);
	dev->aer_stats = NULL;
}

#define AER_AGENT_RECEIVER		0
#define AER_AGENT_REQUESTER		1
#define AER_AGENT_COMPLETER		2
#define AER_AGENT_TRANSMITTER		3

#define AER_AGENT_REQUESTER_MASK(t)	((t == AER_CORRECTABLE) ?	\
					0 : (PCI_ERR_UNC_COMP_TIME|PCI_ERR_UNC_UNSUP))
#define AER_AGENT_COMPLETER_MASK(t)	((t == AER_CORRECTABLE) ?	\
					0 : PCI_ERR_UNC_COMP_ABORT)
#define AER_AGENT_TRANSMITTER_MASK(t)	((t == AER_CORRECTABLE) ?	\
					(PCI_ERR_COR_REP_ROLL|PCI_ERR_COR_REP_TIMER) : 0)

#define AER_GET_AGENT(t, e)						\
	((e & AER_AGENT_COMPLETER_MASK(t)) ? AER_AGENT_COMPLETER :	\
	(e & AER_AGENT_REQUESTER_MASK(t)) ? AER_AGENT_REQUESTER :	\
	(e & AER_AGENT_TRANSMITTER_MASK(t)) ? AER_AGENT_TRANSMITTER :	\
	AER_AGENT_RECEIVER)

#define AER_PHYSICAL_LAYER_ERROR	0
#define AER_DATA_LINK_LAYER_ERROR	1
#define AER_TRANSACTION_LAYER_ERROR	2

#define AER_PHYSICAL_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ?	\
					PCI_ERR_COR_RCVR : 0)
#define AER_DATA_LINK_LAYER_ERROR_MASK(t) ((t == AER_CORRECTABLE) ?	\
					(PCI_ERR_COR_BAD_TLP|		\
					PCI_ERR_COR_BAD_DLLP|		\
					PCI_ERR_COR_REP_ROLL|		\
					PCI_ERR_COR_REP_TIMER) : PCI_ERR_UNC_DLP)

#define AER_GET_LAYER_ERROR(t, e)					\
	((e & AER_PHYSICAL_LAYER_ERROR_MASK(t)) ? AER_PHYSICAL_LAYER_ERROR : \
	(e & AER_DATA_LINK_LAYER_ERROR_MASK(t)) ? AER_DATA_LINK_LAYER_ERROR : \
	AER_TRANSACTION_LAYER_ERROR)
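
/*
 * Worked example: for a correctable Bad TLP error (PCI_ERR_COR_BAD_TLP),
 * AER_GET_LAYER_ERROR(AER_CORRECTABLE, ...) selects AER_DATA_LINK_LAYER_ERROR
 * and AER_GET_AGENT(AER_CORRECTABLE, ...) falls through to AER_AGENT_RECEIVER,
 * so the decoded error reads "Data Link Layer, (Receiver ID)".
 */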

/*
 * AER error strings
 */
static const char *aer_error_severity_string[] = {
	"Uncorrected (Non-Fatal)",
	"Uncorrected (Fatal)",
	"Corrected"
};

static const char *aer_error_layer[] = {
	"Physical Layer",
	"Data Link Layer",
	"Transaction Layer"
};

static const char *aer_correctable_error_string[AER_MAX_TYPEOF_COR_ERRS] = {
	"RxErr",			/* Bit Position 0	*/
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	"BadTLP",			/* Bit Position 6	*/
	"BadDLLP",			/* Bit Position 7	*/
	"Rollover",			/* Bit Position 8	*/
	NULL,
	NULL,
	NULL,
	"Timeout",			/* Bit Position 12	*/
	"NonFatalErr",			/* Bit Position 13	*/
	"CorrIntErr",			/* Bit Position 14	*/
	"HeaderOF",			/* Bit Position 15	*/
};

static const char *aer_uncorrectable_error_string[AER_MAX_TYPEOF_UNCOR_ERRS] = {
	"Undefined",			/* Bit Position 0	*/
	NULL,
	NULL,
	NULL,
	"DLP",				/* Bit Position 4	*/
	"SDES",				/* Bit Position 5	*/
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	NULL,
	"TLP",				/* Bit Position 12	*/
	"FCP",				/* Bit Position 13	*/
	"CmpltTO",			/* Bit Position 14	*/
	"CmpltAbrt",			/* Bit Position 15	*/
	"UnxCmplt",			/* Bit Position 16	*/
	"RxOF",				/* Bit Position 17	*/
	"MalfTLP",			/* Bit Position 18	*/
	"ECRC",				/* Bit Position 19	*/
	"UnsupReq",			/* Bit Position 20	*/
	"ACSViol",			/* Bit Position 21	*/
	"UncorrIntErr",			/* Bit Position 22	*/
	"BlockedTLP",			/* Bit Position 23	*/
	"AtomicOpBlocked",		/* Bit Position 24	*/
	"TLPBlockedErr",		/* Bit Position 25	*/
};

static const char *aer_agent_string[] = {
	"Receiver ID",
	"Requester ID",
	"Completer ID",
	"Transmitter ID"
};

#define aer_stats_dev_attr(name, stats_array, strings_array,		\
			   total_string, total_field)			\
	static ssize_t							\
	name##_show(struct device *dev, struct device_attribute *attr,	\
		     char *buf)						\
{									\
	unsigned int i;							\
	char *str = buf;						\
	struct pci_dev *pdev = to_pci_dev(dev);				\
	u64 *stats = pdev->aer_stats->stats_array;			\
									\
	for (i = 0; i < ARRAY_SIZE(strings_array); i++) {		\
		if (strings_array[i])					\
			str += sprintf(str, "%s %llu\n",		\
				       strings_array[i], stats[i]);	\
		else if (stats[i])					\
			str += sprintf(str, #stats_array "_bit[%d] %llu\n",\
				       i, stats[i]);			\
	}								\
	str += sprintf(str, "TOTAL_%s %llu\n", total_string,		\
		       pdev->aer_stats->total_field);			\
	return str-buf;							\
}									\
static DEVICE_ATTR_RO(name)

aer_stats_dev_attr(aer_dev_correctable, dev_cor_errs,
		   aer_correctable_error_string, "ERR_COR",
		   dev_total_cor_errs);
aer_stats_dev_attr(aer_dev_fatal, dev_fatal_errs,
		   aer_uncorrectable_error_string, "ERR_FATAL",
		   dev_total_fatal_errs);
aer_stats_dev_attr(aer_dev_nonfatal, dev_nonfatal_errs,
		   aer_uncorrectable_error_string, "ERR_NONFATAL",
		   dev_total_nonfatal_errs);
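
/*
 * Reading one of the attributes defined above, e.g.
 * /sys/bus/pci/devices/<BDF>/aer_dev_correctable, yields one "<name> <count>"
 * line per named status bit plus a total, for example (counts illustrative):
 *
 *   RxErr 0
 *   BadTLP 2
 *   BadDLLP 1
 *   ...
 *   TOTAL_ERR_COR 3
 */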

#define aer_stats_rootport_attr(name, field)				\
	static ssize_t							\
	name##_show(struct device *dev, struct device_attribute *attr,	\
		     char *buf)						\
{									\
	struct pci_dev *pdev = to_pci_dev(dev);				\
	return sprintf(buf, "%llu\n", pdev->aer_stats->field);		\
}									\
static DEVICE_ATTR_RO(name)

aer_stats_rootport_attr(aer_rootport_total_err_cor,
			rootport_total_cor_errs);
aer_stats_rootport_attr(aer_rootport_total_err_fatal,
			rootport_total_fatal_errs);
aer_stats_rootport_attr(aer_rootport_total_err_nonfatal,
			rootport_total_nonfatal_errs);

static struct attribute *aer_stats_attrs[] __ro_after_init = {
	&dev_attr_aer_dev_correctable.attr,
	&dev_attr_aer_dev_fatal.attr,
	&dev_attr_aer_dev_nonfatal.attr,
	&dev_attr_aer_rootport_total_err_cor.attr,
	&dev_attr_aer_rootport_total_err_fatal.attr,
	&dev_attr_aer_rootport_total_err_nonfatal.attr,
	NULL
};

static umode_t aer_stats_attrs_are_visible(struct kobject *kobj,
					   struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pci_dev *pdev = to_pci_dev(dev);

	if (!pdev->aer_stats)
		return 0;

	if ((a == &dev_attr_aer_rootport_total_err_cor.attr ||
	     a == &dev_attr_aer_rootport_total_err_fatal.attr ||
	     a == &dev_attr_aer_rootport_total_err_nonfatal.attr) &&
	    pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT)
		return 0;

	return a->mode;
}

const struct attribute_group aer_stats_attr_group = {
	.attrs  = aer_stats_attrs,
	.is_visible = aer_stats_attrs_are_visible,
};

static void pci_dev_aer_stats_incr(struct pci_dev *pdev,
				   struct aer_err_info *info)
{
	int status, i, max = -1;
	u64 *counter = NULL;
	struct aer_stats *aer_stats = pdev->aer_stats;

	if (!aer_stats)
		return;

	switch (info->severity) {
	case AER_CORRECTABLE:
		aer_stats->dev_total_cor_errs++;
		counter = &aer_stats->dev_cor_errs[0];
		max = AER_MAX_TYPEOF_COR_ERRS;
		break;
	case AER_NONFATAL:
		aer_stats->dev_total_nonfatal_errs++;
		counter = &aer_stats->dev_nonfatal_errs[0];
		max = AER_MAX_TYPEOF_UNCOR_ERRS;
		break;
	case AER_FATAL:
		aer_stats->dev_total_fatal_errs++;
		counter = &aer_stats->dev_fatal_errs[0];
		max = AER_MAX_TYPEOF_UNCOR_ERRS;
		break;
	}

	status = (info->status & ~info->mask);
	for (i = 0; i < max; i++)
		if (status & (1 << i))
			counter[i]++;
}

static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
					struct aer_err_source *e_src)
{
	struct aer_stats *aer_stats = pdev->aer_stats;

	if (!aer_stats)
		return;

	if (e_src->status & PCI_ERR_ROOT_COR_RCV)
		aer_stats->rootport_total_cor_errs++;

	if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
		if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
			aer_stats->rootport_total_fatal_errs++;
		else
			aer_stats->rootport_total_nonfatal_errs++;
	}
}

static void __print_tlp_header(struct pci_dev *dev,
			       struct aer_header_log_regs *t)
{
	pci_err(dev, "  TLP Header: %08x %08x %08x %08x\n",
		t->dw0, t->dw1, t->dw2, t->dw3);
}

static void __aer_print_error(struct pci_dev *dev,
			      struct aer_err_info *info)
{
	int i, status;
	const char *errmsg = NULL;
	status = (info->status & ~info->mask);

	for (i = 0; i < 32; i++) {
		if (!(status & (1 << i)))
			continue;

		if (info->severity == AER_CORRECTABLE)
			errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ?
				aer_correctable_error_string[i] : NULL;
		else
			errmsg = i < ARRAY_SIZE(aer_uncorrectable_error_string) ?
				aer_uncorrectable_error_string[i] : NULL;

		if (errmsg)
			pci_err(dev, "   [%2d] %-22s%s\n", i, errmsg,
				info->first_error == i ? " (First)" : "");
		else
			pci_err(dev, "   [%2d] Unknown Error Bit%s\n",
				i, info->first_error == i ? " (First)" : "");
	}
	pci_dev_aer_stats_incr(dev, info);
}

void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
{
	int layer, agent;
	int id = ((dev->bus->number << 8) | dev->devfn);

	if (!info->status) {
		pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n",
			aer_error_severity_string[info->severity]);
		goto out;
	}

	layer = AER_GET_LAYER_ERROR(info->severity, info->status);
	agent = AER_GET_AGENT(info->severity, info->status);

	pci_err(dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
		aer_error_severity_string[info->severity],
		aer_error_layer[layer], aer_agent_string[agent]);

	pci_err(dev, "  device [%04x:%04x] error status/mask=%08x/%08x\n",
		dev->vendor, dev->device,
		info->status, info->mask);

	__aer_print_error(dev, info);

	if (info->tlp_header_valid)
		__print_tlp_header(dev, &info->tlp);

out:
	if (info->id && info->error_dev_num > 1 && info->id == id)
		pci_err(dev, "  Error of this Agent is reported first\n");

	trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),
			info->severity, info->tlp_header_valid, &info->tlp);
}

static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info)
{
	u8 bus = info->id >> 8;
	u8 devfn = info->id & 0xff;

	pci_info(dev, "AER: %s%s error received: %04x:%02x:%02x.%d\n",
		 info->multi_error_valid ? "Multiple " : "",
		 aer_error_severity_string[info->severity],
		 pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
}
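
/*
 * Example of the resulting kernel log (all values illustrative): a corrected
 * receiver error reported through Root Port 0000:00:1c.0 for endpoint
 * 0000:01:00.0 produces lines along these shapes:
 *
 *   pcieport 0000:00:1c.0: AER: Corrected error received: 0000:01:00.0
 *   <drv> 0000:01:00.0: PCIe Bus Error: severity=Corrected, type=Physical Layer, (Receiver ID)
 *   <drv> 0000:01:00.0:   device [8086:1533] error status/mask=00000001/00002000
 *   <drv> 0000:01:00.0:    [ 0] RxErr
 */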

#ifdef CONFIG_ACPI_APEI_PCIEAER
int cper_severity_to_aer(int cper_severity)
{
	switch (cper_severity) {
	case CPER_SEV_RECOVERABLE:
		return AER_NONFATAL;
	case CPER_SEV_FATAL:
		return AER_FATAL;
	default:
		return AER_CORRECTABLE;
	}
}
EXPORT_SYMBOL_GPL(cper_severity_to_aer);

void cper_print_aer(struct pci_dev *dev, int aer_severity,
		    struct aer_capability_regs *aer)
{
	int layer, agent, tlp_header_valid = 0;
	u32 status, mask;
	struct aer_err_info info;

	if (aer_severity == AER_CORRECTABLE) {
		status = aer->cor_status;
		mask = aer->cor_mask;
	} else {
		status = aer->uncor_status;
		mask = aer->uncor_mask;
		tlp_header_valid = status & AER_LOG_TLP_MASKS;
	}

	layer = AER_GET_LAYER_ERROR(aer_severity, status);
	agent = AER_GET_AGENT(aer_severity, status);

	memset(&info, 0, sizeof(info));
	info.severity = aer_severity;
	info.status = status;
	info.mask = mask;
	info.first_error = PCI_ERR_CAP_FEP(aer->cap_control);

	pci_err(dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);
	__aer_print_error(dev, &info);
	pci_err(dev, "aer_layer=%s, aer_agent=%s\n",
		aer_error_layer[layer], aer_agent_string[agent]);

	if (aer_severity != AER_CORRECTABLE)
		pci_err(dev, "aer_uncor_severity: 0x%08x\n",
			aer->uncor_severity);

	if (tlp_header_valid)
		__print_tlp_header(dev, &aer->header_log);

	trace_aer_event(dev_name(&dev->dev), (status & ~mask),
			aer_severity, tlp_header_valid, &aer->header_log);
}
#endif

/**
 * add_error_device - list device to be handled
 * @e_info: pointer to error info
 * @dev: pointer to pci_dev to be added
 */
static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
{
	if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
		e_info->dev[e_info->error_dev_num] = pci_dev_get(dev);
		e_info->error_dev_num++;
		return 0;
	}
	return -ENOSPC;
}

/**
 * is_error_source - check whether the device is the source of the reported error
 * @dev: pointer to pci_dev to be checked
 * @e_info: pointer to reported error info
 */
static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
{
	int pos;
	u32 status, mask;
	u16 reg16;

	/*
	 * When the bus ID is 0, it might be a bad ID
	 * reported by the Root Port.
	 */
	if ((PCI_BUS_NUM(e_info->id) != 0) &&
	    !(dev->bus->bus_flags & PCI_BUS_FLAGS_NO_AERSID)) {
		/* Device ID match? */
		if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
			return true;

		/* If this is not a multiple-error case, the ID check is final */
		if (!e_info->multi_error_valid)
			return false;
	}

	/*
	 * When either
	 *      1) the bus ID is 0 (some ports might lose the bus ID of
	 *         the error source ID);
	 *      2) the bus flag PCI_BUS_FLAGS_NO_AERSID is set;
	 *      3) there are multiple errors and the prior ID comparison fails;
	 * we check the AER status registers to find a possible reporter.
	 */
	if (atomic_read(&dev->enable_cnt) == 0)
		return false;

	/* Check if AER is enabled */
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &reg16);
	if (!(reg16 & PCI_EXP_AER_FLAGS))
		return false;

	pos = dev->aer_cap;
	if (!pos)
		return false;

	/* Check if error is recorded */
	if (e_info->severity == AER_CORRECTABLE) {
		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
		pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
	} else {
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
	}
	if (status & ~mask)
		return true;

	return false;
}

static int find_device_iter(struct pci_dev *dev, void *data)
{
	struct aer_err_info *e_info = (struct aer_err_info *)data;

	if (is_error_source(dev, e_info)) {
		/* List this device */
		if (add_error_device(e_info, dev)) {
			/* We cannot handle more... Stop iteration */
			/* TODO: Should print error message here? */
			return 1;
		}

		/* If there is only a single error, stop iteration */
		if (!e_info->multi_error_valid)
			return 1;
	}
	return 0;
}

/**
 * find_source_device - search through device hierarchy for source device
 * @parent: pointer to Root Port pci_dev data structure
 * @e_info: detailed error information, including the error source ID
 *
 * Return true if found.
 *
 * Invoked by the DPC handler when an error is detected at the Root Port.
 * Caller of this function must set id, severity, and multi_error_valid of
 * struct aer_err_info pointed by @e_info properly.  This function must fill
 * e_info->error_dev_num and e_info->dev[], based on the given information.
 */
static bool find_source_device(struct pci_dev *parent,
			       struct aer_err_info *e_info)
{
	struct pci_dev *dev = parent;
	int result;

	/* Must reset in this function */
	e_info->error_dev_num = 0;

	/* Is Root Port an agent that sends error message? */
	result = find_device_iter(dev, e_info);
	if (result)
		return true;

	pci_walk_bus(parent->subordinate, find_device_iter, e_info);

	if (!e_info->error_dev_num) {
		pci_printk(KERN_DEBUG, parent, "can't find device of ID%04x\n",
			   e_info->id);
		return false;
	}
	return true;
}

/**
 * handle_error_source - handle logging error into an event log
 * @dev: pointer to pci_dev data structure of error source device
 * @info: comprehensive error information
 *
 * Invoked when an error is detected by the Root Port.
 */
static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
{
	int pos;

	if (info->severity == AER_CORRECTABLE) {
		/*
		 * Correctable error does not need software intervention.
		 * No need to go through error recovery process.
		 */
		pos = dev->aer_cap;
		if (pos)
			pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
					       info->status);
		pci_aer_clear_device_status(dev);
	} else if (info->severity == AER_NONFATAL)
		pcie_do_nonfatal_recovery(dev);
	else if (info->severity == AER_FATAL)
		pcie_do_fatal_recovery(dev, PCIE_PORT_SERVICE_AER);
	pci_dev_put(dev);
}

#ifdef CONFIG_ACPI_APEI_PCIEAER

#define AER_RECOVER_RING_ORDER		4
#define AER_RECOVER_RING_SIZE		(1 << AER_RECOVER_RING_ORDER)

struct aer_recover_entry {
	u8	bus;
	u8	devfn;
	u16	domain;
	int	severity;
	struct aer_capability_regs *regs;
};

static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
		    AER_RECOVER_RING_SIZE);

static void aer_recover_work_func(struct work_struct *work)
{
	struct aer_recover_entry entry;
	struct pci_dev *pdev;

	while (kfifo_get(&aer_recover_ring, &entry)) {
		pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
						   entry.devfn);
		if (!pdev) {
			pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
			       entry.domain, entry.bus,
			       PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
			continue;
		}
		cper_print_aer(pdev, entry.severity, entry.regs);
		if (entry.severity == AER_NONFATAL)
			pcie_do_nonfatal_recovery(pdev);
		else if (entry.severity == AER_FATAL)
			pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_AER);
		pci_dev_put(pdev);
	}
}

/*
 * Mutual exclusion for writers of aer_recover_ring; the reader side doesn't
 * need a lock because there is only one reader, and no lock is needed
 * between the reader and the writers.
 */
static DEFINE_SPINLOCK(aer_recover_ring_lock);
static DECLARE_WORK(aer_recover_work, aer_recover_work_func);

void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
		       int severity, struct aer_capability_regs *aer_regs)
{
	unsigned long flags;
	struct aer_recover_entry entry = {
		.bus		= bus,
		.devfn		= devfn,
		.domain		= domain,
		.severity	= severity,
		.regs		= aer_regs,
	};

	spin_lock_irqsave(&aer_recover_ring_lock, flags);
	if (kfifo_put(&aer_recover_ring, entry))
		schedule_work(&aer_recover_work);
	else
		pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
		       domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
}
EXPORT_SYMBOL_GPL(aer_recover_queue);
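
/*
 * aer_recover_queue() above is how firmware-first (APEI/GHES) error records
 * enter this driver's recovery path: the GHES code queues one entry per
 * reported device, and aer_recover_work_func() replays it through the same
 * cper_print_aer()/recovery helpers used for native AER.  The kfifo plus
 * workqueue handoff exists because the caller may run in interrupt context.
 */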
#endif

/**
 * aer_get_device_error_info - read error status from dev and store it to info
 * @dev: pointer to the device expected to have an error record
 * @info: pointer to structure to store the error record
 *
 * Return 1 on success, 0 on error.
 *
 * Note that @info is reused among all error devices. Clear fields properly.
 */
int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
	int pos, temp;

	/* Must reset in this function */
	info->status = 0;
	info->tlp_header_valid = 0;

	pos = dev->aer_cap;

	/* The device might not support AER */
	if (!pos)
		return 0;

	if (info->severity == AER_CORRECTABLE) {
		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
				      &info->status);
		pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK,
				      &info->mask);
		if (!(info->status & ~info->mask))
			return 0;
	} else if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
		   pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
		   info->severity == AER_NONFATAL) {

		/* Link is still healthy for IO reads */
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
				      &info->status);
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK,
				      &info->mask);
		if (!(info->status & ~info->mask))
			return 0;

		/* Get First Error Pointer */
		pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp);
		info->first_error = PCI_ERR_CAP_FEP(temp);

		if (info->status & AER_LOG_TLP_MASKS) {
			info->tlp_header_valid = 1;
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
		}
	}

	return 1;
}

static inline void aer_process_err_devices(struct aer_err_info *e_info)
{
	int i;

	/* Report all errors before handling them, so records aren't lost to resets etc. */
	for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
		if (aer_get_device_error_info(e_info->dev[i], e_info))
			aer_print_error(e_info->dev[i], e_info);
	}
	for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
		if (aer_get_device_error_info(e_info->dev[i], e_info))
			handle_error_source(e_info->dev[i], e_info);
	}
}

/**
 * aer_isr_one_error - consume an error detected by root port
 * @rpc: pointer to the root port which holds an error
 * @e_src: pointer to an error source
 */
static void aer_isr_one_error(struct aer_rpc *rpc,
			      struct aer_err_source *e_src)
{
	struct pci_dev *pdev = rpc->rpd;
	struct aer_err_info *e_info = &rpc->e_info;

	pci_rootport_aer_stats_incr(pdev, e_src);

	/*
	 * It is possible that both a correctable error and an uncorrectable
	 * error are logged. Report the correctable error first.
	 */
	if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
		e_info->id = ERR_COR_ID(e_src->id);
		e_info->severity = AER_CORRECTABLE;

		if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
			e_info->multi_error_valid = 1;
		else
			e_info->multi_error_valid = 0;
		aer_print_port_info(pdev, e_info);

		if (find_source_device(pdev, e_info))
			aer_process_err_devices(e_info);
	}

	if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
		e_info->id = ERR_UNCOR_ID(e_src->id);

		if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
			e_info->severity = AER_FATAL;
		else
			e_info->severity = AER_NONFATAL;

		if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
			e_info->multi_error_valid = 1;
		else
			e_info->multi_error_valid = 0;

		aer_print_port_info(pdev, e_info);

		if (find_source_device(pdev, e_info))
			aer_process_err_devices(e_info);
	}
}

/**
 * get_e_source - retrieve an error source
 * @rpc: pointer to the root port which holds an error
 * @e_src: pointer to store retrieved error source
 *
 * Return 1 if an error source is retrieved, otherwise 0.
 *
 * Invoked by DPC handler to consume an error.
 */
static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
{
	unsigned long flags;

	/* Lock access to Root error producer/consumer index */
	spin_lock_irqsave(&rpc->e_lock, flags);
	if (rpc->prod_idx == rpc->cons_idx) {
		spin_unlock_irqrestore(&rpc->e_lock, flags);
		return 0;
	}

	*e_src = rpc->e_sources[rpc->cons_idx];
	rpc->cons_idx++;
	if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
		rpc->cons_idx = 0;
	spin_unlock_irqrestore(&rpc->e_lock, flags);

	return 1;
}

/**
 * aer_isr - consume errors detected by root port
 * @work: definition of this work item
 *
 * Invoked, as a DPC (deferred procedure call), when the Root Port records a
 * newly detected error.
 */
static void aer_isr(struct work_struct *work)
{
	struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
	struct aer_err_source uninitialized_var(e_src);

	mutex_lock(&rpc->rpc_mutex);
	while (get_e_source(rpc, &e_src))
		aer_isr_one_error(rpc, &e_src);
	mutex_unlock(&rpc->rpc_mutex);
}

/**
 * aer_irq - Root Port's ISR
 * @irq: IRQ assigned to Root Port
 * @context: pointer to Root Port data structure
 *
 * Invoked when Root Port detects AER messages.
 */
irqreturn_t aer_irq(int irq, void *context)
{
	unsigned int status, id;
	struct pcie_device *pdev = (struct pcie_device *)context;
	struct aer_rpc *rpc = get_service_data(pdev);
	int next_prod_idx;
	unsigned long flags;
	int pos;

	pos = pdev->port->aer_cap;
	/*
	 * Must lock access to Root Error Status Reg, Root Error ID Reg,
	 * and Root error producer/consumer index
	 */
	spin_lock_irqsave(&rpc->e_lock, flags);

	/* Read error status */
	pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, &status);
	if (!(status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV))) {
		spin_unlock_irqrestore(&rpc->e_lock, flags);
		return IRQ_NONE;
	}

	/* Read error source and clear error status */
	pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_ERR_SRC, &id);
	pci_write_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, status);

	/* Store error source for later DPC handler */
	next_prod_idx = rpc->prod_idx + 1;
	if (next_prod_idx == AER_ERROR_SOURCES_MAX)
		next_prod_idx = 0;
	if (next_prod_idx == rpc->cons_idx) {
		/*
		 * Error Storm Condition - possibly the same error occurred.
		 * Drop the error.
		 */
		spin_unlock_irqrestore(&rpc->e_lock, flags);
		return IRQ_HANDLED;
	}
	rpc->e_sources[rpc->prod_idx].status = status;
	rpc->e_sources[rpc->prod_idx].id = id;
	rpc->prod_idx = next_prod_idx;
	spin_unlock_irqrestore(&rpc->e_lock, flags);

	/* Invoke DPC handler */
	schedule_work(&rpc->dpc_handler);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(aer_irq);

static int set_device_error_reporting(struct pci_dev *dev, void *data)
{
	bool enable = *((bool *)data);
	int type = pci_pcie_type(dev);

	if ((type == PCI_EXP_TYPE_ROOT_PORT) ||
	    (type == PCI_EXP_TYPE_UPSTREAM) ||
	    (type == PCI_EXP_TYPE_DOWNSTREAM)) {
		if (enable)
			pci_enable_pcie_error_reporting(dev);
		else
			pci_disable_pcie_error_reporting(dev);
	}

	if (enable)
		pcie_set_ecrc_checking(dev);

	return 0;
}

/**
 * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports
 * @dev: pointer to root port's pci_dev data structure
 * @enable: true = enable error reporting, false = disable error reporting.
 */
static void set_downstream_devices_error_reporting(struct pci_dev *dev,
						   bool enable)
{
	set_device_error_reporting(dev, &enable);

	if (!dev->subordinate)
		return;
	pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
}

/**
 * aer_enable_rootport - enable Root Port's interrupts when receiving messages
 * @rpc: pointer to a Root Port data structure
 *
 * Invoked when PCIe bus loads AER service driver.
 */
static void aer_enable_rootport(struct aer_rpc *rpc)
{
	struct pci_dev *pdev = rpc->rpd;
	int aer_pos;
	u16 reg16;
	u32 reg32;

	/* Clear PCIe Capability's Device Status */
	pcie_capability_read_word(pdev, PCI_EXP_DEVSTA, &reg16);
	pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, reg16);

	/* Disable system error generation in response to error messages */
	pcie_capability_clear_word(pdev, PCI_EXP_RTCTL,
				   SYSTEM_ERROR_INTR_ON_MESG_MASK);

	aer_pos = pdev->aer_cap;
	/* Clear error status */
	pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32);
	pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32);
	pci_read_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, &reg32);
	pci_write_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, reg32);
	pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32);
	pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32);

	/*
	 * Enable error reporting for the root port device and downstream port
	 * devices.
	 */
	set_downstream_devices_error_reporting(pdev, true);

	/* Enable Root Port's interrupt in response to error messages */
	pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, &reg32);
	reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
	pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_COMMAND, reg32);
}

/**
 * aer_disable_rootport - disable Root Port's interrupts when receiving messages
 * @rpc: pointer to a Root Port data structure
 *
 * Invoked when PCIe bus unloads AER service driver.
 */
static void aer_disable_rootport(struct aer_rpc *rpc)
{
	struct pci_dev *pdev = rpc->rpd;
	u32 reg32;
	int pos;

	/*
	 * Disable error reporting for the root port device and downstream port
	 * devices.
	 */
	set_downstream_devices_error_reporting(pdev, false);

	pos = pdev->aer_cap;
	/* Disable Root's interrupt in response to error messages */
	pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
	reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
	pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, reg32);

	/* Clear Root's error status reg */
	pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, &reg32);
	pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, reg32);
}

/**
 * aer_alloc_rpc - allocate Root Port data structure
 * @dev: pointer to the pcie_dev data structure
 *
 * Invoked when Root Port's AER service is loaded.
 */
static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
{
	struct aer_rpc *rpc;

	rpc = kzalloc(sizeof(struct aer_rpc), GFP_KERNEL);
	if (!rpc)
		return NULL;

	/* Initialize Root lock access, e_lock, to Root Error Status Reg */
	spin_lock_init(&rpc->e_lock);

	rpc->rpd = dev->port;
	INIT_WORK(&rpc->dpc_handler, aer_isr);
	mutex_init(&rpc->rpc_mutex);

	/* Use PCIe bus function to store rpc into PCIe device */
	set_service_data(dev, rpc);

	return rpc;
}

/**
 * aer_remove - clean up resources
 * @dev: pointer to the pcie_dev data structure
 *
 * Invoked when PCI Express bus unloads or AER probe fails.
 */
static void aer_remove(struct pcie_device *dev)
{
	struct aer_rpc *rpc = get_service_data(dev);

	if (rpc) {
		/* If the interrupt service was registered, it must be freed */
		if (rpc->isr)
			free_irq(dev->irq, dev);

		flush_work(&rpc->dpc_handler);
		aer_disable_rootport(rpc);
		kfree(rpc);
		set_service_data(dev, NULL);
	}
}

/**
 * aer_probe - initialize resources
 * @dev: pointer to the pcie_dev data structure
 *
 * Invoked when PCI Express bus loads AER service driver.
 */
static int aer_probe(struct pcie_device *dev)
{
	int status;
	struct aer_rpc *rpc;
	struct device *device = &dev->port->dev;

	/* Alloc rpc data structure */
	rpc = aer_alloc_rpc(dev);
	if (!rpc) {
		dev_printk(KERN_DEBUG, device, "alloc AER rpc failed\n");
		aer_remove(dev);
		return -ENOMEM;
	}

	/* Request IRQ ISR */
	status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", dev);
	if (status) {
		dev_printk(KERN_DEBUG, device, "request AER IRQ %d failed\n",
			   dev->irq);
		aer_remove(dev);
		return status;
	}

	rpc->isr = 1;

	aer_enable_rootport(rpc);
	dev_info(device, "AER enabled with IRQ %d\n", dev->irq);
	return 0;
}

/**
 * aer_root_reset - reset link on Root Port
 * @dev: pointer to Root Port's pci_dev data structure
 *
 * Invoked by Port Bus driver when performing link reset at Root Port.
 */
static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
{
	u32 reg32;
	int pos;
	int rc;

	pos = dev->aer_cap;

	/* Disable Root's interrupt in response to error messages */
	pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
	reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
	pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);

	rc = pci_bus_error_reset(dev);
	pci_printk(KERN_DEBUG, dev, "Root Port link has been reset\n");

	/* Clear Root Error Status */
	pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
	pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, reg32);

	/* Enable Root Port's interrupt in response to error messages */
	pci_read_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, &reg32);
	reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
	pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);

	return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

/**
 * aer_error_resume - clean up corresponding error status bits
 * @dev: pointer to Root Port's pci_dev data structure
 *
 * Invoked by Port Bus driver during nonfatal recovery.
 */
static void aer_error_resume(struct pci_dev *dev)
{
	pci_aer_clear_device_status(dev);
	pci_cleanup_aer_uncorrect_error_status(dev);
}

static struct pcie_port_service_driver aerdriver = {
	.name		= "aer",
	.port_type	= PCI_EXP_TYPE_ROOT_PORT,
	.service	= PCIE_PORT_SERVICE_AER,

	.probe		= aer_probe,
	.remove		= aer_remove,
	.error_resume	= aer_error_resume,
	.reset_link	= aer_root_reset,
};

/**
 * pcie_aer_init - register AER root service driver
 *
 * Invoked when AER root service driver is loaded.
 */
int __init pcie_aer_init(void)
{
	if (!pci_aer_available() || aer_acpi_firmware_first())
		return -ENXIO;
	return pcie_port_service_register(&aerdriver);
}