/*
 * probe.c - PCI detection and setup code
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
#include <linux/aer.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include "pci.h"

#define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR	3

static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};

/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

static LIST_HEAD(pci_domain_busn_res_list);

struct pci_domain_busn_res {
	struct list_head list;
	struct resource res;
	int domain_nr;
};

static struct resource *get_pci_domain_busn_res(int domain_nr)
{
	struct pci_domain_busn_res *r;

	list_for_each_entry(r, &pci_domain_busn_res_list, list)
		if (r->domain_nr == domain_nr)
			return &r->res;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->domain_nr = domain_nr;
	r->res.start = 0;
	r->res.end = 0xff;
	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;

	list_add_tail(&r->list, &pci_domain_busn_res_list);

	return &r->res;
}

static int find_anything(struct device *dev, void *data)
{
	return 1;
}

/*
 * Some device drivers need to know if PCI is initialized.
 * Basically, we think PCI is not initialized when there
 * is no device to be found on the pci_bus_type.
 */
int no_pci_devices(void)
{
	struct device *dev;
	int no_devices;

	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
	no_devices = (dev == NULL);
	put_device(dev);
	return no_devices;
}
EXPORT_SYMBOL(no_pci_devices);

/*
 * PCI Bus Class
 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}

static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);

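/*
 * BAR sizing example: if a 32-bit memory BAR reads back 0xfff00000 after
 * all 1s were written to it, the lowest set bit (0x00100000) shows a 1 MB
 * decode, so pci_size() returns 0xfffff, the extent of the region.
 */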
static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/*
	 * Get the lowest of them to find the decode size, and from that
	 * the extent.
	 */
	size = (size & ~(size-1)) - 1;

	/*
	 * base == maxbase can be valid only if the BAR has already been
	 * programmed with all 1s.
	 */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}

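/*
 * The low BAR bits encode the resource type: bit 0 selects I/O vs. memory
 * space, bits 2:1 give the memory type (32-bit, below-1M legacy, or
 * 64-bit), and bit 3 marks the region prefetchable.
 */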
static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	u32 mem_type;
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		flags |= IORESOURCE_IO;
		return flags;
	}

	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
	flags |= IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	default:
		/* mem unknown type treated as 32-bit BAR */
		break;
	}
	return flags;
}

#define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

/**
 * pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l = 0, sz = 0, mask;
	u64 l64, sz64, mask64;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;

	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

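	/*
	 * Size probe: save the original BAR, write all 1s to it, and read
	 * it back.  Address bits the BAR does not implement read back as
	 * zero, so the result tells us how much space the BAR decodes.
	 */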
	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (sz == 0xffffffff)
		sz = 0;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l64 = l & PCI_BASE_ADDRESS_IO_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
			mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
		} else {
			l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		if (l & PCI_ROM_ADDRESS_ENABLE)
			res->flags |= IORESOURCE_ROM_ENABLE;
		l64 = l & PCI_ROM_ADDRESS_MASK;
		sz64 = sz & PCI_ROM_ADDRESS_MASK;
		mask64 = PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);
		mask64 |= ((u64)~0 << 32);
	}

	if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (!sz64)
		goto fail;

	sz64 = pci_size(l64, sz64, mask64);
	if (!sz64) {
		dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
			 pos);
		goto fail;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
		    && sz64 > 0x100000000ULL) {
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			res->start = 0;
			res->end = 0;
			dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
				pos, (unsigned long long)sz64);
			goto out;
		}

		if ((sizeof(pci_bus_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = sz64;
			dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
				 pos, (unsigned long long)l64);
			goto out;
		}
	}

	region.start = l64;
	region.end = l64 + sz64;

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU).  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		res->flags |= IORESOURCE_UNSET;
		res->start = 0;
		res->end = region.end - region.start;
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
			 pos, (unsigned long long)region.start);
	}

	goto out;

fail:
	res->flags = 0;
out:
	if (res->flags)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}

static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	if (dev->non_compliant_bars)
		return;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}

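/*
 * A bridge's I/O window registers carry only address bits 15:12 (bits
 * 15:10 on 1K-granularity bridges); the low bits of PCI_IO_BASE say
 * whether 32-bit I/O addressing is supported, in which case the upper
 * 16 address bits come from PCI_IO_BASE_UPPER16/PCI_IO_LIMIT_UPPER16.
 */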
static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

static void pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

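/*
 * The prefetchable window registers hold address bits 31:20; a low
 * nibble of PCI_PREF_RANGE_TYPE_64 means the bridge implements the
 * UPPER32 registers and the window may live above 4GB.
 */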
static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	u64 base64, limit64;
	pci_bus_addr_t base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
			base64 |= (u64) mem_base_hi << 32;
			limit64 |= (u64) mem_limit_hi << 32;
		}
	}

	base = (pci_bus_addr_t) base64;
	limit = (pci_bus_addr_t) limit64;

	if (base != base64) {
		dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
			(unsigned long long) base64);
		return;
	}

	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
			IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res && res->flags) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}

static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return NULL;

	INIT_LIST_HEAD(&b->node);
	INIT_LIST_HEAD(&b->children);
	INIT_LIST_HEAD(&b->devices);
	INIT_LIST_HEAD(&b->slots);
	INIT_LIST_HEAD(&b->resources);
	b->max_bus_speed = PCI_SPEED_UNKNOWN;
	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	if (parent)
		b->domain_nr = parent->domain_nr;
#endif
	return b;
}

static void devm_pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);
}

static void pci_release_host_bridge_dev(struct device *dev)
{
	devm_pci_release_host_bridge_dev(dev);
	kfree(to_pci_host_bridge(dev));
}

struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
{
	struct pci_host_bridge *bridge;

	bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);
	bridge->dev.release = pci_release_host_bridge_dev;

	return bridge;
}
EXPORT_SYMBOL(pci_alloc_host_bridge);

struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
						   size_t priv)
{
	struct pci_host_bridge *bridge;

	bridge = devm_kzalloc(dev, sizeof(*bridge) + priv, GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);
	bridge->dev.release = devm_pci_release_host_bridge_dev;

	return bridge;
}
EXPORT_SYMBOL(devm_pci_alloc_host_bridge);

void pci_free_host_bridge(struct pci_host_bridge *bridge)
{
	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}
EXPORT_SYMBOL(pci_free_host_bridge);

static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};

void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);

static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};

621{
622 int index = 0;
623
624 if (agpstat & 4)
625 index = 3;
626 else if (agpstat & 2)
627 index = 2;
628 else if (agpstat & 1)
629 index = 1;
630 else
631 goto out;
632
633 if (agp3) {
634 index += 2;
635 if (index == 5)
636 index = 0;
637 }
638
639 out:
640 return agp_speeds[index];
641}

static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
				max = PCI_SPEED_133MHz_PCIX_ECC;
			else
				max = PCI_SPEED_133MHz_PCIX;
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}

static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;

	/*
	 * Any firmware interface that can resolve the msi_domain
	 * should be called from here.
	 */
	d = pci_host_bridge_of_msi_domain(bus);
	if (!d)
		d = pci_host_bridge_acpi_msi_domain(bus);

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
	/*
	 * If no IRQ domain was found via the OF tree, try looking it up
	 * directly through the fwnode_handle.
	 */
	if (!d) {
		struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);

		if (fwnode)
			d = irq_find_matching_fwnode(fwnode,
						     DOMAIN_BUS_PCI_MSI);
	}
#endif

	return d;
}

static void pci_set_bus_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;
	struct pci_bus *b;

	/*
	 * The bus can be a root bus, a subordinate bus, or a virtual bus
	 * created by an SR-IOV device.  Walk up to the first bridge device
	 * found or derive the domain from the host bridge.
	 */
	for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
		if (b->self)
			d = dev_get_msi_domain(&b->self->dev);
	}

	if (!d)
		d = pci_host_bridge_msi_domain(b);

	dev_set_msi_domain(&bus->dev, d);
}

static int pci_register_host_bridge(struct pci_host_bridge *bridge)
{
	struct device *parent = bridge->dev.parent;
	struct resource_entry *window, *n;
	struct pci_bus *bus, *b;
	resource_size_t offset;
	LIST_HEAD(resources);
	struct resource *res;
	char addr[64], *fmt;
	const char *name;
	int err;

	bus = pci_alloc_bus(NULL);
	if (!bus)
		return -ENOMEM;

	bridge->bus = bus;

	/* temporarily move resources off the list */
	list_splice_init(&bridge->windows, &resources);
	bus->sysdata = bridge->sysdata;
	bus->msi = bridge->msi;
	bus->ops = bridge->ops;
	bus->number = bus->busn_res.start = bridge->busnr;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
#endif

	b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
	if (b) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b->dev, "bus already known\n");
		err = -EEXIST;
		goto free;
	}

	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus),
		     bridge->busnr);

	err = pcibios_root_bridge_prepare(bridge);
	if (err)
		goto free;

	err = device_register(&bridge->dev);
	if (err) {
		put_device(&bridge->dev);
		goto free;
	}
	bus->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(bus->bridge);
	pci_set_bus_of_node(bus);
	pci_set_bus_msi_domain(bus);

	if (!parent)
		set_dev_node(bus->bridge, pcibus_to_node(bus));

	bus->dev.class = &pcibus_class;
	bus->dev.parent = bus->bridge;

	dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number);
	name = dev_name(&bus->dev);

	err = device_register(&bus->dev);
	if (err)
		goto unregister;

	pcibios_add_bus(bus);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(bus);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", name);
	else
		pr_info("PCI host bridge to bus %s\n", name);

	/* Add initial resources to the bus */
	resource_list_for_each_entry_safe(window, n, &resources) {
		list_move_tail(&window->node, &bridge->windows);
		offset = window->offset;
		res = window->res;

		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(bus, bus->number, res->end);
		else
			pci_bus_add_resource(bus, res, 0);

		if (offset) {
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";

			snprintf(addr, sizeof(addr), fmt,
				 (unsigned long long)(res->start - offset),
				 (unsigned long long)(res->end - offset));
		} else
			addr[0] = '\0';

		dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&bus->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return 0;

unregister:
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);

free:
	kfree(bus);
	return err;
}

static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus(parent);
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/*
	 * Initialize some portions of the bus device, but don't register
	 * it now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	pci_set_bus_msi_domain(child);
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	if (child->ops->add_bus) {
		ret = child->ops->add_bus(child);
		if (WARN_ON(ret < 0))
			dev_err(&child->dev, "failed to add bus: %d\n", ret);
	}

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}

struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
				int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}
EXPORT_SYMBOL(pci_add_new_bus);

static void pci_enable_crs(struct pci_dev *pdev)
{
	u16 root_cap = 0;

	/* Enable CRS Software Visibility if supported */
	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
	if (root_cap & PCI_EXP_RTCAP_CRSVIS)
		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
					 PCI_EXP_RTCTL_CRSSVE);
}

/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	/*
	 * Make sure the bridge is powered on to be able to access config
	 * space of devices below it.
	 */
	pm_runtime_get_sync(&dev->dev);

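	/*
	 * PCI_PRIMARY_BUS layout: primary | secondary << 8 |
	 * subordinate << 16 | secondary latency timer << 24.
	 */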
	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate)) {
		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/*
	 * Disable MasterAbortMode during probing to avoid reporting
	 * of bus errors (in some architectures).
	 */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	pci_enable_crs(dev);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * The bus might already exist for two reasons: Either we are
		 * rescanning the bus or the bus is reachable through more than
		 * one bridge. The second case can happen with the i450NX
		 * chipset.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > subordinate)
			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
				 subordinate, cmax);
		/* subordinate should equal child->busn_res.end */
		if (subordinate > max)
			max = subordinate;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken || is_cardbus)
				/*
				 * Temporarily disable forwarding of the
				 * configuration cycles on all bridges in
				 * this bus segment to avoid possible
				 * conflicts in the second pass between two
				 * bridges programmed with overlapping bus
				 * ranges.
				 */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/*
		 * Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in
		 * this case we only re-scan this bus.
		 */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, max+1);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, max+1,
						bus->busn_res.end);
		}
		max++;
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)         <<  0)
		      | ((unsigned int)(child->busn_res.start)  <<  8)
		      | ((unsigned int)(child->busn_res.end)    << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			max = pci_scan_child_bus(child);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
						 max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus
					 * bridges -- try to leave one
					 * valid bus number for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
				 &child->busn_res,
				 (bus->number > child->busn_res.end &&
				  bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				 bus->self->transparent ? " transparent" : "",
				 dev_name(&bus->dev),
				 &bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	pm_runtime_put(&dev->dev);

	return max;
}
EXPORT_SYMBOL(pci_scan_bridge);

/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;
	int type;
	struct pci_dev *parent;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;

	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_flags_reg = reg16;
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;

	/*
	 * A Root Port or a PCI-to-PCIe bridge is always the upstream end
	 * of a Link.  No PCIe component has two Links.  Two Links are
	 * connected by a Switch that has a Port on each Link and internal
	 * logic to connect the two Ports.
	 */
	type = pci_pcie_type(pdev);
	if (type == PCI_EXP_TYPE_ROOT_PORT ||
	    type == PCI_EXP_TYPE_PCIE_BRIDGE)
		pdev->has_secondary_link = 1;
	else if (type == PCI_EXP_TYPE_UPSTREAM ||
		 type == PCI_EXP_TYPE_DOWNSTREAM) {
		parent = pci_upstream_bridge(pdev);

		/*
		 * Usually there's an upstream device (Root Port or Switch
		 * Downstream Port), but we can't assume one exists.
		 */
		if (parent && !parent->has_secondary_link)
			pdev->has_secondary_link = 1;
	}
}

void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
}

static void set_pcie_thunderbolt(struct pci_dev *dev)
{
	int vsec = 0;
	u32 header;

	while ((vsec = pci_find_next_ext_capability(dev, vsec,
						    PCI_EXT_CAP_ID_VNDR))) {
		pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);

		/* Is the device part of a Thunderbolt controller? */
		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
		    PCI_VNDR_HEADER_ID(header) == PCI_VSEC_ID_INTEL_TBT) {
			dev->is_thunderbolt = 1;
			return;
		}
	}
}

/**
 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
 * @dev: PCI device
 *
 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
 * when forwarding a type1 configuration request the bridge must check that
 * the extended register address field is zero.  The bridge is not permitted
 * to forward the transaction and must handle it as an Unsupported Request.
 * Some bridges do not follow this rule and simply drop the extended register
 * bits, resulting in the standard config space being aliased, every 256
 * bytes across the entire configuration space.  Test for this condition by
 * comparing the first dword of each potential alias to the vendor/device ID.
 * Known offenders:
 *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
 *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
 */
static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_QUIRKS
	int pos;
	u32 header, tmp;

	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);

	for (pos = PCI_CFG_SPACE_SIZE;
	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
		    || header != tmp)
			return false;
	}

	return true;
#else
	return false;
#endif
}

/**
 * pci_cfg_space_size - get the configuration space size of the PCI device.
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
 * access it.  Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge.  So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
static int pci_cfg_space_size_ext(struct pci_dev *dev)
{
	u32 status;
	int pos = PCI_CFG_SPACE_SIZE;

	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
		return PCI_CFG_SPACE_SIZE;
	if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
		return PCI_CFG_SPACE_SIZE;

	return PCI_CFG_SPACE_EXP_SIZE;
}

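/*
 * Only host bridges, PCI Express devices and PCI-X Mode 2 devices (those
 * reporting 266/533 MHz capability) can implement extended config space;
 * everything else is limited to the conventional 256 bytes.
 */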
int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;
	u16 class;

	class = dev->class >> 8;
	if (class == PCI_CLASS_BRIDGE_HOST)
		return pci_cfg_space_size_ext(dev);

	if (pci_is_pcie(dev))
		return pci_cfg_space_size_ext(dev);

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return PCI_CFG_SPACE_SIZE;

	pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
	if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
		return pci_cfg_space_size_ext(dev);

	return PCI_CFG_SPACE_SIZE;
}

#define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)

static void pci_msi_setup_pci_dev(struct pci_dev *dev)
{
	/*
	 * Disable the MSI hardware to avoid screaming interrupts
	 * during boot.  This is the power on reset default so
	 * usually this should be a noop.
	 */
	dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (dev->msi_cap)
		pci_msi_set_enable(dev, 0);

	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (dev->msix_cap)
		pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
}

/**
 * pci_intx_mask_broken - test PCI_COMMAND_INTX_DISABLE writability
 * @dev: PCI device
 *
 * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev.  Check this
 * at enumeration-time to avoid modifying PCI_COMMAND at run-time.
 */
static int pci_intx_mask_broken(struct pci_dev *dev)
{
	u16 orig, toggle, new;

	pci_read_config_word(dev, PCI_COMMAND, &orig);
	toggle = orig ^ PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(dev, PCI_COMMAND, toggle);
	pci_read_config_word(dev, PCI_COMMAND, &new);

	pci_write_config_word(dev, PCI_COMMAND, orig);

	/*
	 * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI
	 * r2.3, so strictly speaking, a device is not *broken* if it's not
	 * writable.  But we'll live with the misnomer for now.
	 */
	if (new != toggle)
		return 1;
	return 0;
}

/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor, class, memory and IO-space addresses, IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and negative if unknown type of device (not normal,
 * bridge or CardBus).
 */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u16 cmd;
	u8 hdr_type;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	pci_dev_assign_slot(dev);
	/*
	 * Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	 * set this higher, assuming the system even supports it.
	 */
	dev->dma_mask = 0xffffffff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		    /* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* need to have dev->cfg_size ready */
	set_pcie_thunderbolt(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	if (dev->non_compliant_bars && !dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
			dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
			cmd &= ~PCI_COMMAND_IO;
			cmd &= ~PCI_COMMAND_MEMORY;
			pci_write_config_word(dev, PCI_COMMAND, cmd);
		}
	}

	dev->broken_intx_masking = pci_intx_mask_broken(dev);

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code.  Legacy mode ATA controllers have fixed
		 * addresses.  These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
					 res);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
					 res);
			}
			if ((progif & 4) == 0) {
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
					 res);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
					 res);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/*
		 * The PCI-to-PCI bridge spec requires that subtractive
		 * decoding (i.e. transparent) bridge must have programming
		 * interface code of 0x01.
		 */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
			dev->hdr_type);
		return -EIO;

	bad:
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
			dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED << 8;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}

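/*
 * PCIe Max Payload Size is a power of two: the three-bit DEVCAP field
 * encodes 128 << pcie_mpss bytes, i.e. 128 through 4096.
 */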
static void pci_configure_mps(struct pci_dev *dev)
{
	struct pci_dev *bridge = pci_upstream_bridge(dev);
	int mps, p_mps, rc;

	if (!pci_is_pcie(dev))
		return;

	/* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */
	if (dev->is_virtfn)
		return;

	/*
	 * For Root Complex Integrated Endpoints, program the maximum
	 * supported value unless limited by the PCIE_BUS_PEER2PEER case.
	 */
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
		if (pcie_bus_config == PCIE_BUS_PEER2PEER)
			mps = 128;
		else
			mps = 128 << dev->pcie_mpss;
		rc = pcie_set_mps(dev, mps);
		if (rc) {
			pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
				 mps);
		}
		return;
	}

	if (!bridge || !pci_is_pcie(bridge))
		return;

	mps = pcie_get_mps(dev);
	p_mps = pcie_get_mps(bridge);

	if (mps == p_mps)
		return;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 mps, pci_name(bridge), p_mps);
		return;
	}

	/*
	 * Fancier MPS configuration is done later by
	 * pcie_bus_configure_settings()
	 */
	if (pcie_bus_config != PCIE_BUS_DEFAULT)
		return;

	rc = pcie_set_mps(dev, p_mps);
	if (rc) {
		dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 p_mps);
		return;
	}

	dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
		 p_mps, mps, 128 << dev->pcie_mpss);
}

static struct hpp_type0 pci_default_type0 = {
	.revision	= 1,
	.cache_line_size = 8,
	.latency_timer	= 0x40,
	.enable_serr	= 0,
	.enable_perr	= 0,
};

static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
{
	u16 pci_cmd, pci_bctl;

	if (!hpp)
		hpp = &pci_default_type0;

	if (hpp->revision > 1) {
		dev_warn(&dev->dev,
			 "PCI settings rev %d not supported; using defaults\n",
			 hpp->revision);
		hpp = &pci_default_type0;
	}

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
	if (hpp->enable_serr)
		pci_cmd |= PCI_COMMAND_SERR;
	if (hpp->enable_perr)
		pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

	/* Program bridge control value */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
				      hpp->latency_timer);
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
		if (hpp->enable_serr)
			pci_bctl |= PCI_BRIDGE_CTL_SERR;
		if (hpp->enable_perr)
			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
	}
}

static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
{
	int pos;

	if (!hpp)
		return;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return;

	dev_warn(&dev->dev, "PCI-X settings not supported\n");
}

static bool pcie_root_rcb_set(struct pci_dev *dev)
{
	struct pci_dev *rp = pcie_find_root_port(dev);
	u16 lnkctl;

	if (!rp)
		return false;

	pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
	if (lnkctl & PCI_EXP_LNKCTL_RCB)
		return true;

	return false;
}

static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
{
	int pos;
	u32 reg32;

	if (!hpp)
		return;

	if (!pci_is_pcie(dev))
		return;

	if (hpp->revision > 1) {
		dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
			 hpp->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				   PCI_EXP_DEVCTL_READRQ;
	hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev)) {

		/*
		 * If the Root Port supports Read Completion Boundary of
		 * 128, set RCB to 128.  Otherwise, clear it.
		 */
		hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
		hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
		if (pcie_root_rcb_set(dev))
			hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;

		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
	}

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
	/* Don't enable ECRC generation or checking if unsupported */
	if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
		reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
	if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
		reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}

int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
{
	struct pci_host_bridge *host;
	u32 cap;
	u16 ctl;
	int ret;

	if (!pci_is_pcie(dev))
		return 0;

	ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
	if (ret)
		return 0;

	if (!(cap & PCI_EXP_DEVCAP_EXT_TAG))
		return 0;

	ret = pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
	if (ret)
		return 0;

	host = pci_find_host_bridge(dev->bus);
	if (!host)
		return 0;

	/*
	 * If some device in the hierarchy doesn't handle Extended Tags
	 * correctly, make sure they're disabled.
	 */
	if (host->no_ext_tags) {
		if (ctl & PCI_EXP_DEVCTL_EXT_TAG) {
			dev_info(&dev->dev, "disabling Extended Tags\n");
			pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
						   PCI_EXP_DEVCTL_EXT_TAG);
		}
		return 0;
	}

	if (!(ctl & PCI_EXP_DEVCTL_EXT_TAG)) {
		dev_info(&dev->dev, "enabling Extended Tags\n");
		pcie_capability_set_word(dev, PCI_EXP_DEVCTL,
					 PCI_EXP_DEVCTL_EXT_TAG);
	}
	return 0;
}

/**
 * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
 * @dev: PCI device to query
 *
 * Returns true if the device has the Relaxed Ordering attribute enabled.
 */
bool pcie_relaxed_ordering_enabled(struct pci_dev *dev)
{
	u16 v;

	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v);

	return !!(v & PCI_EXP_DEVCTL_RELAX_EN);
}
EXPORT_SYMBOL(pcie_relaxed_ordering_enabled);

static void pci_configure_relaxed_ordering(struct pci_dev *dev)
{
	struct pci_dev *root;

	/* PCI_EXP_DEVICE_RELAX_EN is RsvdP in VFs */
	if (dev->is_virtfn)
		return;

	if (!pcie_relaxed_ordering_enabled(dev))
		return;

	/*
	 * For now, we only deal with Relaxed Ordering issues with Root
	 * Ports.  Peer-to-Peer DMA is another can of worms.
	 */
	root = pci_find_pcie_root_port(dev);
	if (!root)
		return;

	if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) {
		pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
					   PCI_EXP_DEVCTL_RELAX_EN);
		dev_info(&dev->dev, "Disable Relaxed Ordering because the Root Port didn't support it\n");
	}
}

static void pci_configure_device(struct pci_dev *dev)
{
	struct hotplug_params hpp;
	int ret;

	pci_configure_mps(dev);
	pci_configure_extended_tags(dev, NULL);
	pci_configure_relaxed_ordering(dev);

	memset(&hpp, 0, sizeof(hpp));
	ret = pci_get_hp_params(dev, &hpp);
	if (ret)
		return;

	program_hpp_type2(dev, hpp.t2);
	program_hpp_type1(dev, hpp.t1);
	program_hpp_type0(dev, hpp.t0);
}

static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}

/**
 * pci_release_dev - free a pci device structure when all users of it are finished.
 * @dev: device that's been disconnected
 *
 * Will be called only by the device core when all users of this pci device are
 * done.
 */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	pci_release_capabilities(pci_dev);
	pci_release_of_node(pci_dev);
	pcibios_release_device(pci_dev);
	pci_bus_put(pci_dev->bus);
	kfree(pci_dev->driver_override);
	kfree(pci_dev->dma_alias_mask);
	kfree(pci_dev);
}

struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
{
	struct pci_dev *dev;

	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	INIT_LIST_HEAD(&dev->bus_list);
	dev->dev.type = &pci_dev_type;
	dev->bus = pci_bus_get(bus);

	return dev;
}
EXPORT_SYMBOL(pci_alloc_dev);

static bool pci_bus_crs_vendor_id(u32 l)
{
	return (l & 0xffff) == 0x0001;
}

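/*
 * A CRS completion is signalled by the reserved Vendor ID 0x0001.  Poll
 * with exponential backoff (1 ms, 2 ms, 4 ms, ...) until a real Vendor
 * ID shows up or the timeout expires.
 */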
static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
			     int timeout)
{
	int delay = 1;

	if (!pci_bus_crs_vendor_id(*l))
		return true;	/* not a CRS completion */

	if (!timeout)
		return false;	/* CRS, but caller doesn't want to wait */

	/*
	 * We got the reserved Vendor ID that indicates a completion with
	 * Configuration Request Retry Status (CRS).  Retry until we get a
	 * valid Vendor ID or we time out.
	 */
	while (pci_bus_crs_vendor_id(*l)) {
		if (delay > timeout) {
			pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n",
				pci_domain_nr(bus), bus->number,
				PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);

			return false;
		}
		if (delay >= 1000)
			pr_info("pci %04x:%02x:%02x.%d: not ready after %dms; waiting\n",
				pci_domain_nr(bus), bus->number,
				PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);

		msleep(delay);
		delay *= 2;

		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;
	}

	if (delay >= 1000)
		pr_info("pci %04x:%02x:%02x.%d: ready after %dms\n",
			pci_domain_nr(bus), bus->number,
			PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);

	return true;
}

1976bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1977 int timeout)
1978{
1979 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1980 return false;
1981
1982 /* some broken boards return 0 or ~0 if a slot is empty: */
1983 if (*l == 0xffffffff || *l == 0x00000000 ||
1984 *l == 0x0000ffff || *l == 0xffff0000)
1985 return false;
1986
1987 if (pci_bus_crs_vendor_id(*l))
1988 return pci_bus_wait_crs(bus, devfn, l, timeout);
1989
1990 return true;
1991}
1992EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
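
/*
 * Illustrative sketch only (hypothetical caller): give a freshly powered
 * slot up to one second to exit CRS before trusting the Vendor ID; "bus"
 * and "devfn" are assumed to exist in the caller:
 *
 *	u32 id;
 *
 *	if (pci_bus_read_dev_vendor_id(bus, devfn, &id, 1000))
 *		pr_info("found device %04x:%04x\n",
 *			id & 0xffff, id >> 16);
 *
 * A timeout of 0 makes a CRS completion fail immediately instead of
 * being retried.
 */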
1993
1994/*
1995 * Read the config data for a PCI device, sanity-check it
1996 * and fill in the dev structure...
1997 */
1998static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1999{
2000 struct pci_dev *dev;
2001 u32 l;
2002
2003 if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
2004 return NULL;
2005
2006 dev = pci_alloc_dev(bus);
2007 if (!dev)
2008 return NULL;
2009
2010 dev->devfn = devfn;
2011 dev->vendor = l & 0xffff;
2012 dev->device = (l >> 16) & 0xffff;
2013
2014 pci_set_of_node(dev);
2015
2016 if (pci_setup_device(dev)) {
2017 pci_bus_put(dev->bus);
2018 kfree(dev);
2019 return NULL;
2020 }
2021
2022 return dev;
2023}
2024
2025static void pci_init_capabilities(struct pci_dev *dev)
2026{
2027 /* Enhanced Allocation */
2028 pci_ea_init(dev);
2029
2030 /* Setup MSI caps & disable MSI/MSI-X interrupts */
2031 pci_msi_setup_pci_dev(dev);
2032
2033 /* Buffers for saving PCIe and PCI-X capabilities */
2034 pci_allocate_cap_save_buffers(dev);
2035
2036 /* Power Management */
2037 pci_pm_init(dev);
2038
2039 /* Vital Product Data */
2040 pci_vpd_init(dev);
2041
2042 /* Alternative Routing-ID Forwarding */
2043 pci_configure_ari(dev);
2044
2045 /* Single Root I/O Virtualization */
2046 pci_iov_init(dev);
2047
2048 /* Address Translation Services */
2049 pci_ats_init(dev);
2050
2051 /* Enable ACS P2P upstream forwarding */
2052 pci_enable_acs(dev);
2053
2054 /* Precision Time Measurement */
2055 pci_ptm_init(dev);
2056
2057 /* Advanced Error Reporting */
2058 pci_aer_init(dev);
2059}
2060
/*
 * This is the per-device equivalent of pci_host_bridge_msi_domain().
 * Firmware interfaces that can select the MSI domain on a per-device
 * basis should be called from here.
 */
2066static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
2067{
2068 struct irq_domain *d;
2069
2070 /*
2071 * If a domain has been set through the pcibios_add_device
2072 * callback, then this is the one (platform code knows best).
2073 */
2074 d = dev_get_msi_domain(&dev->dev);
2075 if (d)
2076 return d;
2077
2078 /*
2079 * Let's see if we have a firmware interface able to provide
2080 * the domain.
2081 */
2082 d = pci_msi_get_device_domain(dev);
2083 if (d)
2084 return d;
2085
2086 return NULL;
2087}
2088
2089static void pci_set_msi_domain(struct pci_dev *dev)
2090{
2091 struct irq_domain *d;
2092
2093 /*
2094 * If the platform or firmware interfaces cannot supply a
2095 * device-specific MSI domain, then inherit the default domain
2096 * from the host bridge itself.
2097 */
2098 d = pci_dev_msi_domain(dev);
2099 if (!d)
2100 d = dev_get_msi_domain(&dev->bus->dev);
2101
2102 dev_set_msi_domain(&dev->dev, d);
2103}
2104
2105void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
2106{
2107 int ret;
2108
2109 pci_configure_device(dev);
2110
2111 device_initialize(&dev->dev);
2112 dev->dev.release = pci_release_dev;
2113
2114 set_dev_node(&dev->dev, pcibus_to_node(bus));
2115 dev->dev.dma_mask = &dev->dma_mask;
2116 dev->dev.dma_parms = &dev->dma_parms;
2117 dev->dev.coherent_dma_mask = 0xffffffffull;
2118
2119 pci_set_dma_max_seg_size(dev, 65536);
2120 pci_set_dma_seg_boundary(dev, 0xffffffff);
2121
2122 /* Fix up broken headers */
2123 pci_fixup_device(pci_fixup_header, dev);
2124
	/* Moved out from the quirk header fixup code */
2126 pci_reassigndev_resource_alignment(dev);
2127
2128 /* Clear the state_saved flag. */
2129 dev->state_saved = false;
2130
2131 /* Initialize various capabilities */
2132 pci_init_capabilities(dev);
2133
2134 /*
2135 * Add the device to our list of discovered devices
2136 * and the bus list for fixup functions, etc.
2137 */
2138 down_write(&pci_bus_sem);
2139 list_add_tail(&dev->bus_list, &bus->devices);
2140 up_write(&pci_bus_sem);
2141
2142 ret = pcibios_add_device(dev);
2143 WARN_ON(ret < 0);
2144
2145 /* Setup MSI irq domain */
2146 pci_set_msi_domain(dev);
2147
2148 /* Notifier could use PCI capabilities */
2149 dev->match_driver = false;
2150 ret = device_add(&dev->dev);
2151 WARN_ON(ret < 0);
2152}
2153
2154struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
2155{
2156 struct pci_dev *dev;
2157
2158 dev = pci_get_slot(bus, devfn);
2159 if (dev) {
2160 pci_dev_put(dev);
2161 return dev;
2162 }
2163
2164 dev = pci_scan_device(bus, devfn);
2165 if (!dev)
2166 return NULL;
2167
2168 pci_device_add(dev, bus);
2169
2170 return dev;
2171}
2172EXPORT_SYMBOL(pci_scan_single_device);
2173
2174static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
2175{
2176 int pos;
2177 u16 cap = 0;
2178 unsigned next_fn;
2179
2180 if (pci_ari_enabled(bus)) {
2181 if (!dev)
2182 return 0;
2183 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
2184 if (!pos)
2185 return 0;
2186
2187 pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
2188 next_fn = PCI_ARI_CAP_NFN(cap);
2189 if (next_fn <= fn)
2190 return 0; /* protect against malformed list */
2191
2192 return next_fn;
2193 }
2194
2195 /* dev may be NULL for non-contiguous multifunction devices */
2196 if (!dev || dev->multifunction)
2197 return (fn + 1) % 8;
2198
2199 return 0;
2200}
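
/*
 * For example, with ARI a device may implement a sparse set of functions
 * (say 0, 4 and 32): each function's Next Function Number field chains to
 * the next one, and next_fn() follows that chain.  Without ARI, the scan
 * simply tries functions 0-7 of the slot.
 */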
2201
2202static int only_one_child(struct pci_bus *bus)
2203{
2204 struct pci_dev *parent = bus->self;
2205
2206 if (!parent || !pci_is_pcie(parent))
2207 return 0;
2208 if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
2209 return 1;
2210
	/*
	 * PCIe downstream ports are bridges that normally lead only to
	 * device 0, but if PCI_SCAN_ALL_PCIE_DEVS is set, scan all
	 * possible devices, not just device 0.  See PCIe spec r3.0,
	 * sec 7.3.1.
	 */
2217 if (parent->has_secondary_link &&
2218 !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
2219 return 1;
2220 return 0;
2221}
2222
/**
 * pci_scan_slot - scan a PCI slot on a bus for devices
 * @bus: PCI bus to scan
 * @devfn: slot number to scan (function number must be zero)
 *
 * Scan a PCI slot on the specified PCI bus for devices, adding
 * discovered devices to the @bus->devices list.  New devices
 * will not have is_added set.
 *
 * Returns the number of new devices found.
 */
2234int pci_scan_slot(struct pci_bus *bus, int devfn)
2235{
2236 unsigned fn, nr = 0;
2237 struct pci_dev *dev;
2238
2239 if (only_one_child(bus) && (devfn > 0))
2240 return 0; /* Already scanned the entire slot */
2241
2242 dev = pci_scan_single_device(bus, devfn);
2243 if (!dev)
2244 return 0;
2245 if (!dev->is_added)
2246 nr++;
2247
2248 for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
2249 dev = pci_scan_single_device(bus, devfn + fn);
2250 if (dev) {
2251 if (!dev->is_added)
2252 nr++;
2253 dev->multifunction = 1;
2254 }
2255 }
2256
	/* New devices found below a bridge; initialize ASPM on its link */
2258 if (bus->self && nr)
2259 pcie_aspm_init_link_state(bus->self);
2260
2261 return nr;
2262}
2263EXPORT_SYMBOL(pci_scan_slot);
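
/*
 * Illustrative sketch only (hypothetical hotplug-style caller): scan
 * whatever occupies slot 3 of "bus" and make any new functions visible:
 *
 *	if (pci_scan_slot(bus, PCI_DEVFN(3, 0)))
 *		pci_bus_add_devices(bus);
 */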
2264
2265static int pcie_find_smpss(struct pci_dev *dev, void *data)
2266{
2267 u8 *smpss = data;
2268
2269 if (!pci_is_pcie(dev))
2270 return 0;
2271
2272 /*
2273 * We don't have a way to change MPS settings on devices that have
2274 * drivers attached. A hot-added device might support only the minimum
2275 * MPS setting (MPS=128). Therefore, if the fabric contains a bridge
2276 * where devices may be hot-added, we limit the fabric MPS to 128 so
2277 * hot-added devices will work correctly.
2278 *
2279 * However, if we hot-add a device to a slot directly below a Root
2280 * Port, it's impossible for there to be other existing devices below
2281 * the port. We don't limit the MPS in this case because we can
2282 * reconfigure MPS on both the Root Port and the hot-added device,
2283 * and there are no other devices involved.
2284 *
2285 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
2286 */
2287 if (dev->is_hotplug_bridge &&
2288 pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
2289 *smpss = 0;
2290
2291 if (*smpss > dev->pcie_mpss)
2292 *smpss = dev->pcie_mpss;
2293
2294 return 0;
2295}
2296
2297static void pcie_write_mps(struct pci_dev *dev, int mps)
2298{
2299 int rc;
2300
2301 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
2302 mps = 128 << dev->pcie_mpss;
2303
		/*
		 * For "Performance", the assumption is made that
		 * downstream communication will never be larger than
		 * the MRRS.  So, the MPS only needs to be configured
		 * for the upstream communication.  This being the case,
		 * walk from the top down and set the MPS of each child
		 * to that of its parent bus.
		 *
		 * Configure the device MPS with the smaller of the
		 * device MPSS or the bridge MPS (which is assumed to be
		 * properly configured at this point to the largest
		 * allowable MPS based on its parent bus).
		 */
		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
		    dev->bus->self)
			mps = min(mps, pcie_get_mps(dev->bus->self));
2319 }
2320
2321 rc = pcie_set_mps(dev, mps);
2322 if (rc)
2323 dev_err(&dev->dev, "Failed attempting to set the MPS\n");
2324}
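
/*
 * For reference, MPS and MPSS use a power-of-two encoding: payload bytes
 * = 128 << encoding, so 0 -> 128, 1 -> 256, ..., 5 -> 4096.  A device
 * with dev->pcie_mpss == 2, for example, yields mps = 128 << 2 = 512
 * bytes above.
 */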
2325
2326static void pcie_write_mrrs(struct pci_dev *dev)
2327{
2328 int rc, mrrs;
2329
	/*
	 * In the "safe" case, do not configure the MRRS.  There appear to
	 * be issues with setting MRRS to 0 on a number of devices.
	 */
2333 if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
2334 return;
2335
	/*
	 * For max performance, the MRRS must be set to the largest supported
	 * value.  However, it cannot be configured larger than the MPS the
	 * device or the bus can support.  This should already be properly
	 * configured by a prior call to pcie_write_mps().
	 */
2341 mrrs = pcie_get_mps(dev);
2342
	/*
	 * MRRS is an R/W register.  Invalid values can be written, but a
	 * subsequent read will verify whether the value is acceptable.
	 * If the MRRS value provided is not acceptable (e.g., too large),
	 * shrink the value until it is acceptable to the HW.
	 */
2348 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
2349 rc = pcie_set_readrq(dev, mrrs);
2350 if (!rc)
2351 break;
2352
2353 dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
2354 mrrs /= 2;
2355 }
2356
2357 if (mrrs < 128)
2358 dev_err(&dev->dev, "MRRS was unable to be configured with a safe value. If problems are experienced, try running with pci=pcie_bus_safe\n");
2359}
2360
2361static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
2362{
2363 int mps, orig_mps;
2364
2365 if (!pci_is_pcie(dev))
2366 return 0;
2367
2368 if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
2369 pcie_bus_config == PCIE_BUS_DEFAULT)
2370 return 0;
2371
2372 mps = 128 << *(u8 *)data;
2373 orig_mps = pcie_get_mps(dev);
2374
2375 pcie_write_mps(dev, mps);
2376 pcie_write_mrrs(dev);
2377
2378 dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
2379 pcie_get_mps(dev), 128 << dev->pcie_mpss,
2380 orig_mps, pcie_get_readrq(dev));
2381
2382 return 0;
2383}
2384
/*
 * pcie_bus_configure_settings() requires that pci_walk_bus() work in a
 * top-down, parents-then-children fashion.  If this changes, this code
 * will not work as designed.
 */
2389void pcie_bus_configure_settings(struct pci_bus *bus)
2390{
2391 u8 smpss = 0;
2392
2393 if (!bus->self)
2394 return;
2395
2396 if (!pci_is_pcie(bus->self))
2397 return;
2398
	/*
	 * FIXME: Peer-to-peer DMA is possible, though the endpoint would
	 * need to be aware of the MPS of the destination.  To work around
	 * this, simply force the MPS of the entire system to the smallest
	 * possible value.
	 */
2403 if (pcie_bus_config == PCIE_BUS_PEER2PEER)
2404 smpss = 0;
2405
2406 if (pcie_bus_config == PCIE_BUS_SAFE) {
2407 smpss = bus->self->pcie_mpss;
2408
2409 pcie_find_smpss(bus->self, &smpss);
2410 pci_walk_bus(bus, pcie_find_smpss, &smpss);
2411 }
2412
2413 pcie_bus_configure_set(bus->self, &smpss);
2414 pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
2415}
2416EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
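
/*
 * Host bridge drivers typically call this for each child bus once
 * resources have been assigned; pci_host_probe() below does exactly
 * that:
 *
 *	list_for_each_entry(child, &bus->children, node)
 *		pcie_bus_configure_settings(child);
 */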
2417
2418/*
2419 * Called after each bus is probed, but before its children are examined. This
2420 * is marked as __weak because multiple architectures define it.
2421 */
2422void __weak pcibios_fixup_bus(struct pci_bus *bus)
2423{
2424 /* nothing to do, expected to be removed in the future */
2425}
2426
2427unsigned int pci_scan_child_bus(struct pci_bus *bus)
2428{
2429 unsigned int devfn, pass, max = bus->busn_res.start;
2430 struct pci_dev *dev;
2431
2432 dev_dbg(&bus->dev, "scanning bus\n");
2433
2434 /* Go find them, Rover! */
2435 for (devfn = 0; devfn < 0x100; devfn += 8)
2436 pci_scan_slot(bus, devfn);
2437
2438 /* Reserve buses for SR-IOV capability. */
2439 max += pci_iov_bus_range(bus);
2440
2441 /*
2442 * After performing arch-dependent fixup of the bus, look behind
2443 * all PCI-to-PCI bridges on this bus.
2444 */
2445 if (!bus->is_added) {
2446 dev_dbg(&bus->dev, "fixups for bus\n");
2447 pcibios_fixup_bus(bus);
2448 bus->is_added = 1;
2449 }
2450
2451 for (pass = 0; pass < 2; pass++)
2452 list_for_each_entry(dev, &bus->devices, bus_list) {
2453 if (pci_is_bridge(dev))
2454 max = pci_scan_bridge(bus, dev, max, pass);
2455 }
2456
2457 /*
2458 * Make sure a hotplug bridge has at least the minimum requested
2459 * number of buses.
2460 */
2461 if (bus->self && bus->self->is_hotplug_bridge && pci_hotplug_bus_size) {
2462 if (max - bus->busn_res.start < pci_hotplug_bus_size - 1)
2463 max = bus->busn_res.start + pci_hotplug_bus_size - 1;
2464
2465 /* Do not allocate more buses than we have room left */
2466 if (max > bus->busn_res.end)
2467 max = bus->busn_res.end;
2468 }
2469
	/*
	 * We've scanned the bus, so we know all about what's on the
	 * other side of any bridges that may be on this bus, plus any
	 * devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
2477 dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
2478 return max;
2479}
2480EXPORT_SYMBOL_GPL(pci_scan_child_bus);
2481
2482/**
2483 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
2484 * @bridge: Host bridge to set up.
2485 *
2486 * Default empty implementation. Replace with an architecture-specific setup
2487 * routine, if necessary.
2488 */
2489int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
2490{
2491 return 0;
2492}
2493
2494void __weak pcibios_add_bus(struct pci_bus *bus)
2495{
2496}
2497
2498void __weak pcibios_remove_bus(struct pci_bus *bus)
2499{
2500}
2501
2502struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
2503 struct pci_ops *ops, void *sysdata, struct list_head *resources)
2504{
2505 int error;
2506 struct pci_host_bridge *bridge;
2507
2508 bridge = pci_alloc_host_bridge(0);
2509 if (!bridge)
2510 return NULL;
2511
2512 bridge->dev.parent = parent;
2513
2514 list_splice_init(resources, &bridge->windows);
2515 bridge->sysdata = sysdata;
2516 bridge->busnr = bus;
2517 bridge->ops = ops;
2518
2519 error = pci_register_host_bridge(bridge);
2520 if (error < 0)
2521 goto err_out;
2522
2523 return bridge->bus;
2524
2525err_out:
2526 kfree(bridge);
2527 return NULL;
2528}
2529EXPORT_SYMBOL_GPL(pci_create_root_bus);
2530
2531int pci_host_probe(struct pci_host_bridge *bridge)
2532{
2533 struct pci_bus *bus, *child;
2534 int ret;
2535
2536 ret = pci_scan_root_bus_bridge(bridge);
2537 if (ret < 0) {
		dev_err(bridge->dev.parent, "Scanning root bridge failed\n");
2539 return ret;
2540 }
2541
2542 bus = bridge->bus;
2543
2544 /*
2545 * We insert PCI resources into the iomem_resource and
2546 * ioport_resource trees in either pci_bus_claim_resources()
2547 * or pci_bus_assign_resources().
2548 */
2549 if (pci_has_flag(PCI_PROBE_ONLY)) {
2550 pci_bus_claim_resources(bus);
2551 } else {
2552 pci_bus_size_bridges(bus);
2553 pci_bus_assign_resources(bus);
2554
2555 list_for_each_entry(child, &bus->children, node)
2556 pcie_bus_configure_settings(child);
2557 }
2558
2559 pci_bus_add_devices(bus);
2560 return 0;
2561}
2562EXPORT_SYMBOL_GPL(pci_host_probe);
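
/*
 * Illustrative sketch only (hypothetical native host controller probe):
 * fill in the bridge, then let pci_host_probe() scan, assign resources
 * and register devices.  "dev", "priv" and "my_root_ops" are made-up
 * names, and the bridge windows are assumed to be populated already:
 *
 *	bridge = pci_alloc_host_bridge(sizeof(*priv));
 *	if (!bridge)
 *		return -ENOMEM;
 *	bridge->dev.parent = dev;
 *	bridge->sysdata = priv;
 *	bridge->ops = &my_root_ops;
 *	return pci_host_probe(bridge);
 */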
2563
2564int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
2565{
2566 struct resource *res = &b->busn_res;
2567 struct resource *parent_res, *conflict;
2568
2569 res->start = bus;
2570 res->end = bus_max;
2571 res->flags = IORESOURCE_BUS;
2572
2573 if (!pci_is_root_bus(b))
2574 parent_res = &b->parent->busn_res;
2575 else {
2576 parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
2577 res->flags |= IORESOURCE_PCI_FIXED;
2578 }
2579
2580 conflict = request_resource_conflict(parent_res, res);
2581
2582 if (conflict)
		dev_printk(KERN_DEBUG, &b->dev,
			   "busn_res: cannot insert %pR under %s%pR (conflicts with %s %pR)\n",
			   res, pci_is_root_bus(b) ? "domain " : "",
			   parent_res, conflict->name, conflict);
2587
2588 return conflict == NULL;
2589}
2590
2591int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
2592{
2593 struct resource *res = &b->busn_res;
2594 struct resource old_res = *res;
2595 resource_size_t size;
2596 int ret;
2597
2598 if (res->start > bus_max)
2599 return -EINVAL;
2600
2601 size = bus_max - res->start + 1;
2602 ret = adjust_resource(res, res->start, size);
	dev_printk(KERN_DEBUG, &b->dev,
		   "busn_res: %pR end %s updated to %02x\n",
		   &old_res, ret ? "cannot be" : "is", bus_max);
2606
2607 if (!ret && !res->parent)
2608 pci_bus_insert_busn_res(b, res->start, res->end);
2609
2610 return ret;
2611}
2612
2613void pci_bus_release_busn_res(struct pci_bus *b)
2614{
2615 struct resource *res = &b->busn_res;
2616 int ret;
2617
2618 if (!res->flags || !res->parent)
2619 return;
2620
2621 ret = release_resource(res);
	dev_printk(KERN_DEBUG, &b->dev,
		   "busn_res: %pR %s released\n",
		   res, ret ? "cannot be" : "is");
2625}
2626
2627int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge)
2628{
2629 struct resource_entry *window;
2630 bool found = false;
2631 struct pci_bus *b;
2632 int max, bus, ret;
2633
2634 if (!bridge)
2635 return -EINVAL;
2636
2637 resource_list_for_each_entry(window, &bridge->windows)
2638 if (window->res->flags & IORESOURCE_BUS) {
2639 found = true;
2640 break;
2641 }
2642
2643 ret = pci_register_host_bridge(bridge);
2644 if (ret < 0)
2645 return ret;
2646
2647 b = bridge->bus;
2648 bus = bridge->busnr;
2649
2650 if (!found) {
2651 dev_info(&b->dev,
2652 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2653 bus);
2654 pci_bus_insert_busn_res(b, bus, 255);
2655 }
2656
2657 max = pci_scan_child_bus(b);
2658
2659 if (!found)
2660 pci_bus_update_busn_res_end(b, max);
2661
2662 return 0;
2663}
2664EXPORT_SYMBOL(pci_scan_root_bus_bridge);
2665
2666struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
2667 struct pci_ops *ops, void *sysdata, struct list_head *resources)
2668{
2669 struct resource_entry *window;
2670 bool found = false;
2671 struct pci_bus *b;
2672 int max;
2673
2674 resource_list_for_each_entry(window, resources)
2675 if (window->res->flags & IORESOURCE_BUS) {
2676 found = true;
2677 break;
2678 }
2679
2680 b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
2681 if (!b)
2682 return NULL;
2683
2684 if (!found) {
2685 dev_info(&b->dev,
2686 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2687 bus);
2688 pci_bus_insert_busn_res(b, bus, 255);
2689 }
2690
2691 max = pci_scan_child_bus(b);
2692
2693 if (!found)
2694 pci_bus_update_busn_res_end(b, max);
2695
2696 return b;
2697}
2698EXPORT_SYMBOL(pci_scan_root_bus);
2699
2700struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
2701 void *sysdata)
2702{
2703 LIST_HEAD(resources);
2704 struct pci_bus *b;
2705
2706 pci_add_resource(&resources, &ioport_resource);
2707 pci_add_resource(&resources, &iomem_resource);
2708 pci_add_resource(&resources, &busn_resource);
2709 b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
2710 if (b) {
2711 pci_scan_child_bus(b);
2712 } else {
2713 pci_free_resource_list(&resources);
2714 }
2715 return b;
2716}
2717EXPORT_SYMBOL(pci_scan_bus);
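
/*
 * Illustrative sketch only (legacy-style use): scan bus 0 with the
 * architecture's config accessors and register what was found;
 * "my_pci_ops" is a made-up struct pci_ops:
 *
 *	struct pci_bus *b = pci_scan_bus(0, &my_pci_ops, NULL);
 *
 *	if (b)
 *		pci_bus_add_devices(b);
 */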
2718
/**
 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices
 * @bridge: PCI bridge for the bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * and enable them, resizing bridge MMIO/IO resources if necessary
 * and possible.  The caller must ensure the child devices are already
 * removed for resizing to occur.
 *
 * Returns the maximum subordinate bus number discovered.
 */
2730unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2731{
2732 unsigned int max;
2733 struct pci_bus *bus = bridge->subordinate;
2734
2735 max = pci_scan_child_bus(bus);
2736
2737 pci_assign_unassigned_bridge_resources(bridge);
2738
2739 pci_bus_add_devices(bus);
2740
2741 return max;
2742}
2743
/**
 * pci_rescan_bus - scan a PCI bus for devices
 * @bus: PCI bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * and enable them.
 *
 * Returns the maximum subordinate bus number discovered.
 */
2753unsigned int pci_rescan_bus(struct pci_bus *bus)
2754{
2755 unsigned int max;
2756
2757 max = pci_scan_child_bus(bus);
2758 pci_assign_unassigned_bus_resources(bus);
2759 pci_bus_add_devices(bus);
2760
2761 return max;
2762}
2763EXPORT_SYMBOL_GPL(pci_rescan_bus);
2764
2765/*
2766 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
2767 * routines should always be executed under this mutex.
2768 */
2769static DEFINE_MUTEX(pci_rescan_remove_lock);
2770
2771void pci_lock_rescan_remove(void)
2772{
2773 mutex_lock(&pci_rescan_remove_lock);
2774}
2775EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2776
2777void pci_unlock_rescan_remove(void)
2778{
2779 mutex_unlock(&pci_rescan_remove_lock);
2780}
2781EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
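
/*
 * Typical pattern: any rescan or removal is wrapped in the lock above.
 * The sysfs "rescan" attribute, for instance, effectively does:
 *
 *	pci_lock_rescan_remove();
 *	pci_rescan_bus(bus);
 *	pci_unlock_rescan_remove();
 */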
2782
2783static int __init pci_sort_bf_cmp(const struct device *d_a,
2784 const struct device *d_b)
2785{
2786 const struct pci_dev *a = to_pci_dev(d_a);
2787 const struct pci_dev *b = to_pci_dev(d_b);
2788
2789 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2790 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;
2791
2792 if (a->bus->number < b->bus->number) return -1;
2793 else if (a->bus->number > b->bus->number) return 1;
2794
2795 if (a->devfn < b->devfn) return -1;
2796 else if (a->devfn > b->devfn) return 1;
2797
2798 return 0;
2799}
2800
2801void __init pci_sort_breadthfirst(void)
2802{
2803 bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
2804}