// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Incremental bus scan, based on bus topology
 *
 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/atomic.h>
#include <asm/byteorder.h>

#include "core.h"

#define SELF_ID_PHY_ID(q)		(((q) >> 24) & 0x3f)
#define SELF_ID_EXTENDED(q)		(((q) >> 23) & 0x01)
#define SELF_ID_LINK_ON(q)		(((q) >> 22) & 0x01)
#define SELF_ID_GAP_COUNT(q)		(((q) >> 16) & 0x3f)
#define SELF_ID_PHY_SPEED(q)		(((q) >> 14) & 0x03)
#define SELF_ID_CONTENDER(q)		(((q) >> 11) & 0x01)
#define SELF_ID_PHY_INITIATOR(q)	(((q) >> 1) & 0x01)
#define SELF_ID_MORE_PACKETS(q)		(((q) >> 0) & 0x01)

#define SELF_ID_EXT_SEQUENCE(q)		(((q) >> 20) & 0x07)

#define SELFID_PORT_CHILD	0x3
#define SELFID_PORT_PARENT	0x2
#define SELFID_PORT_NCONN	0x1
#define SELFID_PORT_NONE	0x0

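/*
 * Each self-ID packet describes a node's ports as 2-bit fields using the
 * SELFID_PORT_* values above.  The first packet carries three port fields;
 * each extended packet carries eight more, with bit 0 indicating whether
 * another extended packet follows.  count_ports() walks these fields,
 * returning a pointer to the next node's first self-ID quadlet and filling
 * in the total and child port counts, or NULL if the extended packets are
 * malformed.
 */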
static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
{
	u32 q;
	int port_type, shift, seq;

	*total_port_count = 0;
	*child_port_count = 0;

	shift = 6;
	q = *sid;
	seq = 0;

	while (1) {
		port_type = (q >> shift) & 0x03;
		switch (port_type) {
		case SELFID_PORT_CHILD:
			(*child_port_count)++;
			/* fall through */
		case SELFID_PORT_PARENT:
		case SELFID_PORT_NCONN:
			(*total_port_count)++;
		case SELFID_PORT_NONE:
			break;
		}

		shift -= 2;
		if (shift == 0) {
			if (!SELF_ID_MORE_PACKETS(q))
				return sid + 1;

			shift = 16;
			sid++;
			q = *sid;

			/*
			 * Check that the extra packets actually are
			 * extended self ID packets and that the
			 * sequence numbers in the extended self ID
			 * packets increase as expected.
			 */

			if (!SELF_ID_EXTENDED(q) ||
			    seq != SELF_ID_EXT_SEQUENCE(q))
				return NULL;

			seq++;
		}
	}
}

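/*
 * Return the 2-bit port status field for port_index within a node's self-ID
 * packets.  Ports 0-2 live in the first quadlet at shifts 6, 4 and 2; ports
 * 3 and up live in the extended quadlets, eight per quadlet at shifts 16,
 * 14, ..., 2.  Worked example: port_index 3 yields index = (3 + 5) / 8 = 1
 * and shift = 16 - 0 * 2 = 16, i.e. the first port field of the first
 * extended self-ID quadlet.
 */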
static int get_port_type(u32 *sid, int port_index)
{
	int index, shift;

	index = (port_index + 5) / 8;
	shift = 16 - ((port_index + 5) & 7) * 2;
	return (sid[index] >> shift) & 0x03;
}

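/*
 * Allocate and initialize a node from its first self-ID quadlet.  GFP_ATOMIC
 * is used because this runs from build_tree() under card->lock, which is
 * taken with interrupts disabled in fw_core_handle_bus_reset() below.
 */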
static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
{
	struct fw_node *node;

	node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC);
	if (node == NULL)
		return NULL;

	node->color = color;
	node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
	node->link_on = SELF_ID_LINK_ON(sid);
	node->phy_speed = SELF_ID_PHY_SPEED(sid);
	node->initiated_reset = SELF_ID_PHY_INITIATOR(sid);
	node->port_count = port_count;

	refcount_set(&node->ref_count, 1);
	INIT_LIST_HEAD(&node->link);

	return node;
}

/*
 * Compute the maximum hop count for this node and its children.  The
 * maximum hop count is the maximum number of connections between any
 * two nodes in the subtree rooted at this node.  We need this for
 * setting the gap count.  As we build the tree bottom up in
 * build_tree() below, this is fairly easy to do: for each node we
 * maintain the max hop count and the max depth, i.e. the number of
 * hops to the furthest leaf.  Computing the max hop count breaks down
 * into two cases: either the path goes through this node, in which
 * case the hop count is the sum of the two biggest child depths plus
 * 2, or the max hop path is entirely contained in a child tree, in
 * which case the max hop count is just the max hop count of that
 * child.
 */
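/*
 * Worked example: if a node's two deepest child subtrees have max_depth 3
 * and 2, the longest path through the node itself is 3 + 2 + 2 = 7 hops;
 * max_hops becomes 7 unless one of the child subtrees already reports a
 * larger max_hops of its own.
 */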
static void update_hop_count(struct fw_node *node)
{
	int depths[2] = { -1, -1 };
	int max_child_hops = 0;
	int i;

	for (i = 0; i < node->port_count; i++) {
		if (node->ports[i] == NULL)
			continue;

		if (node->ports[i]->max_hops > max_child_hops)
			max_child_hops = node->ports[i]->max_hops;

		if (node->ports[i]->max_depth > depths[0]) {
			depths[1] = depths[0];
			depths[0] = node->ports[i]->max_depth;
		} else if (node->ports[i]->max_depth > depths[1])
			depths[1] = node->ports[i]->max_depth;
	}

	node->max_depth = depths[0] + 1;
	node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
}

static inline struct fw_node *fw_node(struct list_head *l)
{
	return list_entry(l, struct fw_node, link);
}

/*
 * This function builds the tree representation of the topology given
 * by the self IDs from the latest bus reset.  During the construction
 * of the tree, the function checks that the self IDs are valid and
 * internally consistent.  On success this function returns the
 * fw_node corresponding to the local card; otherwise it returns NULL.
 */
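/*
 * The stack-based construction below relies on the IEEE 1394
 * self-identification order: self-ID packets arrive in ascending PHY ID
 * order and every node's children have lower PHY IDs than the node itself
 * (the root reports last), so a node's children are always on top of the
 * stack when the node's own self IDs are processed.
 */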
static struct fw_node *build_tree(struct fw_card *card,
				  u32 *sid, int self_id_count)
{
	struct fw_node *node, *child, *local_node, *irm_node;
	struct list_head stack, *h;
	u32 *next_sid, *end, q;
	int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
	int gap_count;
	bool beta_repeaters_present;

	local_node = NULL;
	node = NULL;
	INIT_LIST_HEAD(&stack);
	stack_depth = 0;
	end = sid + self_id_count;
	phy_id = 0;
	irm_node = NULL;
	gap_count = SELF_ID_GAP_COUNT(*sid);
	beta_repeaters_present = false;

	while (sid < end) {
		next_sid = count_ports(sid, &port_count, &child_port_count);

		if (next_sid == NULL) {
			fw_err(card, "inconsistent extended self IDs\n");
			return NULL;
		}

		q = *sid;
		if (phy_id != SELF_ID_PHY_ID(q)) {
			fw_err(card, "PHY ID mismatch in self ID: %d != %d\n",
			       phy_id, SELF_ID_PHY_ID(q));
			return NULL;
		}

		if (child_port_count > stack_depth) {
			fw_err(card, "topology stack underflow\n");
			return NULL;
		}

		/*
		 * Seek back from the top of our stack to find the
		 * start of the child nodes for this node.
		 */
		for (i = 0, h = &stack; i < child_port_count; i++)
			h = h->prev;
		/*
		 * When the stack is empty, this yields an invalid value,
		 * but that pointer will never be dereferenced.
		 */
		child = fw_node(h);

		node = fw_node_create(q, port_count, card->color);
		if (node == NULL) {
			fw_err(card, "out of memory while building topology\n");
			return NULL;
		}

		if (phy_id == (card->node_id & 0x3f))
			local_node = node;

		if (SELF_ID_CONTENDER(q))
			irm_node = node;

		parent_count = 0;

		for (i = 0; i < port_count; i++) {
			switch (get_port_type(sid, i)) {
			case SELFID_PORT_PARENT:
				/*
				 * Who's your daddy?  We don't know the
				 * parent node at this time, so we
				 * temporarily abuse node->color for
				 * remembering the entry in the
				 * node->ports array where the parent
				 * node should be.  Later, when we
				 * handle the parent node, we fix up
				 * the reference.
				 */
				parent_count++;
				node->color = i;
				break;

			case SELFID_PORT_CHILD:
				node->ports[i] = child;
				/*
				 * Fix up parent reference for this
				 * child node.
				 */
				child->ports[child->color] = node;
				child->color = card->color;
				child = fw_node(child->link.next);
				break;
			}
		}

		/*
		 * Check that the node reports exactly one parent
		 * port, except for the root, which of course should
		 * have no parents.
		 */
		if ((next_sid == end && parent_count != 0) ||
		    (next_sid < end && parent_count != 1)) {
			fw_err(card, "parent port inconsistency for node %d: "
			       "parent_count=%d\n", phy_id, parent_count);
			return NULL;
		}

		/* Pop the child nodes off the stack and push the new node. */
		__list_del(h->prev, &stack);
		list_add_tail(&node->link, &stack);
		stack_depth += 1 - child_port_count;

		if (node->phy_speed == SCODE_BETA &&
		    parent_count + child_port_count > 1)
			beta_repeaters_present = true;

		/*
		 * If PHYs report different gap counts, set an invalid count
		 * which will force a gap count reconfiguration and a reset.
		 */
		if (SELF_ID_GAP_COUNT(q) != gap_count)
			gap_count = 0;

		update_hop_count(node);

		sid = next_sid;
		phy_id++;
	}

	card->root_node = node;
	card->irm_node = irm_node;
	card->gap_count = gap_count;
	card->beta_repeaters_present = beta_repeaters_present;

	return local_node;
}

typedef void (*fw_node_callback_t)(struct fw_card *card,
				   struct fw_node *node,
				   struct fw_node *parent);

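/*
 * Walk the subtree rooted at root, invoking callback on every node.  The
 * traversal is breadth first; node->color marks nodes already visited in
 * this pass, so a port pointing back at a node carrying the current color
 * is recognized as the parent rather than queued again.
 */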
static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
			     fw_node_callback_t callback)
{
	struct list_head list;
	struct fw_node *node, *next, *child, *parent;
	int i;

	INIT_LIST_HEAD(&list);

	fw_node_get(root);
	list_add_tail(&root->link, &list);
	parent = NULL;
	list_for_each_entry(node, &list, link) {
		node->color = card->color;

		for (i = 0; i < node->port_count; i++) {
			child = node->ports[i];
			if (!child)
				continue;
			if (child->color == card->color)
				parent = child;
			else {
				fw_node_get(child);
				list_add_tail(&child->link, &list);
			}
		}

		callback(card, node, parent);
	}

	list_for_each_entry_safe(node, next, &list, link)
		fw_node_put(node);
}

static void report_lost_node(struct fw_card *card,
			     struct fw_node *node, struct fw_node *parent)
{
	fw_node_event(card, node, FW_NODE_DESTROYED);
	fw_node_put(node);

	/* Topology has changed - reset bus manager retry counter */
	card->bm_retries = 0;
}

static void report_found_node(struct fw_card *card,
			      struct fw_node *node, struct fw_node *parent)
{
	int b_path = (node->phy_speed == SCODE_BETA);

	if (parent != NULL) {
		/* min() macro doesn't work here with gcc 3.4 */
		node->max_speed = parent->max_speed < node->phy_speed ?
				  parent->max_speed : node->phy_speed;
		node->b_path = parent->b_path && b_path;
	} else {
		node->max_speed = node->phy_speed;
		node->b_path = b_path;
	}

	fw_node_event(card, node, FW_NODE_CREATED);

	/* Topology has changed - reset bus manager retry counter */
	card->bm_retries = 0;
}

/* Must be called with card->lock held */
void fw_destroy_nodes(struct fw_card *card)
{
	card->color++;
	if (card->local_node != NULL)
		for_each_fw_node(card, card->local_node, report_lost_node);
	card->local_node = NULL;
}

static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
{
	struct fw_node *tree;
	int i;

	tree = node1->ports[port];
	node0->ports[port] = tree;
	for (i = 0; i < tree->port_count; i++) {
		if (tree->ports[i] == node1) {
			tree->ports[i] = node0;
			break;
		}
	}
}

/*
 * Compare the old topology tree for card with the new one specified by root.
 * Queue the nodes and mark them as either found, lost or updated.
 * Update the nodes in the card topology tree as we go.
 */
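/*
 * The old tree (starting at card->local_node) and the new tree (starting at
 * root) are walked in lock step via list0 and list1, so node0 and node1
 * always refer to the same topological position; each node1 is dropped with
 * fw_node_put() once its state has been copied into node0.
 */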
static void update_tree(struct fw_card *card, struct fw_node *root)
{
	struct list_head list0, list1;
	struct fw_node *node0, *node1, *next1;
	int i, event;

	INIT_LIST_HEAD(&list0);
	list_add_tail(&card->local_node->link, &list0);
	INIT_LIST_HEAD(&list1);
	list_add_tail(&root->link, &list1);

	node0 = fw_node(list0.next);
	node1 = fw_node(list1.next);

	while (&node0->link != &list0) {
		WARN_ON(node0->port_count != node1->port_count);

		if (node0->link_on && !node1->link_on)
			event = FW_NODE_LINK_OFF;
		else if (!node0->link_on && node1->link_on)
			event = FW_NODE_LINK_ON;
		else if (node1->initiated_reset && node1->link_on)
			event = FW_NODE_INITIATED_RESET;
		else
			event = FW_NODE_UPDATED;

		node0->node_id = node1->node_id;
		node0->color = card->color;
		node0->link_on = node1->link_on;
		node0->initiated_reset = node1->initiated_reset;
		node0->max_hops = node1->max_hops;
		node1->color = card->color;
		fw_node_event(card, node0, event);

		if (card->root_node == node1)
			card->root_node = node0;
		if (card->irm_node == node1)
			card->irm_node = node0;

		for (i = 0; i < node0->port_count; i++) {
			if (node0->ports[i] && node1->ports[i]) {
				/*
				 * This port didn't change, queue the
				 * connected node for further
				 * investigation.
				 */
				if (node0->ports[i]->color == card->color)
					continue;
				list_add_tail(&node0->ports[i]->link, &list0);
				list_add_tail(&node1->ports[i]->link, &list1);
			} else if (node0->ports[i]) {
				/*
				 * The nodes connected here were
				 * unplugged; unref the lost nodes and
				 * queue FW_NODE_LOST callbacks for
				 * them.
				 */

				for_each_fw_node(card, node0->ports[i],
						 report_lost_node);
				node0->ports[i] = NULL;
			} else if (node1->ports[i]) {
				/*
				 * One or more nodes were connected to
				 * this port.  Move the new nodes into
				 * the tree and queue FW_NODE_CREATED
				 * callbacks for them.
				 */
				move_tree(node0, node1, i);
				for_each_fw_node(card, node0->ports[i],
						 report_found_node);
			}
		}

		node0 = fw_node(node0->link.next);
		next1 = fw_node(node1->link.next);
		fw_node_put(node1);
		node1 = next1;
	}
}

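/*
 * Refresh the cached TOPOLOGY_MAP CSR contents: a length field (whose CRC is
 * filled in by fw_compute_block_crc()), a generation count that is bumped on
 * every update, a quadlet holding node_count and self_id_count, and then the
 * raw self-ID quadlets themselves.
 */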
static void update_topology_map(struct fw_card *card,
				u32 *self_ids, int self_id_count)
{
	int node_count = (card->root_node->node_id & 0x3f) + 1;
	__be32 *map = card->topology_map;

	*map++ = cpu_to_be32((self_id_count + 2) << 16);
	*map++ = cpu_to_be32(be32_to_cpu(card->topology_map[1]) + 1);
	*map++ = cpu_to_be32((node_count << 16) | self_id_count);

	while (self_id_count--)
		*map++ = cpu_to_be32p(self_ids++);

	fw_compute_block_crc(card->topology_map);
}

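/*
 * Entry point for processing a completed bus reset, typically called by the
 * link-layer controller driver (e.g. firewire-ohci) once the new self-ID
 * buffer has been received.  It rebuilds or updates the topology tree and
 * emits FW_NODE_* events, all under card->lock.
 */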
void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
			      int self_id_count, u32 *self_ids, bool bm_abdicate)
{
	struct fw_node *local_node;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	/*
	 * If the selfID buffer is not the immediate successor of the
	 * previously processed one, we cannot reliably compare the
	 * old and new topologies.
	 */
	if (!is_next_generation(generation, card->generation) &&
	    card->local_node != NULL) {
		fw_destroy_nodes(card);
		card->bm_retries = 0;
	}

	card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
	card->node_id = node_id;
	/*
	 * Update node_id before generation to prevent anybody from using
	 * a stale node_id together with a current generation.
	 */
	smp_wmb();
	card->generation = generation;
	card->reset_jiffies = get_jiffies_64();
	card->bm_node_id = 0xffff;
	card->bm_abdicate = bm_abdicate;
	fw_schedule_bm_work(card, 0);

	local_node = build_tree(card, self_ids, self_id_count);

	update_topology_map(card, self_ids, self_id_count);

	card->color++;

	if (local_node == NULL) {
		fw_err(card, "topology build failed\n");
		/* FIXME: We need to issue a bus reset in this case. */
	} else if (card->local_node == NULL) {
		card->local_node = local_node;
		for_each_fw_node(card, local_node, report_found_node);
	} else {
		update_tree(card, local_node);
	}

	spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL(fw_core_handle_bus_reset);
553EXPORT_SYMBOL(fw_core_handle_bus_reset);