blob: f7d4ad812b7222e62f98647d1ff0d64fbaec98b0 [file] [log] [blame]
b.liue9582032025-04-17 19:18:16 +08001/*
2 * Fastpath Devices
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#define pr_fmt(fmt) "mfp" " device:%s:%d: " fmt, __func__, __LINE__
11
12#include <net/ipv6.h>
13#include <linux/inet.h>
14#include "fp_common.h"
15#include "fp_device.h"
16#include "fp_core.h"
17#include "fp_ndisc.h"
18
/* the fp_device module instance, set once at probe time */
static struct fastpath_module *fp_device;

/* column layout shared by the title row and data rows of every stats table */
#define STATS_TITLE_FMT \
	"%-13.13s: total Slow Fast\n"
#define STATS_DATA_FMT \
	" %-10.10s: %10llu %10llu %10lu\n"
/*
 * stats_diff - slow-path total minus fast-path count, clamped to zero
 *
 * The fast counter can momentarily run ahead of the aggregate total;
 * clamping avoids a huge unsigned wrap-around in that case.
 * (Keyword order fixed: "static inline" before the return type.)
 */
static inline unsigned long long stats_diff(unsigned long long slow,
					    unsigned long fast)
{
	return (slow < fast) ? 0 : slow - fast;
}
30
31static int
32add_stats_to_buff(char *buf, struct fp_net_device *fdev, ssize_t max_size)
33{
34 struct rtnl_link_stats64 temp;
35 const struct rtnl_link_stats64 *stats;
36 struct fp_net_device_stats *stats_fast;
37 static const char *title_fmt = STATS_TITLE_FMT;
38 static const char *data_fmt = STATS_DATA_FMT;
39 int len;
40
41 stats = dev_get_stats(fdev->dev, &temp);
42
43 stats_fast = &fdev->stats;
44
45 len = scnprintf(buf, max_size, title_fmt, fdev->dev->name);
46
47 len += scnprintf(buf + len, max_size - len, data_fmt, "queue_stopped",
48 0llu, 0llu, stats_fast->queue_stopped);
49 len += scnprintf(buf + len, max_size - len, data_fmt, "rx_packets",
50 stats->rx_packets, stats_diff(stats->rx_packets, stats_fast->rx_packets) ,stats_fast->rx_packets);
51 len += scnprintf(buf + len, max_size - len, data_fmt, "rx_bytes",
52 stats->rx_bytes, stats_diff(stats->rx_bytes, stats_fast->rx_bytes), stats_fast->rx_bytes);
53 len += scnprintf(buf + len, max_size - len, data_fmt, "rx_errors",
54 stats->rx_errors, stats_diff(stats->rx_errors, stats_fast->rx_errors), stats_fast->rx_errors);
55 len += scnprintf(buf + len, max_size - len, data_fmt, "rx_dropped",
56 stats->rx_dropped, stats_diff(stats->rx_dropped, stats_fast->rx_dropped), stats_fast->rx_dropped);
57 len += scnprintf(buf + len, max_size - len, data_fmt, "tx_packets",
58 stats->tx_packets, stats_diff(stats->tx_packets, stats_fast->tx_packets), stats_fast->tx_packets);
59 len += scnprintf(buf + len, max_size - len, data_fmt, "tx_bytes",
60 stats->tx_bytes, stats_diff(stats->tx_bytes, stats_fast->tx_bytes), stats_fast->tx_bytes);
61 len += scnprintf(buf + len, max_size - len, data_fmt, "tx_errors",
62 stats->tx_errors, stats_diff(stats->tx_errors, stats_fast->tx_errors), stats_fast->tx_errors);
63 len += scnprintf(buf + len, max_size - len, data_fmt, "tx_dropped",
64 stats->tx_dropped, stats_diff(stats->tx_dropped, stats_fast->tx_dropped), stats_fast->tx_dropped);
65 return len;
66}
67
68static int
69add_status_to_buff(char *buf, struct fp_net_device *fdev, ssize_t max_size)
70{
71 return scnprintf(buf, max_size, "%16s%8s%11s%9d%9d%9s\n",
72 fdev->dev->name,
73 netif_running(fdev->dev) ? "Up" : "Down",
74 fdev->forward ? "enabled" : "disabled",
75 atomic_read(&fdev->refcnt),
76 netdev_refcnt_read(fdev->dev),
77 fdev->br ? fdev->br->name : "NA");
78}
79
80static inline bool ip6addr_is_empty(struct in6_addr *addr)
81{
82 return !addr->in6_u.u6_addr32[0] &&
83 !addr->in6_u.u6_addr32[1] &&
84 !addr->in6_u.u6_addr32[2] &&
85 !addr->in6_u.u6_addr32[3];
86}
87
88static ssize_t fdev_forward_show(struct fp_net_device *fdev, char *buf)
89{
90 return scnprintf(buf, PAGE_SIZE, "%s\n", fdev->forward ? "Enabled" : "Disabled");
91}
92
93static ssize_t fdev_forward_store(struct fp_net_device *fdev,
94 const char *buf, size_t count)
95{
96 unsigned int forward;
97
98 if (sscanf(buf, "%u", &forward) != 1)
99 return -EINVAL;
100
101 fdev->forward = (bool)forward;
102
103 return count;
104}
105
106/**
107 * show statistics
108 */
109static ssize_t fdev_stats_show(struct fp_net_device *fdev, char *buf)
110{
111 struct net_device_stats *stats_slow = &fdev->dev->stats;
112 struct fp_net_device_stats *stats_fast = &fdev->stats;
113
114 if (fdev->dev->netdev_ops && fdev->dev->netdev_ops->ndo_get_stats)
115 stats_slow = fdev->dev->netdev_ops->ndo_get_stats(fdev->dev);
116 stats_fast = &fdev->stats;
117
118 return add_stats_to_buff(buf, fdev, PAGE_SIZE - 1);;
119}
120
121/**
122 * clear statistics
123 * 0 - clear fast stats only
124 * 1 - clear slow & fast stats
125 */
126static ssize_t fdev_stats_store(struct fp_net_device *fdev,
127 const char *buf, size_t count)
128{
129 struct net_device_stats *stats_slow = &fdev->dev->stats;
130 struct fp_net_device_stats *stats_fast = &fdev->stats;
131 unsigned int op;
132
133 if (sscanf(buf, "%u", &op) != 1 || op > 1)
134 return -EINVAL;
135
136 if (fdev->dev->netdev_ops && fdev->dev->netdev_ops->ndo_get_stats)
137 stats_slow = fdev->dev->netdev_ops->ndo_get_stats(fdev->dev);
138 stats_fast = &fdev->stats;
139
140 memset(stats_fast,0,sizeof(struct fp_net_device_stats));
141 if (op)
142 memset(stats_slow,0,sizeof(struct net_device_stats));
143
144 return count;
145}
146
147/**
148 * show status
149 */
150static ssize_t fdev_status_show(struct fp_net_device *fdev, char *buf)
151{
152 int len;
153
154 len = scnprintf(buf, PAGE_SIZE, " device state forward refcnt dev_ref bridge\n");
155 return len + add_status_to_buff(buf + len, fdev, PAGE_SIZE - len -1);
156}
157
158static ssize_t fpdev_prefixlen_store(struct fp_net_device *fpdev,
159 const char *buf, size_t count)
160{
161 int pref;
162 sscanf(buf, "%d\n", &pref);
163
164 fpdev->prefixlen = pref;
165
166 return count;
167}
168
/* sysfs read: current IPv6 prefix length. */
static ssize_t fpdev_prefixlen_show(struct fp_net_device *fpdev, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", fpdev->prefixlen);
}
173
174static ssize_t fpdev_ll6addr_store(struct fp_net_device *fpdev,
175 const char *buf, size_t count)
176{
177 in6_pton(buf, -1, (u8 *)&fpdev->ll6addr.s6_addr, -1, NULL);
178
179 if (ip6addr_is_empty(&fpdev->ll6addr))
180 fpdev_clear_ll6(fpdev);
181 else
182 fpdev_set_ll6(fpdev);
183
184 memset(&fpdev->gb6addr, 0, sizeof(struct in6_addr));
185 fpdev->prefixlen = 0;
186 fpdev->mtu = 0;
187 fpdev_clear_gb6(fpdev);
188 fpdev_clear_mtu(fpdev);
189
190 return count;
191}
192
/* sysfs read: link-local IPv6 address in compact textual form. */
static ssize_t fpdev_ll6addr_show(struct fp_net_device *fpdev, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%pI6c\n", &fpdev->ll6addr);
}
197
198static ssize_t fpdev_gb6addr_store(struct fp_net_device *fpdev,
199 const char *buf, size_t count)
200{
201 in6_pton(buf, -1, (u8 *)&fpdev->gb6addr.s6_addr, -1, NULL);
202
203 fpdev_set_gb6(fpdev);
204 return count;
205}
206
/* sysfs read: global IPv6 address in compact textual form. */
static ssize_t fpdev_gb6addr_show(struct fp_net_device *fpdev, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%pI6c\n", &fpdev->gb6addr);
}
211
212static ssize_t fpdev_mtu_store(struct fp_net_device *fpdev,
213 const char *buf, size_t count)
214{
215 u32 mtu;
216 sscanf(buf, "%d\n", &mtu);
217
218 fpdev->mtu = mtu;
219
220 return count;
221}
222
223
224static ssize_t fpdev_mtu_show(struct fp_net_device *fpdev, char *buf)
225{
226 return scnprintf(buf, PAGE_SIZE, "%d\n", fpdev->mtu);
227}
228
/* sysfs attribute wrapper binding show/store handlers to an fp_net_device */
struct fp_dev_attr {
	struct attribute attr;
	ssize_t (*show)(struct fp_net_device *, char *);
	ssize_t (*store)(struct fp_net_device *, const char *, size_t count);
};

/* declare a named fp_dev_attr using the standard __ATTR() initializer */
#define FPDEV_ATTR(_name, _mode, _show, _store) \
	struct fp_dev_attr fp_dev_attr_##_name = __ATTR(_name, _mode, _show, _store)

/* container_of helpers used by the sysfs dispatch functions below */
#define to_fpdev(fpdev) container_of(fpdev, struct fp_net_device, kobj)
#define to_attr(a) container_of(a, struct fp_dev_attr, attr)

static FPDEV_ATTR(forward, S_IRUGO|S_IWUSR, fdev_forward_show, fdev_forward_store);
static FPDEV_ATTR(statistics, S_IRUGO|S_IWUSR, fdev_stats_show, fdev_stats_store);
static FPDEV_ATTR(status, S_IRUGO, fdev_status_show, NULL);
static FPDEV_ATTR(ll6addr, S_IRUGO|S_IWUSR, fpdev_ll6addr_show,
		  fpdev_ll6addr_store);
static FPDEV_ATTR(gb6addr, S_IRUGO|S_IWUSR, fpdev_gb6addr_show,
		  fpdev_gb6addr_store);
static FPDEV_ATTR(prefixlen, S_IRUGO|S_IWUSR, fpdev_prefixlen_show,
		  fpdev_prefixlen_store);
static FPDEV_ATTR(mtu, S_IRUGO|S_IWUSR, fpdev_mtu_show,
		  fpdev_mtu_store);

/* attributes created under every fastpath device kobject */
static struct attribute *fpdev_default_attrs[] = {
	&fp_dev_attr_forward.attr,
	&fp_dev_attr_statistics.attr,
	&fp_dev_attr_status.attr,
	&fp_dev_attr_ll6addr.attr,
	&fp_dev_attr_gb6addr.attr,
	&fp_dev_attr_prefixlen.attr,
	&fp_dev_attr_mtu.attr,
	NULL
};
263
264static ssize_t fpdev_show(struct kobject *kobj, struct attribute *attr, char *buf)
265{
266 struct fp_net_device *fdev = to_fpdev(kobj);
267 struct fp_dev_attr *fattr = to_attr(attr);
268
269 if (!fdev || !fattr || !fattr->show)
270 return -EINVAL;
271
272 return fattr->show(fdev, buf);
273}
274
275static ssize_t fpdev_store(struct kobject *kobj, struct attribute *attr,
276 const char *buf, size_t count)
277{
278 struct fp_net_device *fdev = to_fpdev(kobj);
279 struct fp_dev_attr *fattr = to_attr(attr);
280
281 if (!fdev || !fattr || !fattr->store)
282 return -EINVAL;
283
284 return fattr->store(fdev, buf, count);
285}
286
/* sysfs operations for per-device kobjects (ktype_fpdev) */
static const struct sysfs_ops fpdev_sysfs_ops = {
	.show = fpdev_show,
	.store = fpdev_store,
};
291
/*
 * Workqueue handler performing the final teardown of a fastpath device:
 * releases the netdev reference taken in fpdev_add_if(), frees the
 * fp_net_device, and wakes fp_device_remove() waiters via fpdl->wq.
 */
void destroy_fpdev(struct work_struct *w)
{
	struct fp_dev_work *work;
	struct fp_net_device *fpdev;
	struct fp_dev_list *fpdl;

	work = container_of(w, struct fp_dev_work, work.work);
	/* container_of() never yields NULL; check kept for paranoia */
	BUG_ON(!work);

	fpdev = work->fpdev;
	fpdl = work->fpdl;

	/* NOTE(review): pr_err for a normal teardown event looks too loud —
	 * presumably intended as a trace; consider pr_debug.  Verify nothing
	 * depends on this appearing at error level. */
	pr_err("device (%s) destroyed\n", fpdev->dev->name);

	/* NOTE(review): dev_put() itself does not require RTNL — assuming
	 * the lock is taken here for an ordering reason; confirm. */
	rtnl_lock();
	dev_put(fpdev->dev);
	rtnl_unlock();

	kfree(fpdev);
	atomic_dec(&fpdl->dev_count);
	wake_up(&fpdl->wq);

	/* work->list is only ever self-linked (INIT_LIST_HEAD in
	 * destroy_fpdev_rcu), so this unlink is a harmless no-op */
	list_del(&work->list);
	kfree(work);

}
318
319void destroy_fpdev_rcu(struct rcu_head *rcu)
320{
321 struct fp_dev_work *work;
322 struct fp_net_device *fpdev =
323 container_of(rcu, struct fp_net_device, rcu);
324 struct fp_dev_list *fpdl = fp_device->priv;
325
326 work = kzalloc(sizeof(*work), GFP_ATOMIC);
327 if (!work)
328 return;
329
330 work->fpdev = fpdev;
331 work->fpdl = fpdl;
332
333 INIT_LIST_HEAD(&work->list);
334 INIT_DELAYED_WORK(&work->work, destroy_fpdev);
335 queue_delayed_work(fpdl->dev_put_wq, &work->work, 0);
336}
337
/*
 * kobject release callback for per-device kobjects.  The fp_net_device
 * itself is freed via the RCU/workqueue path (destroy_fpdev), so there
 * is nothing to free here.
 */
static void release_fpdev(struct kobject *kobj)
{
	pr_debug("fpdev kobj released\n");
}

/* kobject type for per-device sysfs directories */
static struct kobj_type ktype_fpdev = {
	.sysfs_ops = &fpdev_sysfs_ops,
	.default_attrs = fpdev_default_attrs,
	.release = release_fpdev,
};
348
/*
 * Work item scheduled by fpdev_del_if(): drops the device's sysfs
 * kobject reference and one fpdev reference outside the deleter's
 * locking context.
 */
static void fpdev_del_if_finish(struct work_struct *work)
{
	struct fp_net_device *fpdev;

	fpdev = container_of(work, struct fp_net_device, free_work);

	kobject_put(&fpdev->kobj);
	fpdev_put(fpdev);
}
358
359/*--------------------------------------------------------------*/
360/*- API -*/
361/*--------------------------------------------------------------*/
362
363/**
364 * delete the fastpath device associated with this net device
365 *
366 * @param dev net device
367 *
368 * @return 0 for success, -ENODEV if not found
369 */
370int fpdev_del_if(struct net_device *dev)
371{
372 struct fp_net_device *fpdev;
373 struct fp_dev_list *fpdl = fp_device->priv;
374
375 spin_lock_bh(&fpdl->list_lock);
376 rcu_read_lock_bh();
377 list_for_each_entry_rcu(fpdev, &fpdl->devices_list, list) {
378 if (fpdev->dev == dev && fpdev_hold(fpdev))
379 goto found;
380 }
381
382 fpdev = NULL;
383
384found:
385 rcu_read_unlock_bh();
386
387 if (!fpdev) {
388 pr_debug("device (%s) not found\n", dev->name);
389 spin_unlock_bh(&fpdl->list_lock);
390 return -ENODEV;
391 }
392
393 list_del_rcu(&fpdev->list);
394 spin_unlock_bh(&fpdl->list_lock);
395
396 fpdev_put(fpdev);
397 schedule_work(&fpdev->free_work);
398
399 fpdev_put(fpdev);
400
401 printk(KERN_DEBUG "device (%s) found and deleted\n", dev->name);
402 return 0;
403}
404
405/**
406 * create and add a fastpath device for a given interface
407 *
408 * @param dev net device
409 *
410 * @return 0 for success, error code otherwise
411 */
412int fpdev_add_if(struct net_device *dev)
413{
414 struct fp_net_device *fpdev;
415 struct fp_dev_list *fpdl;
416 int ret;
417
418 BUG_ON(!dev);
419 BUG_ON(!fp_device);
420
421 fpdl = fp_device->priv;
422
423 fpdev = kzalloc(sizeof(*fpdev), GFP_ATOMIC);
424 if (!fpdev) {
425 ret = -ENOMEM;
426 goto err;
427 }
428
429 dev_hold(dev);
430
431 ret = kobject_init_and_add(&fpdev->kobj, &ktype_fpdev, &fp_device->kobj,
432 dev->name);
433 if (ret)
434 goto kobj_err;
435
436
437 fpdev->forward = true;
438 fpdev->dev = dev;
439 INIT_LIST_HEAD(&fpdev->list);
440 INIT_WORK(&fpdev->free_work, fpdev_del_if_finish);
441
442 /* extra reference for return */
443 atomic_set(&fpdev->refcnt, 2);
444 atomic_inc(&fpdl->dev_count);
445
446 spin_lock_bh(&fpdl->list_lock);
447 list_add_tail_rcu(&fpdev->list, &fpdl->devices_list);
448 spin_unlock_bh(&fpdl->list_lock);
449
450 kobject_uevent(&fpdev->kobj, KOBJ_ADD);
451
452 pr_debug("created fastpath device for %s\n", dev->name);
453
454 return 0;
455
456kobj_err:
457 kobject_put(&fpdev->kobj);
458 dev_put(dev);
459 kfree(fpdev);
460err:
461 pr_err("could not creat fastpath device for %s\n", dev->name);
462 return ret;
463}
464
465/**
466 * search for a fastpath device associated with a given net device.
467 * If found, the fastpath device's refcount is incremented.
468 * The user must call fpdev_put() when finished in order to release the device.
469 *
470 * @param dev net device
471 *
472 * @return pointer to the associated fastpath device (NULL if not found)
473 */
474struct fp_net_device *fpdev_get_if(struct net_device *dev)
475{
476 struct fp_net_device *fpdev;
477 struct fp_dev_list *fpdl = fp_device->priv;
478
479 rcu_read_lock_bh();
480 list_for_each_entry_rcu(fpdev, &fpdl->devices_list, list) {
481 if (fpdev->dev == dev && atomic_inc_not_zero(&fpdev->refcnt))
482 goto found;
483 }
484
485 fpdev = NULL;
486 printk(KERN_DEBUG "device (%s) not found\n", dev->name);
487
488found:
489 rcu_read_unlock_bh();
490 return fpdev;
491}
492
/*
 * Find the first "ccinet*" device whose global address, link-local
 * address and MTU are all configured, taking a reference on it.
 * The caller must release the result with fpdev_put().
 *
 * Returns the referenced device, or NULL if none matches.
 */
struct fp_net_device *fpdev_get_ccinet(void)
{
	struct fp_net_device *fpdev;
	struct fp_dev_list *fpdl = fp_device->priv;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(fpdev, &fpdl->devices_list, list) {
		/* atomic_inc_not_zero must stay last in the condition:
		 * it is the operation that actually takes the reference */
		if (fpdev_is_gb6_set(fpdev) && fpdev_is_ll6_set(fpdev) &&
		    (!strncasecmp(fpdev->dev->name, "ccinet", 6)) &&
		    fpdev_is_mtu_set(fpdev) && atomic_inc_not_zero(&fpdev->refcnt))
			goto found;
	}

	fpdev = NULL;

found:
	rcu_read_unlock_bh();
	return fpdev;
}
512
513/**
514 * show statistics (all fastpath devices)
515 */
516static ssize_t stats_show(struct fastpath_module *m, char *buf)
517{
518 struct fp_net_device *itr;
519 int len, res;
520 struct fp_dev_list *fpdl = fp_device->priv;
521
522 len = sprintf(buf, "fastpath statistics\n");
523
524 spin_lock_bh(&fpdl->list_lock);
525 list_for_each_entry(itr, &fpdl->devices_list, list) {
526 if (!netif_running(itr->dev) || !fpdev_hold(itr))
527 continue;
528 res = add_stats_to_buff(buf + len, itr, PAGE_SIZE - len - 1);
529 fpdev_put(itr);
530 len += res;
531 if (res == 0) {
532 pr_info("Exceed PAGE_SIZE, result trancated\n");
533 len += sprintf(buf + len, "\n");
534 break;
535 }
536 }
537 spin_unlock_bh(&fpdl->list_lock);
538
539 return len;
540}
541
542/**
543 * clear statistics (all fastpath devices)
544 * 0 - clear fast stats only
545 * 1 - clear slow & fast stats
546 */
547static ssize_t stats_store(struct fastpath_module *m, const char *buf,
548 size_t count)
549{
550 struct fp_net_device *itr;
551 struct net_device_stats *stats_slow;
552 struct fp_net_device_stats *stats_fast;
553 unsigned int op;
554 struct fp_dev_list *fpdl = fp_device->priv;
555
556 if (sscanf(buf, "%u", &op) != 1 || op > 1)
557 return -EINVAL;
558
559 spin_lock_bh(&fpdl->list_lock);
560 list_for_each_entry(itr, &fpdl->devices_list, list) {
561 BUG_ON(!itr->dev);
562 if (!fpdev_hold(itr))
563 continue;
564 stats_slow = &itr->dev->stats;
565 stats_fast = &itr->stats;
566 if (itr->dev->netdev_ops && itr->dev->netdev_ops->ndo_get_stats)
567 stats_slow = itr->dev->netdev_ops->ndo_get_stats(itr->dev);
568
569 memset(stats_fast,0,sizeof(struct fp_net_device_stats));
570 if (op)
571 memset(stats_slow,0,sizeof(struct net_device_stats));
572
573 fpdev_put(itr);
574 }
575 spin_unlock_bh(&fpdl->list_lock);
576
577 return count;
578}
579
580/**
581 * show status (all fastpath devices)
582 */
583static ssize_t status_show(struct fastpath_module *m, char *buf)
584{
585 struct fp_net_device *itr;
586 struct fp_dev_list *fpdl = fp_device->priv;
587 int len = 0;
588
589 len = scnprintf(buf, PAGE_SIZE, " device state forward refcnt dev_ref bridge\n");
590
591 /* active devices */
592 rcu_read_lock_bh();
593 list_for_each_entry_rcu(itr, &fpdl->devices_list, list)
594 len += add_status_to_buff(buf + len, itr, PAGE_SIZE - len -1);
595 rcu_read_unlock_bh();
596
597 return len;
598}
599
600static void fp_device_release(struct kobject *kobj)
601{
602 struct fp_dev_list *fpdl = fp_device->priv;
603 BUG_ON(!list_empty(&fpdl->devices_list));
604 pr_debug("fp_device released\n");
605}
606
static FP_ATTR(devices, S_IRUGO, status_show, NULL);
static FP_ATTR(stats, S_IRUGO|S_IWUSR, stats_show, stats_store);

/* module-level sysfs attributes (live under the fp_device kobject) */
static struct attribute *fp_device_attrs[] = {
	&fp_attr_devices.attr,
	&fp_attr_stats.attr,
	NULL, /* need to NULL terminate the list of attributes */
};

/* kobject type for the fp_device module directory */
static struct kobj_type ktype_devices = {
	.sysfs_ops = &fp_sysfs_ops,
	.default_attrs = fp_device_attrs,
	.release = fp_device_release,
};
621
622static int fp_device_probe(struct fastpath_module *module)
623{
624 int ret;
625 struct fp_dev_list *fpdl;
626
627 snprintf(module->name, sizeof(module->name),"fp_device");
628
629 fpdl = kzalloc(sizeof(*fpdl), GFP_KERNEL);
630 if (!fpdl) {
631 pr_err("fp_dev_list alloc failed\n");
632 return -ENOMEM;
633 }
634
635 kobject_init(&module->kobj, &ktype_devices);
636 ret = kobject_add(&module->kobj, module->fastpath->kobj, "%s", module->name);
637 if (ret < 0) {
638 pr_err("kobject_add failed (%d)\n", ret);
639 goto kobj_err;
640 }
641
642 atomic_set(&fpdl->dev_count, 0);
643 INIT_LIST_HEAD(&fpdl->devices_list);
644 spin_lock_init(&fpdl->list_lock);
645 init_waitqueue_head(&fpdl->wq);
646
647 fpdl->dev_put_wq = create_singlethread_workqueue(module->name);
648 if (!fpdl->dev_put_wq) {
649 pr_err("create workqueue failed\n");
650 ret = -EBUSY;
651 goto kobj_err;
652 }
653
654 module->priv = fpdl;
655 fp_device = module;
656
657 kobject_uevent(&module->kobj, KOBJ_ADD);
658
659 pr_debug("fp_device probed\n");
660 return 0;
661
662kobj_err:
663 kobject_put(&module->kobj);
664 kfree(fpdl);
665 return ret;
666}
667
668static int fp_device_remove(struct fastpath_module *module)
669{
670 struct fp_dev_list *fpdl = fp_device->priv;
671
672 BUG_ON(!module);
673
674 flush_workqueue(fpdl->dev_put_wq);
675 wait_event(fpdl->wq, !atomic_read(&fpdl->dev_count));
676 destroy_workqueue(fpdl->dev_put_wq);
677
678 kobject_put(&module->kobj);
679 fp_device = NULL;
680
681 kfree(module->priv);
682 kfree(module);
683
684 pr_debug("fp_device removed\n");
685 return 0;
686}
687
/* module hooks registered with the fastpath core */
struct fastpath_module_ops fp_device_ops = {
	.probe = fp_device_probe,
	.remove = fp_device_remove,
};