/*
 * Copyright (c) 2014 Ezequiel Garcia
 * Copyright (c) 2011 Free Electrons
 *
 * Driver parameter handling strongly based on drivers/mtd/ubi/build.c
 * Copyright (c) International Business Machines Corp., 2006
 * Copyright (c) Nokia Corporation, 2007
 * Authors: Artem Bityutskiy, Frank Haverkamp
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 */

/*
 * Read-only block devices on top of UBI volumes
 *
 * A simple implementation to allow a block device to be layered on top of a
 * UBI volume. The implementation is provided by creating a static 1-to-1
 * mapping between the block device and the UBI volume.
 *
 * The addressed byte is obtained from the addressed block sector, which is
 * mapped linearly into the corresponding LEB:
 *
 *   LEB number = addressed byte / LEB size
 *
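 * As a worked example (hypothetical geometry): with a 128 KiB usable LEB
 * size, the byte addressed by sector 600 is 600 * 512 = 307200, which maps
 * to LEB 2 at offset 45056, since 307200 = 2 * 131072 + 45056.
 *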
 * This feature is compiled in the UBI core, and adds a 'block' parameter
 * to allow early creation of block devices on top of UBI volumes. Runtime
 * block creation/removal for UBI volumes is provided through two UBI ioctls:
 * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/mtd/ubi.h>
#include <linux/workqueue.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/scatterlist.h>
#include <linux/idr.h>
#include <asm/div64.h>
#include <linux/root_dev.h>

#include "ubi-media.h"
#include "ubi.h"

/* Maximum number of supported devices */
#define UBIBLOCK_MAX_DEVICES 32

/* Maximum length of the 'block=' parameter */
#define UBIBLOCK_PARAM_LEN 63

/* Maximum number of comma-separated items in the 'block=' parameter */
#define UBIBLOCK_PARAM_COUNT 2

struct ubiblock_param {
	int ubi_num;
	int vol_id;
	char name[UBIBLOCK_PARAM_LEN+1];
};

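/*
 * Per-request driver data: blk-mq allocates one of these behind every
 * request (see tag_set.cmd_size below); blk_mq_rq_to_pdu() retrieves it.
 */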
struct ubiblock_pdu {
	struct work_struct work;
	struct ubi_sgl usgl;
};

/* Number of elements set in the @ubiblock_param array */
static int ubiblock_devs __initdata;

/* ubiblock devices specification parameters */
static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;

struct ubiblock {
	struct ubi_volume_desc *desc;
	int ubi_num;
	int vol_id;
	int refcnt;
	int leb_size;

	struct gendisk *gd;
	struct request_queue *rq;

	struct workqueue_struct *wq;

	struct mutex dev_mutex;
	struct list_head list;
	struct blk_mq_tag_set tag_set;
};

/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
static DEFINE_IDR(ubiblock_minor_idr);
/* Protects ubiblock_devices and ubiblock_minor_idr */
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;

static int __init ubiblock_set_param(const char *val,
				     const struct kernel_param *kp)
{
	int i, ret;
	size_t len;
	struct ubiblock_param *param;
	char buf[UBIBLOCK_PARAM_LEN];
	char *pbuf = &buf[0];
	char *tokens[UBIBLOCK_PARAM_COUNT];

	if (!val)
		return -EINVAL;

	len = strnlen(val, UBIBLOCK_PARAM_LEN);
	if (len == 0) {
		pr_warn("UBI: block: empty 'block=' parameter - ignored\n");
		return 0;
	}

	if (len == UBIBLOCK_PARAM_LEN) {
		pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n",
		       val, UBIBLOCK_PARAM_LEN);
		return -EINVAL;
	}

	strcpy(buf, val);

	/* Get rid of the final newline */
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
		tokens[i] = strsep(&pbuf, ",");

	/*
	 * Don't write past the end of the fixed-size parameter array when
	 * more 'block=' parameters are given than UBIBLOCK_MAX_DEVICES.
	 */
	if (ubiblock_devs == UBIBLOCK_MAX_DEVICES) {
		pr_err("UBI: block: too many 'block=' parameters, max. is %d\n",
		       UBIBLOCK_MAX_DEVICES);
		return -EINVAL;
	}

	param = &ubiblock_param[ubiblock_devs];
	if (tokens[1]) {
		/* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
		ret = kstrtoint(tokens[0], 10, &param->ubi_num);
		if (ret < 0)
			return -EINVAL;

		/* Second param can be a number or a name */
		ret = kstrtoint(tokens[1], 10, &param->vol_id);
		if (ret < 0) {
			param->vol_id = -1;
			strcpy(param->name, tokens[1]);
		}

	} else {
		/* One parameter: must be device path */
		strcpy(param->name, tokens[0]);
		param->ubi_num = -1;
		param->vol_id = -1;
	}

	ubiblock_devs++;

	return 0;
}

static const struct kernel_param_ops ubiblock_param_ops = {
	.set = ubiblock_set_param,
};
module_param_cb(block, &ubiblock_param_ops, NULL, 0);
MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
			"Multiple \"block\" parameters may be specified.\n"
			"UBI volumes may be specified by their number, name, or path to the device node.\n"
			"Examples\n"
			"Using the UBI volume path:\n"
			"ubi.block=/dev/ubi0_0\n"
			"Using the UBI device, and the volume name:\n"
			"ubi.block=0,rootfs\n"
			"Using both UBI device number and UBI volume number:\n"
			"ubi.block=0,0\n");

static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
{
	struct ubiblock *dev;

	list_for_each_entry(dev, &ubiblock_devices, list)
		if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
			return dev;
	return NULL;
}

static int ubiblock_read(struct ubiblock_pdu *pdu)
{
	int ret, leb, offset, bytes_left, to_read;
	u64 pos;
	struct request *req = blk_mq_rq_from_pdu(pdu);
	struct ubiblock *dev = req->q->queuedata;

	to_read = blk_rq_bytes(req);
	pos = blk_rq_pos(req) << 9;

	/*
	 * Get the LEB:offset address to read from: do_div() leaves the
	 * quotient (the LEB number) in @pos and returns the remainder.
	 */
	offset = do_div(pos, dev->leb_size);
	leb = pos;
	bytes_left = to_read;

	while (bytes_left) {
		/*
		 * We can only read one LEB at a time. Therefore if the read
		 * length is larger than one LEB size, we split the operation.
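		 *
		 * For example (hypothetical geometry): with 128 KiB LEBs, a
		 * 4 KiB read starting at offset 126 KiB within a LEB becomes
		 * a 2 KiB read from that LEB followed by a 2 KiB read from
		 * the start of the next one.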
		 */
		if (offset + to_read > dev->leb_size)
			to_read = dev->leb_size - offset;

		ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
		if (ret < 0)
			return ret;

		bytes_left -= to_read;
		to_read = bytes_left;
		leb += 1;
		offset = 0;
	}
	return 0;
}

static int ubiblock_open(struct block_device *bdev, fmode_t mode)
{
	struct ubiblock *dev = bdev->bd_disk->private_data;
	int ret;

	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		/*
		 * The volume is already open, just increase the reference
		 * counter.
		 */
		goto out_done;
	}

	/*
	 * We want users to be aware they should only mount us as read-only.
	 * It's just a paranoid check, as write requests will get rejected
	 * in any case.
	 */
	if (mode & FMODE_WRITE) {
		ret = -EROFS;
		goto out_unlock;
	}

	dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
	if (IS_ERR(dev->desc)) {
		dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d",
			dev->ubi_num, dev->vol_id);
		ret = PTR_ERR(dev->desc);
		dev->desc = NULL;
		goto out_unlock;
	}

out_done:
	dev->refcnt++;
	mutex_unlock(&dev->dev_mutex);
	return 0;

out_unlock:
	mutex_unlock(&dev->dev_mutex);
	return ret;
}

static void ubiblock_release(struct gendisk *gd, fmode_t mode)
{
	struct ubiblock *dev = gd->private_data;

	mutex_lock(&dev->dev_mutex);
	dev->refcnt--;
	if (dev->refcnt == 0) {
		ubi_close_volume(dev->desc);
		dev->desc = NULL;
	}
	mutex_unlock(&dev->dev_mutex);
}

static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* Some tools might require this information */
	geo->heads = 1;
	geo->cylinders = 1;
	geo->sectors = get_capacity(bdev->bd_disk);
	geo->start = 0;
	return 0;
}

static const struct block_device_operations ubiblock_ops = {
	.owner = THIS_MODULE,
	.open = ubiblock_open,
	.release = ubiblock_release,
	.getgeo = ubiblock_getgeo,
};

static void ubiblock_do_work(struct work_struct *work)
{
	int ret;
	struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
	struct request *req = blk_mq_rq_from_pdu(pdu);

	blk_mq_start_request(req);

	/*
	 * It is safe to ignore the return value of blk_rq_map_sg() because
	 * the number of sg entries is limited to UBI_MAX_SG_COUNT
	 * and ubi_read_sg() will check that limit.
	 */
	blk_rq_map_sg(req->q, req, pdu->usgl.sg);

	ret = ubiblock_read(pdu);
	rq_flush_dcache_pages(req);

	blk_mq_end_request(req, errno_to_blk_status(ret));
}

static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
				      const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct ubiblock *dev = hctx->queue->queuedata;
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

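	/*
	 * UBI reads may sleep on flash I/O, so hand the request off to the
	 * per-volume workqueue instead of reading from the dispatch context.
	 */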
	switch (req_op(req)) {
	case REQ_OP_READ:
		ubi_sgl_init(&pdu->usgl);
		queue_work(dev->wq, &pdu->work);
		return BLK_STS_OK;
	default:
		return BLK_STS_IOERR;
	}
}

static int ubiblock_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

	sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
	INIT_WORK(&pdu->work, ubiblock_do_work);

	return 0;
}

static const struct blk_mq_ops ubiblock_mq_ops = {
	.queue_rq = ubiblock_queue_rq,
	.init_request = ubiblock_init_request,
};

int ubiblock_create(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	struct gendisk *gd;
	u64 disk_capacity = vi->used_bytes >> 9;
	int ret;

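	/*
	 * Capacity is counted in 512-byte sectors. Refuse volumes whose
	 * sector count cannot be represented in sector_t (a concern on
	 * 32-bit configurations) rather than silently truncating it.
	 */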
	if ((sector_t)disk_capacity != disk_capacity)
		return -EFBIG;

	/* Check that the volume isn't already handled */
	mutex_lock(&devices_mutex);
	if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
	if (!dev) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	mutex_init(&dev->dev_mutex);

	dev->ubi_num = vi->ubi_num;
	dev->vol_id = vi->vol_id;
	dev->leb_size = vi->usable_leb_size;

	/* Initialize the gendisk of this ubiblock device */
	gd = alloc_disk(1);
	if (!gd) {
		pr_err("UBI: block: alloc_disk failed\n");
		ret = -ENODEV;
		goto out_free_dev;
	}

	gd->fops = &ubiblock_ops;
	gd->major = ubiblock_major;
	gd->first_minor = idr_alloc(&ubiblock_minor_idr, dev, 0, 0, GFP_KERNEL);
	if (gd->first_minor < 0) {
		dev_err(disk_to_dev(gd),
			"block: dynamic minor allocation failed");
		ret = -ENODEV;
		goto out_put_disk;
	}
	gd->private_data = dev;
	sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
	set_capacity(gd, disk_capacity);
	dev->gd = gd;

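	/*
	 * One hardware queue is sufficient for this driver; cmd_size tells
	 * blk-mq to allocate a struct ubiblock_pdu behind every request
	 * (see ubiblock_init_request()).
	 */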
	dev->tag_set.ops = &ubiblock_mq_ops;
	dev->tag_set.queue_depth = 64;
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
	dev->tag_set.driver_data = dev;
	dev->tag_set.nr_hw_queues = 1;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret) {
		dev_err(disk_to_dev(dev->gd), "blk_mq_alloc_tag_set failed");
		goto out_remove_minor;
	}

	dev->rq = blk_mq_init_queue(&dev->tag_set);
	if (IS_ERR(dev->rq)) {
		dev_err(disk_to_dev(gd), "blk_mq_init_queue failed");
		ret = PTR_ERR(dev->rq);
		goto out_free_tags;
	}
	blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);

	dev->rq->queuedata = dev;
	dev->gd->queue = dev->rq;

	/*
	 * Create one workqueue per volume (per registered block device).
	 * Remember workqueues are cheap, they're not threads.
	 */
	dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
	if (!dev->wq) {
		ret = -ENOMEM;
		goto out_free_queue;
	}

	list_add_tail(&dev->list, &ubiblock_devices);

	/* Must be the last step: anyone can call file ops from now on */
	add_disk(dev->gd);
	dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
		 dev->ubi_num, dev->vol_id, vi->name);
	mutex_unlock(&devices_mutex);

	if (!strcmp(vi->name, "rootfs") &&
	    IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV) &&
	    ROOT_DEV == 0) {
		pr_notice("ubiblock: device ubiblock%d_%d (%s) set to be root filesystem\n",
			  dev->ubi_num, dev->vol_id, vi->name);
		ROOT_DEV = MKDEV(gd->major, gd->first_minor);
	}

	return 0;

out_free_queue:
	blk_cleanup_queue(dev->rq);
out_free_tags:
	blk_mq_free_tag_set(&dev->tag_set);
out_remove_minor:
	idr_remove(&ubiblock_minor_idr, gd->first_minor);
out_put_disk:
	put_disk(dev->gd);
out_free_dev:
	kfree(dev);
out_unlock:
	mutex_unlock(&devices_mutex);

	return ret;
}

static void ubiblock_cleanup(struct ubiblock *dev)
{
	/* Stop new requests from arriving */
	del_gendisk(dev->gd);
	/* Flush pending work */
	destroy_workqueue(dev->wq);
	/* Finally destroy the blk queue */
	blk_cleanup_queue(dev->rq);
	blk_mq_free_tag_set(&dev->tag_set);
	dev_info(disk_to_dev(dev->gd), "released");
	idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
	put_disk(dev->gd);
}

int ubiblock_remove(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	int ret;

	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		ret = -ENODEV;
		goto out_unlock;
	}

	/* Found a device, let's lock it so we can check if it's busy */
	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		ret = -EBUSY;
		goto out_unlock_dev;
	}

	/* Remove from device list */
	list_del(&dev->list);
	ubiblock_cleanup(dev);
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);

	kfree(dev);
	return 0;

out_unlock_dev:
	mutex_unlock(&dev->dev_mutex);
out_unlock:
	mutex_unlock(&devices_mutex);
	return ret;
}

static int ubiblock_resize(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	u64 disk_capacity = vi->used_bytes >> 9;

	/*
	 * Need to lock the device list until we stop using the device,
	 * otherwise the device struct might get released in
	 * 'ubiblock_remove()'.
	 */
	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		mutex_unlock(&devices_mutex);
		return -ENODEV;
	}
	if ((sector_t)disk_capacity != disk_capacity) {
		mutex_unlock(&devices_mutex);
		dev_warn(disk_to_dev(dev->gd), "the volume is too big (%d LEBs), cannot resize",
			 vi->size);
		return -EFBIG;
	}

	mutex_lock(&dev->dev_mutex);

	if (get_capacity(dev->gd) != disk_capacity) {
		set_capacity(dev->gd, disk_capacity);
		dev_info(disk_to_dev(dev->gd), "resized to %lld bytes",
			 vi->used_bytes);
	}
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);
	return 0;
}

static int ubiblock_notify(struct notifier_block *nb,
			   unsigned long notification_type, void *ns_ptr)
{
	struct ubi_notification *nt = ns_ptr;

	switch (notification_type) {
	case UBI_VOLUME_ADDED:
		/*
		 * We want to enforce explicit block device creation for
		 * volumes, so when a volume is added we do nothing.
		 */
		break;
	case UBI_VOLUME_REMOVED:
		ubiblock_remove(&nt->vi);
		break;
	case UBI_VOLUME_RESIZED:
		ubiblock_resize(&nt->vi);
		break;
	case UBI_VOLUME_UPDATED:
		/*
		 * If the volume is static, a content update might mean the
		 * size (i.e. used_bytes) was also changed.
		 */
		if (nt->vi.vol_type == UBI_STATIC_VOLUME)
			ubiblock_resize(&nt->vi);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block ubiblock_notifier = {
	.notifier_call = ubiblock_notify,
};

static struct ubi_volume_desc * __init
open_volume_desc(const char *name, int ubi_num, int vol_id)
{
	if (ubi_num == -1)
		/* No ubi num, name must be a vol device path */
		return ubi_open_volume_path(name, UBI_READONLY);
	else if (vol_id == -1)
		/* No vol_id, must be vol_name */
		return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
	else
		return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
}

static void __init ubiblock_create_from_param(void)
{
	int i, ret = 0;
	struct ubiblock_param *p;
	struct ubi_volume_desc *desc;
	struct ubi_volume_info vi;

	/*
	 * If there is an error creating one of the ubiblocks, continue on to
	 * create the following ubiblocks. This helps in a circumstance where
	 * the kernel command-line specifies multiple block devices and some
	 * may be broken, but we still want the working ones to come up.
	 */
	for (i = 0; i < ubiblock_devs; i++) {
		p = &ubiblock_param[i];

		desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
		if (IS_ERR(desc)) {
			pr_err("UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
			       p->ubi_num, p->vol_id, PTR_ERR(desc));
			continue;
		}

		ubi_get_volume_info(desc, &vi);
		ubi_close_volume(desc);

		ret = ubiblock_create(&vi);
		if (ret) {
			pr_err("UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
			       vi.name, p->ubi_num, p->vol_id, ret);
			continue;
		}
	}
}
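/*
 * All UBIFS on-flash nodes start with a common header whose magic is
 * 0x06101831, stored little-endian on the medium; checking the first 4
 * bytes of LEB 0 is enough to recognize a UBIFS-formatted volume.
 */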
#define UBIFS_NODE_MAGIC 0x06101831
static inline int ubi_vol_is_ubifs(struct ubi_volume_desc *desc)
{
	int ret;
	__le32 magic_of;
	uint32_t magic;

	ret = ubi_read(desc, 0, (char *)&magic_of, 0, 4);
	if (ret)
		return 0;
	magic = le32_to_cpu(magic_of);
	return magic == UBIFS_NODE_MAGIC;
}
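/*
 * Auto-create a block device for the first volume named "rootfs", but only
 * when it does not carry a UBIFS image: UBIFS mounts UBI volumes directly
 * and needs no block layer in between.
 */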
static void __init ubiblock_create_auto_rootfs(void)
{
	int ubi_num, ret, is_ubifs;
	struct ubi_volume_desc *desc;
	struct ubi_volume_info vi;

	for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++) {
		desc = ubi_open_volume_nm(ubi_num, "rootfs", UBI_READONLY);
		if (IS_ERR(desc))
			continue;

		ubi_get_volume_info(desc, &vi);
		is_ubifs = ubi_vol_is_ubifs(desc);
		ubi_close_volume(desc);
		if (is_ubifs)
			break;

		ret = ubiblock_create(&vi);
		if (ret)
			pr_err("UBI error: block: can't add '%s' volume, err=%d\n",
			       vi.name, ret);
		/* always break if we get here */
		break;
	}
}

static void ubiblock_remove_all(void)
{
	struct ubiblock *next;
	struct ubiblock *dev;

	mutex_lock(&devices_mutex);
	list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
		/* The module is being forcefully removed */
		WARN_ON(dev->desc);
		/* Remove from device list */
		list_del(&dev->list);
		ubiblock_cleanup(dev);
		kfree(dev);
	}
	mutex_unlock(&devices_mutex);
}

int __init ubiblock_init(void)
{
	int ret;

	ubiblock_major = register_blkdev(0, "ubiblock");
	if (ubiblock_major < 0)
		return ubiblock_major;

	/*
	 * Attach block devices from the 'block=' module parameter.
	 * Even if one block device in the parameter list fails to come up,
	 * still allow the module to load and leave any others up.
	 */
	ubiblock_create_from_param();

	/* Auto-attach a "rootfs" volume if it exists and is not UBIFS */
	if (IS_ENABLED(CONFIG_MTD_ROOTFS_ROOT_DEV))
		ubiblock_create_auto_rootfs();

	/*
	 * Block devices are only created upon user requests, so we ignore
	 * existing volumes.
	 */
	ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
	if (ret)
		goto err_unreg;
	return 0;

err_unreg:
	unregister_blkdev(ubiblock_major, "ubiblock");
	ubiblock_remove_all();
	return ret;
}

void __exit ubiblock_exit(void)
{
	ubi_unregister_volume_notifier(&ubiblock_notifier);
	ubiblock_remove_all();
	unregister_blkdev(ubiblock_major, "ubiblock");
}