/*
 * Copyright (C) 2001-2003 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/log2.h>

#define DM_MSG_PREFIX "striped"
#define DM_IO_ERROR_THRESHOLD 15

struct stripe {
	struct dm_dev *dev;
	sector_t physical_start;

	atomic_t error_count;
};

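/*
 * Per-target context.  stripe_ctr() precomputes shift/mask pairs for the
 * chunk size and, when the stripe count is a power of two, for the stripe
 * count as well, so the mapping fast path can use bit operations instead
 * of sector_div().  stripes_shift == -1 marks the non-power-of-two case.
 */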
struct stripe_c {
	uint32_t stripes;
	int stripes_shift;
	sector_t stripes_mask;

	/* The size of this target / num. stripes */
	sector_t stripe_width;

	/* stripe chunk size */
	uint32_t chunk_shift;
	sector_t chunk_mask;

	/* Needed for handling events */
	struct dm_target *ti;

	/* Work struct used for triggering events */
	struct work_struct trigger_event;

	struct stripe stripe[0];
};

/*
 * An event is triggered whenever a drive
 * drops out of a stripe volume.
 */
static void trigger_event(struct work_struct *work)
{
	struct stripe_c *sc = container_of(work, struct stripe_c,
					   trigger_event);
	dm_table_event(sc->ti->table);
}

static inline struct stripe_c *alloc_context(unsigned int stripes)
{
	size_t len;

	if (dm_array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
			     stripes))
		return NULL;

	len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes);

	return kmalloc(len, GFP_KERNEL);
}

/*
 * Parse a single <dev> <sector> pair
 */
static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
		      unsigned int stripe, char **argv)
{
	unsigned long long start;
	char dummy;

	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1)
		return -EINVAL;

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			  &sc->stripe[stripe].dev))
		return -ENXIO;

	sc->stripe[stripe].physical_start = start;

	return 0;
}

/*
 * Construct a striped mapping.
 * <number of stripes> <chunk size (2^n)> [<dev_path> <offset>]+
 */
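/*
 * Example table line (illustrative devices and sizes): a 2-stripe target
 * with a 128-sector chunk over two devices, each used from offset 0:
 *
 *     0 2097152 striped 2 128 /dev/sdb 0 /dev/sdc 0
 *
 * The target length (2097152 sectors here) must be divisible by both the
 * chunk size and the number of stripes.
 */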
static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct stripe_c *sc;
	sector_t width;
	uint32_t stripes;
	uint32_t chunk_size;
	char *end;
	int r;
	unsigned int i;

	if (argc < 2) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	stripes = simple_strtoul(argv[0], &end, 10);
	if (!stripes || *end) {
		ti->error = "Invalid stripe count";
		return -EINVAL;
	}

	chunk_size = simple_strtoul(argv[1], &end, 10);
	if (*end) {
		ti->error = "Invalid chunk_size";
		return -EINVAL;
	}

	/*
	 * chunk_size is a power of two
	 */
	if (!is_power_of_2(chunk_size) ||
	    (chunk_size < (PAGE_SIZE >> SECTOR_SHIFT))) {
		ti->error = "Invalid chunk size";
		return -EINVAL;
	}

	if (ti->len & (chunk_size - 1)) {
		ti->error = "Target length not divisible by "
		    "chunk size";
		return -EINVAL;
	}

	width = ti->len;
	if (sector_div(width, stripes)) {
		ti->error = "Target length not divisible by "
		    "number of stripes";
		return -EINVAL;
	}

	/*
	 * Do we have enough arguments for that many stripes?
	 */
	if (argc != (2 + 2 * stripes)) {
		ti->error = "Not enough destinations "
			"specified";
		return -EINVAL;
	}

	sc = alloc_context(stripes);
	if (!sc) {
		ti->error = "Memory allocation for striped context "
		    "failed";
		return -ENOMEM;
	}

	INIT_WORK(&sc->trigger_event, trigger_event);

	/* Set pointer to dm target; used in trigger_event */
	sc->ti = ti;
	sc->stripes = stripes;
	sc->stripe_width = width;

	if (stripes & (stripes - 1))
		sc->stripes_shift = -1;
	else {
		sc->stripes_shift = ffs(stripes) - 1;
		sc->stripes_mask = ((sector_t) stripes) - 1;
	}

	ti->split_io = chunk_size;
	ti->num_flush_requests = stripes;
	ti->num_discard_requests = stripes;

	sc->chunk_shift = ffs(chunk_size) - 1;
	sc->chunk_mask = ((sector_t) chunk_size) - 1;

	/*
	 * Get the stripe destinations.
	 */
	for (i = 0; i < stripes; i++) {
		argv += 2;

		r = get_stripe(ti, sc, i, argv);
		if (r < 0) {
			ti->error = "Couldn't parse stripe destination";
			while (i--)
				dm_put_device(ti, sc->stripe[i].dev);
			kfree(sc);
			return r;
		}
		atomic_set(&(sc->stripe[i].error_count), 0);
	}

	ti->private = sc;

	return 0;
}

static void stripe_dtr(struct dm_target *ti)
{
	unsigned int i;
	struct stripe_c *sc = (struct stripe_c *) ti->private;

	for (i = 0; i < sc->stripes; i++)
		dm_put_device(ti, sc->stripe[i].dev);

	flush_work_sync(&sc->trigger_event);
	kfree(sc);
}

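/*
 * Map a logical target sector to (stripe number, sector on that stripe).
 * Chunks are distributed round-robin across the stripes.  For example,
 * with a chunk size of 8 sectors and 2 stripes, logical sector 21 sits in
 * chunk 2 at offset 5; chunk 2 is the second chunk held by stripe 0, so
 * the result is stripe 0, sector (1 << 3) | 5 = 13.
 */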
static void stripe_map_sector(struct stripe_c *sc, sector_t sector,
			      uint32_t *stripe, sector_t *result)
{
	sector_t offset = dm_target_offset(sc->ti, sector);
	sector_t chunk = offset >> sc->chunk_shift;

	if (sc->stripes_shift < 0)
		*stripe = sector_div(chunk, sc->stripes);
	else {
		*stripe = chunk & sc->stripes_mask;
		chunk >>= sc->stripes_shift;
	}

	*result = (chunk << sc->chunk_shift) | (offset & sc->chunk_mask);
}

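/*
 * Variant of stripe_map_sector() used by the discard path to clip a range
 * boundary to one stripe: when the boundary sector does not itself live on
 * target_stripe, the mapped result is rounded down to its chunk start, and
 * advanced to the next chunk if target_stripe precedes the stripe that
 * actually holds the sector.
 */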
static void stripe_map_range_sector(struct stripe_c *sc, sector_t sector,
				    uint32_t target_stripe, sector_t *result)
{
	uint32_t stripe;

	stripe_map_sector(sc, sector, &stripe, result);
	if (stripe == target_stripe)
		return;
	*result &= ~sc->chunk_mask;			/* round down */
	if (target_stripe < stripe)
		*result += sc->chunk_mask + 1;		/* next chunk */
}

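/*
 * A discard bio is cloned once per stripe (ti->num_discard_requests is set
 * to the stripe count in the ctr).  Each clone is clipped here to the part
 * of the range that lives on target_stripe; a clone whose stripe holds
 * nothing from the range is completed immediately.
 */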
static int stripe_map_discard(struct stripe_c *sc, struct bio *bio,
			      uint32_t target_stripe)
{
	sector_t begin, end;

	stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin);
	stripe_map_range_sector(sc, bio->bi_sector + bio_sectors(bio),
				target_stripe, &end);
	if (begin < end) {
		bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
		bio->bi_sector = begin + sc->stripe[target_stripe].physical_start;
		bio->bi_size = to_bytes(end - begin);
		return DM_MAPIO_REMAPPED;
	} else {
		/* The range doesn't map to the target stripe */
		bio_endio(bio, 0);
		return DM_MAPIO_SUBMITTED;
	}
}

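/*
 * Empty flush bios are likewise issued once per stripe (num_flush_requests)
 * and simply redirected to the matching device; discards take the clipping
 * path above; everything else is remapped via stripe_map_sector().
 */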
static int stripe_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct stripe_c *sc = ti->private;
	uint32_t stripe;
	unsigned target_request_nr;

	if (bio->bi_rw & REQ_FLUSH) {
		target_request_nr = map_context->target_request_nr;
		BUG_ON(target_request_nr >= sc->stripes);
		bio->bi_bdev = sc->stripe[target_request_nr].dev->bdev;
		return DM_MAPIO_REMAPPED;
	}
	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		target_request_nr = map_context->target_request_nr;
		BUG_ON(target_request_nr >= sc->stripes);
		return stripe_map_discard(sc, bio, target_request_nr);
	}

	stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector);

	bio->bi_sector += sc->stripe[stripe].physical_start;
	bio->bi_bdev = sc->stripe[stripe].dev->bdev;

	return DM_MAPIO_REMAPPED;
}

/*
 * Stripe status:
 *
 * INFO
 * #stripes [stripe_name <stripe_name>] [group word count]
 * [error count 'A|D' <error count 'A|D'>]
 *
 * TABLE
 * #stripes [stripe chunk size]
 * [stripe_name physical_start <stripe_name physical_start>]
 *
 */
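/*
 * Example output for a healthy 2-stripe target (illustrative device
 * numbers):
 *
 *     INFO:  2 8:16 8:32 1 AA
 *     TABLE: 2 128 8:16 0 8:32 0
 */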

static void stripe_status(struct dm_target *ti,
			  status_type_t type, char *result, unsigned int maxlen)
{
	struct stripe_c *sc = (struct stripe_c *) ti->private;
	char buffer[sc->stripes + 1];
	unsigned int sz = 0;
	unsigned int i;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", sc->stripes);
		for (i = 0; i < sc->stripes; i++) {
			DMEMIT("%s ", sc->stripe[i].dev->name);
			buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
				'D' : 'A';
		}
		buffer[i] = '\0';
		DMEMIT("1 %s", buffer);
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%d %llu", sc->stripes,
			(unsigned long long)sc->chunk_mask + 1);
		for (i = 0; i < sc->stripes; i++)
			DMEMIT(" %s %llu", sc->stripe[i].dev->name,
			    (unsigned long long)sc->stripe[i].physical_start);
		break;
	}
}

static int stripe_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	unsigned i;
	char major_minor[16];
	struct stripe_c *sc = ti->private;

	if (!error)
		return 0; /* I/O complete */

	if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
		return error;

	if (error == -EOPNOTSUPP)
		return error;

	memset(major_minor, 0, sizeof(major_minor));
	sprintf(major_minor, "%d:%d",
		MAJOR(disk_devt(bio->bi_bdev->bd_disk)),
		MINOR(disk_devt(bio->bi_bdev->bd_disk)));

	/*
	 * Test to see which stripe drive triggered the event
	 * and increment error count for all stripes on that device.
	 * If the error count for a given device exceeds the threshold
	 * value we will no longer trigger any further events.
	 */
	for (i = 0; i < sc->stripes; i++)
		if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
			atomic_inc(&(sc->stripe[i].error_count));
			if (atomic_read(&(sc->stripe[i].error_count)) <
			    DM_IO_ERROR_THRESHOLD)
				schedule_work(&sc->trigger_event);
		}

	return error;
}

static int stripe_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct stripe_c *sc = ti->private;
	int ret = 0;
	unsigned i = 0;

	do {
		ret = fn(ti, sc->stripe[i].dev,
			 sc->stripe[i].physical_start,
			 sc->stripe_width, data);
	} while (!ret && ++i < sc->stripes);

	return ret;
}

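/*
 * Advertise the chunk size as the minimum I/O size and one full stripe
 * (chunk size * number of stripes) as the optimal I/O size.  E.g. a
 * 128-sector chunk over 4 stripes yields io_min = 64KiB, io_opt = 256KiB.
 */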
static void stripe_io_hints(struct dm_target *ti,
			    struct queue_limits *limits)
{
	struct stripe_c *sc = ti->private;
	unsigned chunk_size = (sc->chunk_mask + 1) << 9;

	blk_limits_io_min(limits, chunk_size);
	blk_limits_io_opt(limits, chunk_size * sc->stripes);
}

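/*
 * merge_bvec support: map the proposed bio's start sector onto the stripe
 * that would receive it and, if that device's queue has its own
 * merge_bvec_fn, let it bound the allowed size.
 */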
static int stripe_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
			struct bio_vec *biovec, int max_size)
{
	struct stripe_c *sc = ti->private;
	sector_t bvm_sector = bvm->bi_sector;
	uint32_t stripe;
	struct request_queue *q;

	stripe_map_sector(sc, bvm_sector, &stripe, &bvm_sector);

	q = bdev_get_queue(sc->stripe[stripe].dev->bdev);
	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = sc->stripe[stripe].dev->bdev;
	bvm->bi_sector = sc->stripe[stripe].physical_start + bvm_sector;

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static struct target_type stripe_target = {
	.name   = "striped",
	.version = {1, 4, 0},
	.module = THIS_MODULE,
	.ctr    = stripe_ctr,
	.dtr    = stripe_dtr,
	.map    = stripe_map,
	.end_io = stripe_end_io,
	.status = stripe_status,
	.iterate_devices = stripe_iterate_devices,
	.io_hints = stripe_io_hints,
	.merge  = stripe_merge,
};

int __init dm_stripe_init(void)
{
	int r;

	r = dm_register_target(&stripe_target);
	if (r < 0) {
		DMWARN("target registration failed");
		return r;
	}

	return r;
}

void dm_stripe_exit(void)
{
	dm_unregister_target(&stripe_target);
}