rjw | 1f88458 | 2022-01-06 17:20:42 +0800 | [diff] [blame^] | 1 | /* |
| 2 | * Copyright (c) 2013, Google, Inc. All rights reserved |
| 3 | * Copyright (c) 2014, Travis Geiselbrecht |
| 4 | * |
| 5 | * Permission is hereby granted, free of charge, to any person obtaining |
| 6 | * a copy of this software and associated documentation files |
| 7 | * (the "Software"), to deal in the Software without restriction, |
| 8 | * including without limitation the rights to use, copy, modify, merge, |
| 9 | * publish, distribute, sublicense, and/or sell copies of the Software, |
| 10 | * and to permit persons to whom the Software is furnished to do so, |
| 11 | * subject to the following conditions: |
| 12 | * |
| 13 | * The above copyright notice and this permission notice shall be |
| 14 | * included in all copies or substantial portions of the Software. |
| 15 | * |
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
| 17 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
| 18 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. |
| 19 | * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY |
| 20 | * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, |
| 21 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE |
| 22 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
| 23 | */ |
| 24 | #include <lib/ptable.h> |
| 25 | #include <debug.h> |
| 26 | #include <trace.h> |
| 27 | #include <assert.h> |
| 28 | #include <err.h> |
| 29 | #include <string.h> |
| 30 | #include <malloc.h> |
| 31 | #include <stdlib.h> |
| 32 | #include <list.h> |
| 33 | #include <lib/bio.h> |
| 34 | #include <lib/cksum.h> |
| 35 | #include <lk/init.h> |
| 36 | |
| 37 | #define LOCAL_TRACE 0 |
| 38 | |
| 39 | #define PTABLE_MAGIC '1BTP' |
| 40 | #define PTABLE_MIN_ENTRIES 16 |
| 41 | #define PTABLE_PART_NAME "ptable" |
| 42 | |
/* On-storage layout of the partition table header. It is immediately
 * followed by a packed array of struct ptable_entry; ptable_write and
 * ptable_scan must agree on this layout.
 */
struct ptable_header {
    uint32_t magic;        /* must equal PTABLE_MAGIC */
    uint32_t crc32;        /* (total ptable according to total_length, 0 where crc field is) */
    uint32_t generation;   /* incremented by one every time its saved */
    uint32_t total_length; /* valid length of table, only covers entries that are used */
};
| 49 | |
/* In-memory wrapper pairing one partition entry with its link in the
 * module's offset-sorted partition list (ptable.list).
 */
struct ptable_mem_entry {
    struct list_node node;     /* link in ptable.list */
    struct ptable_entry entry; /* copy of the on-storage entry */
};
| 54 | |
/* Module-global state: the backing block device (NULL while unmounted),
 * the next generation number written by ptable_write, and the list of
 * ptable_mem_entry sorted by ascending partition offset.
 */
static struct ptable_state {
    bdev_t *bdev;
    uint32_t gen;
    struct list_node list;
} ptable;
| 60 | |
/* Number of entries described by a validated header, derived from total_length. */
#define PTABLE_HEADER_NUM_ENTRIES(header) (((header).total_length - sizeof(struct ptable_header)) / sizeof(struct ptable_entry))
/* Set the function-local 'err' and jump to its bailout label. */
#define BAIL(__err) do { err = __err; goto bailout; } while (0)
| 63 | |
/* Bytes needed to store a table holding entry_cnt entries (header + entries). */
static inline size_t ptable_length(size_t entry_cnt)
{
    return sizeof(struct ptable_header) + (sizeof(struct ptable_entry) * entry_cnt);
}
| 68 | |
| 69 | static status_t validate_entry(const struct ptable_entry *entry) |
| 70 | { |
| 71 | if (entry->offset > entry->offset + entry->length) |
| 72 | return ERR_GENERIC; |
| 73 | if (entry->offset + entry->length > (uint64_t)ptable.bdev->total_size) |
| 74 | return ERR_GENERIC; |
| 75 | |
| 76 | uint i; |
| 77 | for (i = 0; i < sizeof(entry->name); i++) |
| 78 | if (entry->name[i] == 0) |
| 79 | break; |
| 80 | |
| 81 | if (!i || (i >= sizeof(entry->name))) |
| 82 | return ERR_GENERIC; |
| 83 | |
| 84 | return NO_ERROR; |
| 85 | } |
| 86 | |
| 87 | static status_t ptable_write(void) |
| 88 | { |
| 89 | uint8_t* buf = NULL; |
| 90 | bdev_t* bdev = NULL; |
| 91 | ssize_t err = ERR_GENERIC; |
| 92 | |
| 93 | if (!ptable_found_valid()) |
| 94 | return ERR_NOT_MOUNTED; |
| 95 | |
| 96 | bdev = bio_open(PTABLE_PART_NAME); |
| 97 | if (!bdev) |
| 98 | return ERR_BAD_STATE; |
| 99 | |
| 100 | /* count the number of entries in the list and calculate the total size */ |
| 101 | size_t count = 0; |
| 102 | struct list_node *node; |
| 103 | list_for_every(&ptable.list, node) { |
| 104 | count++; |
| 105 | } |
| 106 | LTRACEF("%u entries\n", count); |
| 107 | size_t total_length = sizeof(struct ptable_header) + sizeof(struct ptable_entry) * count; |
| 108 | |
| 109 | /* can we fit our partition table in our ptable subdevice? */ |
| 110 | if (total_length > bdev->total_size) |
| 111 | BAIL(ERR_TOO_BIG); |
| 112 | |
| 113 | /* allocate a buffer to hold it */ |
| 114 | buf = malloc(total_length); |
| 115 | if (!buf) |
| 116 | BAIL(ERR_NO_MEMORY); |
| 117 | |
| 118 | /* fill in a default header */ |
| 119 | struct ptable_header *header = (struct ptable_header *)buf; |
| 120 | header->magic = PTABLE_MAGIC; |
| 121 | header->crc32 = 0; |
| 122 | header->generation = ptable.gen++; |
| 123 | header->total_length = total_length; |
| 124 | |
| 125 | /* start the crc calculation */ |
| 126 | header->crc32 = crc32(0, (void *)header, sizeof(*header)); |
| 127 | |
| 128 | /* start by writing the entries */ |
| 129 | size_t off = sizeof(struct ptable_header); |
| 130 | struct ptable_mem_entry *mentry; |
| 131 | list_for_every_entry(&ptable.list, mentry, struct ptable_mem_entry, node) { |
| 132 | const struct ptable_entry *entry = &mentry->entry; |
| 133 | |
| 134 | memcpy(buf + off, entry, sizeof(struct ptable_entry)); |
| 135 | |
| 136 | /* update the header */ |
| 137 | header->crc32 = crc32(header->crc32, (void *)entry, sizeof(struct ptable_entry)); |
| 138 | |
| 139 | off += sizeof(struct ptable_entry); |
| 140 | } |
| 141 | |
| 142 | /* write it to the block device. If the device has an erase geometry, start |
| 143 | * by erasing the partition. |
| 144 | */ |
| 145 | if (bdev->geometry_count && bdev->geometry) { |
| 146 | /* This is a subdevice, it should have a homogeneous erase geometry */ |
| 147 | DEBUG_ASSERT(1 == bdev->geometry_count); |
| 148 | |
| 149 | ssize_t err = bio_erase(bdev, 0, bdev->total_size); |
| 150 | if (err != (ssize_t)bdev->total_size) { |
| 151 | LTRACEF("error %d erasing device\n", (int)err); |
| 152 | BAIL(ERR_IO); |
| 153 | } |
| 154 | } |
| 155 | |
| 156 | err = bio_write(bdev, buf, 0, total_length); |
| 157 | if (err < (ssize_t)total_length) { |
| 158 | LTRACEF("error %d writing data to device\n", (int)err); |
| 159 | BAIL(ERR_IO); |
| 160 | } |
| 161 | |
| 162 | LTRACEF("wrote ptable:\n"); |
| 163 | if (LOCAL_TRACE) |
| 164 | hexdump(buf, total_length); |
| 165 | |
| 166 | err = NO_ERROR; |
| 167 | |
| 168 | bailout: |
| 169 | if (bdev) |
| 170 | bio_close(bdev); |
| 171 | |
| 172 | free(buf); |
| 173 | |
| 174 | return err; |
| 175 | } |
| 176 | |
/* Initialize (or re-initialize) the module's global state to an empty,
 * unmounted configuration. Registered as an LK init hook at threading
 * level; the 'level' argument is unused.
 */
static void ptable_init(uint level)
{
    memset(&ptable, 0, sizeof(ptable));
    list_initialize(&ptable.list);
}

LK_INIT_HOOK(ptable, &ptable_init, LK_INIT_LEVEL_THREADING);
| 184 | |
| 185 | static void ptable_unpublish(struct ptable_mem_entry* mentry) |
| 186 | { |
| 187 | if (mentry) { |
| 188 | bdev_t* bdev; |
| 189 | |
| 190 | bdev = bio_open((char*)mentry->entry.name); |
| 191 | if (bdev) { |
| 192 | bio_unregister_device(bdev); |
| 193 | bio_close(bdev); |
| 194 | } |
| 195 | |
| 196 | if (list_in_list(&mentry->node)) |
| 197 | list_delete(&mentry->node); |
| 198 | |
| 199 | free(mentry); |
| 200 | } |
| 201 | } |
| 202 | |
| 203 | static void ptable_reset(void) |
| 204 | { |
| 205 | /* walk through the partition list, clearing any entries */ |
| 206 | struct ptable_mem_entry *mentry; |
| 207 | struct ptable_mem_entry *temp; |
| 208 | list_for_every_entry_safe(&ptable.list, mentry, temp, struct ptable_mem_entry, node) { |
| 209 | ptable_unpublish(mentry); |
| 210 | } |
| 211 | |
| 212 | /* release our reference to our primary device */ |
| 213 | if (NULL != ptable.bdev) |
| 214 | bio_close(ptable.bdev); |
| 215 | |
| 216 | /* Reset initialize our bookkeeping */ |
| 217 | ptable_init(LK_INIT_LEVEL_THREADING); |
| 218 | } |
| 219 | |
| 220 | static void ptable_push_entry (struct ptable_mem_entry *mentry) |
| 221 | { |
| 222 | DEBUG_ASSERT (mentry); |
| 223 | |
| 224 | // iterator for the list |
| 225 | struct ptable_mem_entry *it_mentry; |
| 226 | |
| 227 | // The ptable list must be ordered by offset, so let's find the correct |
| 228 | // spot for this entry |
| 229 | list_for_every_entry(&ptable.list, it_mentry, struct ptable_mem_entry, node) { |
| 230 | if (it_mentry->entry.offset > mentry->entry.offset) { |
| 231 | // push the entry and we are done ! |
| 232 | list_add_before(&it_mentry->node, &mentry->node); |
| 233 | // All done |
| 234 | return; |
| 235 | } |
| 236 | } |
| 237 | |
| 238 | // if we exist the loop, that means that the |
| 239 | // entry has not been added, let add it at the tail |
| 240 | list_add_tail(&ptable.list, &mentry->node); |
| 241 | } |
| 242 | |
| 243 | static status_t ptable_publish(const struct ptable_entry* entry) { |
| 244 | status_t err; |
| 245 | struct ptable_mem_entry *mentry = NULL; |
| 246 | |
| 247 | DEBUG_ASSERT(entry && ptable.bdev); |
| 248 | size_t block_mask = ((size_t)0x01 << ptable.bdev->block_shift) - 1; |
| 249 | |
| 250 | err = validate_entry(entry); |
| 251 | if (err < 0) { |
| 252 | LTRACEF("entry failed valid check\n"); |
| 253 | BAIL(ERR_NOT_FOUND); |
| 254 | } |
| 255 | |
| 256 | // Make sure the partition does not already exist. |
| 257 | const char* part_name = (const char*)entry->name; |
| 258 | err = ptable_find(part_name, 0); |
| 259 | if (err >= 0) { |
| 260 | LTRACEF("entry \"%s\" already exists\n", part_name); |
| 261 | BAIL(ERR_ALREADY_EXISTS); |
| 262 | } |
| 263 | |
| 264 | // make sure that the partition is aligned properly |
| 265 | if ((entry->offset & block_mask) || (entry->length & block_mask)) { |
| 266 | LTRACEF("Entry in parition (\"%s\") is misaligned " |
| 267 | "(off 0x%llx len 0x%llx blockmask 0x%zx\n", |
| 268 | part_name, entry->offset, entry->length, block_mask); |
| 269 | BAIL(ERR_BAD_STATE); |
| 270 | } |
| 271 | |
| 272 | // make sure that length is non-zero and does not wrap |
| 273 | if ((entry->offset + entry->length) <= entry->offset) { |
| 274 | LTRACEF("Bad offset/length 0x%llx/0x%llx\n", entry->offset, entry->length); |
| 275 | BAIL(ERR_INVALID_ARGS); |
| 276 | } |
| 277 | |
| 278 | // make sure entry can fit in the device |
| 279 | if ((entry->offset + entry->length) > (uint64_t)ptable.bdev->total_size) { |
| 280 | LTRACEF("outside of device\n"); |
| 281 | BAIL(ERR_INVALID_ARGS); |
| 282 | } |
| 283 | |
| 284 | /* create an in-memory copy and attempt to publish a subdevice for the |
| 285 | * partition |
| 286 | */ |
| 287 | mentry = calloc(1, sizeof(struct ptable_mem_entry)); |
| 288 | if (!mentry) { |
| 289 | LTRACEF("Out of memory\n"); |
| 290 | BAIL(ERR_NO_MEMORY); |
| 291 | } |
| 292 | |
| 293 | memcpy(&mentry->entry, entry, sizeof(struct ptable_entry)); |
| 294 | err = bio_publish_subdevice(ptable.bdev->name, part_name, |
| 295 | entry->offset >> ptable.bdev->block_shift, |
| 296 | entry->length >> ptable.bdev->block_shift); |
| 297 | if (err < 0) { |
| 298 | LTRACEF("Failed to publish subdevice for \"%s\"\n", part_name); |
| 299 | goto bailout; |
| 300 | } |
| 301 | |
| 302 | err = NO_ERROR; |
| 303 | |
| 304 | bailout: |
| 305 | /* If we failed to publish, clean up whatever we may have allocated. |
| 306 | * Otherwise, put our new entry on the in-memory list. |
| 307 | */ |
| 308 | if (err < 0) { |
| 309 | ptable_unpublish(mentry); |
| 310 | } else { |
| 311 | ptable_push_entry (mentry); |
| 312 | } |
| 313 | |
| 314 | return err; |
| 315 | } |
| 316 | |
/* Place an allocation of *plength bytes within the candidate free region
 * [region_start, region_start + region_len), honoring the backing
 * device's erase geometry if it has one.
 *
 * On success, returns the chosen offset within the region; *plength may
 * be rounded up to a multiple of the erase unit. On failure returns a
 * negative error code: ERR_TOO_BIG if the request exceeds the region
 * outright, ERR_INVALID_ARGS if no erase-aligned placement fits. When
 * alloc_end is true, the allocation is placed at the end of the region
 * rather than the beginning.
 */
static off_t ptable_adjust_request_for_erase_geometry(uint64_t region_start,
                                                      uint64_t region_len,
                                                      uint64_t* plength,
                                                      bool alloc_end)
{
    DEBUG_ASSERT(plength && ptable.bdev);

    LTRACEF("[0x%llx, 0x%llx) len 0x%llx%s\n",
            region_start, region_start + region_len, *plength, alloc_end ? " (alloc end)" : "");

    /* callers must pass program-block-aligned inputs */
    uint64_t block_mask = ((uint64_t)0x1 << ptable.bdev->block_shift) - 1;
    DEBUG_ASSERT(!(*plength & block_mask));
    DEBUG_ASSERT(!(region_start & block_mask));
    DEBUG_ASSERT(!(region_len & block_mask));

    uint64_t region_end = region_start + region_len;
    DEBUG_ASSERT(region_end >= region_start);

    // Can we fit in the region at all?
    if (*plength > region_len) {
        LTRACEF("Request too large for region (0x%llx > 0x%llx)\n", *plength, region_len);
        return ERR_TOO_BIG;
    }

    // If our block device does not have an erase geometry to obey, then great!
    // No special modifications to the request are needed. Just determine the
    // offset based on if we are allocating from the start or the end.
    if (!ptable.bdev->geometry_count || !ptable.bdev->geometry) {
        off_t ret = alloc_end ? (region_start + region_len - *plength) : region_start;
        LTRACEF("No geometry; allocating at [0x%llx, 0x%llx)\n", ret, ret + *plength);
        return ret;
    }

    // Intersect each of the erase regions with the region being proposed and
    // see if we can fit the allocation request in the intersection, after
    // adjusting the intersection and requested length to multiples of and
    // aligned to the erase block size. Test the geometries back-to-front
    // instead of front-to-back if alloc_end has been requested.
    for (size_t i = 0; i < ptable.bdev->geometry_count; ++i) {
        size_t geo_index = alloc_end ? (ptable.bdev->geometry_count - i - 1) : i;
        const bio_erase_geometry_info_t* geo = ptable.bdev->geometry + geo_index;
        uint64_t erase_mask = ((uint64_t)0x1 << geo->erase_shift) - 1;

        LTRACEF("Considering erase region [0x%llx, 0x%llx) (erase size 0x%zx)\n",
                geo->start, geo->start + geo->size, geo->erase_size);

        // If the erase region and the allocation region do not intersect at
        // all, just move on to the next region.
        if (!bio_does_overlap(region_start, region_len, geo->start, geo->size)) {
            LTRACEF("No overlap...\n");
            continue;
        }

        // Compute the intersection of the request region with the erase region.
        uint64_t erase_end = geo->start + geo->size;
        uint64_t rstart = MAX(region_start, (uint64_t)geo->start);
        uint64_t rend = MIN(region_end, erase_end);

        // Align to erase unit boundaries. Move the start of the intersected
        // region up and the end of the intersected region down.
        rstart = (rstart + erase_mask) & ~erase_mask;
        rend = rend & ~erase_mask;

        // Round the requested length up to a multiple of the erase unit.
        uint64_t length = (*plength + erase_mask) & ~erase_mask;

        LTRACEF("Trimmed and aligned request [0x%llx, 0x%llx) len 0x%llx%s\n",
                rstart, rend, length, alloc_end ? " (alloc end)" : "");

        // Is there enough space in the aligned intersection to hold the
        // request? (The first comparison also catches rstart + length
        // wrapping around.)
        uint64_t tmp = rstart + length;
        if ((tmp < rstart) || (rend < tmp)) {
            LTRACEF("Not enough space\n");
            continue;
        }

        // Yay! We found space for this allocation! Adjust the requested
        // length and return the appropriate offset based on whether we want to
        // allocate from the start or the end.
        off_t ret;
        *plength = length;
        ret = alloc_end ? (rend - length) : rstart;
        LTRACEF("Allocating at [0x%llx, 0x%llx) (erase_size 0x%zx)\n",
                ret, ret + *plength, geo->erase_size);
        return ret;
    }

    // Looks like we didn't find a place to put this allocation.
    LTRACEF("No location found!\n");
    return ERR_INVALID_ARGS;
}
| 409 | |
/* Find a free region on the backing device able to hold *plength bytes
 * (rounded up to the program block size, and possibly further for the
 * erase geometry).
 *
 * Scans the gaps between existing partitions — the in-memory list is
 * kept sorted by offset — plus the final gap before the end of the
 * device. On success returns the chosen offset and updates *plength
 * with the final allocation size; on failure returns a negative error
 * code. FLASH_PTABLE_ALLOC_END in flags requests the highest suitable
 * placement instead of the lowest.
 */
static off_t ptable_allocate(uint64_t* plength, uint flags)
{
    DEBUG_ASSERT(plength);

    if (!ptable.bdev)
        return ERR_BAD_STATE;

    LTRACEF("length 0x%llx, flags 0x%x\n", *plength, flags);

    /* round the request up to a whole number of program blocks */
    uint64_t block_mask = ((uint64_t)0x1 << ptable.bdev->block_shift) - 1;
    uint64_t length = (*plength + block_mask) & ~block_mask;
    off_t offset = ERR_NOT_FOUND;
    bool alloc_end = 0 != (flags & FLASH_PTABLE_ALLOC_END);

    if (list_is_empty(&ptable.list)) {
        /* If the ptable is empty, then we have the entire device to use for
         * allocation. Apply the erase geometry and return the result.
         */
        offset = ptable_adjust_request_for_erase_geometry(0,
                                                          ptable.bdev->total_size,
                                                          &length,
                                                          alloc_end);
        goto done;
    }

    const struct ptable_entry *lastentry = NULL;
    struct ptable_mem_entry *mentry;
    uint64_t region_start;
    uint64_t region_len;
    uint64_t test_len;
    off_t test_offset;

    list_for_every_entry(&ptable.list, mentry, struct ptable_mem_entry, node) {
        const struct ptable_entry *entry = &mentry->entry;

        // Figure out the region we are testing (the gap between the previous
        // entry and this one), then adjust the request based on the device
        // erase geometry.
        region_start = lastentry ? (lastentry->offset + lastentry->length): 0;
        region_len = entry->offset - region_start;
        DEBUG_ASSERT((int64_t)region_len >= 0);

        LTRACEF("Considering region [0x%llx, 0x%llx) between \"%s\" and \"%s\"\n",
                region_start,
                region_start + region_len,
                lastentry ? (char*)lastentry->name : "<device start>",
                entry->name);
        lastentry = entry;

        // Don't bother with the region if it is of zero length
        if (!region_len)
            continue;

        test_len = length;
        test_offset = ptable_adjust_request_for_erase_geometry(region_start,
                                                               region_len,
                                                               &test_len,
                                                               alloc_end);

        // If this region was no good, move onto the next one.
        if (test_offset < 0)
            continue;

        // We found a possible answer, go ahead and record it. If we are
        // allocating from the front, then we are finished. If we are
        // attempting to allocate from the back, keep looking to see if
        // there are other (better) answers.
        offset = test_offset;
        length = test_len;
        if (!alloc_end)
            goto done;
    }

    /* still looking... the final region to test goes from the end of the previous
     * region to the end of the device.
     */
    DEBUG_ASSERT(lastentry); /* should always have a valid tail */

    region_start = lastentry->offset + lastentry->length;
    region_len = ptable.bdev->total_size - region_start;
    DEBUG_ASSERT((int64_t)region_len >= 0);

    if (region_len) {
        LTRACEF("Considering region [0x%llx, 0x%llx) between \"%s\" and \"%s\"\n",
                region_start,
                region_start + region_len,
                lastentry->name,
                "<device end>");
        test_len = length;
        test_offset = ptable_adjust_request_for_erase_geometry(region_start,
                                                               region_len,
                                                               &test_len,
                                                               alloc_end);
        if (test_offset >= 0) {
            offset = test_offset;
            length = test_len;
        }
    }

done:
    if (offset < 0) {
        LTRACEF("Failed to find a suitable region of at least length %llu (err %lld)\n",
                *plength, offset);
    } else {
        LTRACEF("Found region for %lld byte request @[%lld, %lld)\n",
                *plength, offset, offset + length);
        /* report the final (aligned) size back to the caller */
        *plength = length;
    }

    return offset;
}
| 520 | |
| 521 | static status_t ptable_allocate_at(off_t _offset, uint64_t* plength) |
| 522 | { |
| 523 | if (!ptable.bdev) |
| 524 | return ERR_BAD_STATE; |
| 525 | |
| 526 | if ((_offset < 0) || !plength) |
| 527 | return ERR_INVALID_ARGS; |
| 528 | |
| 529 | /* to make life easier, get our offset into unsigned */ |
| 530 | uint64_t offset = (uint64_t)_offset; |
| 531 | |
| 532 | /* Make certain the request was aligned to a program block boundary, and |
| 533 | * adjust the length to be a multiple of program blocks in size. |
| 534 | */ |
| 535 | uint64_t block_mask = ((uint64_t)0x1 << ptable.bdev->block_shift) - 1; |
| 536 | if (offset & block_mask) |
| 537 | return ERR_INVALID_ARGS; |
| 538 | |
| 539 | *plength = (*plength + block_mask) & ~block_mask; |
| 540 | |
| 541 | /* Make sure the request is contained within the extent of the device |
| 542 | * itself. |
| 543 | */ |
| 544 | if (!bio_contains_range(0, ptable.bdev->total_size, offset, *plength)) |
| 545 | return ERR_INVALID_ARGS; |
| 546 | |
| 547 | /* Adjust the request base on the erase geometry. If the offset needs to |
| 548 | * move to accomadate the erase geometry, we cannot satisfy this request. |
| 549 | */ |
| 550 | uint64_t new_offset = ptable_adjust_request_for_erase_geometry(offset, |
| 551 | ptable.bdev->total_size - offset, |
| 552 | plength, |
| 553 | false); |
| 554 | if (new_offset != offset) |
| 555 | return ERR_INVALID_ARGS; |
| 556 | |
| 557 | /* Finally, check the adjusted request against all of the existing |
| 558 | * partitions. The final region may not overlap an of the existing |
| 559 | * partitions. |
| 560 | */ |
| 561 | struct ptable_mem_entry *mentry; |
| 562 | list_for_every_entry(&ptable.list, mentry, struct ptable_mem_entry, node) { |
| 563 | const struct ptable_entry *entry = &mentry->entry; |
| 564 | |
| 565 | if (bio_does_overlap(offset, *plength, entry->offset, entry->length)) |
| 566 | return ERR_NOT_FOUND; |
| 567 | } |
| 568 | |
| 569 | // Success. |
| 570 | return NO_ERROR; |
| 571 | } |
| 572 | |
| 573 | status_t ptable_scan(const char* bdev_name, uint64_t offset) |
| 574 | { |
| 575 | ssize_t err; |
| 576 | DEBUG_ASSERT(bdev_name); |
| 577 | |
| 578 | ptable_reset(); |
| 579 | |
| 580 | /* Open a reference to the main block device */ |
| 581 | ptable.bdev = bio_open(bdev_name); |
| 582 | if (NULL == ptable.bdev) { |
| 583 | LTRACEF("Failed to find device \"%s\"", bdev_name); |
| 584 | BAIL(ERR_NOT_FOUND); |
| 585 | } |
| 586 | |
| 587 | /* validate the header */ |
| 588 | struct ptable_header header; |
| 589 | |
| 590 | err = bio_read(ptable.bdev, &header, offset, sizeof(header)); |
| 591 | if (err < (ssize_t)sizeof(header)) { |
| 592 | LTRACEF("failed to read partition table header @%llu (%ld)\n", offset, err); |
| 593 | goto bailout; |
| 594 | } |
| 595 | |
| 596 | if (LOCAL_TRACE) |
| 597 | hexdump(&header, sizeof(struct ptable_header)); |
| 598 | |
| 599 | if (header.magic != PTABLE_MAGIC) { |
| 600 | LTRACEF("failed magic test\n"); |
| 601 | BAIL(ERR_NOT_FOUND); |
| 602 | } |
| 603 | if (header.total_length < sizeof(struct ptable_header)) { |
| 604 | LTRACEF("total length too short\n"); |
| 605 | BAIL(ERR_NOT_FOUND); |
| 606 | } |
| 607 | if (header.total_length > ptable.bdev->block_size) { |
| 608 | LTRACEF("total length too long\n"); |
| 609 | BAIL(ERR_NOT_FOUND); |
| 610 | } |
| 611 | if (((header.total_length - sizeof(struct ptable_header)) % sizeof(struct ptable_entry)) != 0) { |
| 612 | LTRACEF("total length not multiple of header + multiple of entry size\n"); |
| 613 | BAIL(ERR_NOT_FOUND); |
| 614 | } |
| 615 | |
| 616 | /* start a crc check by calculating the header */ |
| 617 | uint32_t crc; |
| 618 | uint32_t saved_crc = header.crc32; |
| 619 | header.crc32 = 0; |
| 620 | crc = crc32(0, (void *)&header, sizeof(header)); |
| 621 | header.crc32 = saved_crc; |
| 622 | bool found_ptable = false; |
| 623 | |
| 624 | /* read the entries into memory */ |
| 625 | off_t off = offset + sizeof(struct ptable_header); |
| 626 | for (uint i = 0; i < PTABLE_HEADER_NUM_ENTRIES(header); i++) { |
| 627 | struct ptable_entry entry; |
| 628 | |
| 629 | /* read the next entry off the device */ |
| 630 | err = bio_read(ptable.bdev, &entry, off, sizeof(entry)); |
| 631 | if (err < 0) { |
| 632 | LTRACEF("failed to read entry\n"); |
| 633 | goto bailout; |
| 634 | } |
| 635 | |
| 636 | LTRACEF("looking at entry:\n"); |
| 637 | if (LOCAL_TRACE) |
| 638 | hexdump(&entry, sizeof(entry)); |
| 639 | |
| 640 | /* Attempt to publish the entry */ |
| 641 | err = ptable_publish(&entry); |
| 642 | if (err < 0) { |
| 643 | goto bailout; |
| 644 | } |
| 645 | |
| 646 | /* If this was the "ptable" entry, was it in the right place? */ |
| 647 | if (!strncmp((char*)entry.name, PTABLE_PART_NAME, sizeof(entry.name))) { |
| 648 | found_ptable = true; |
| 649 | |
| 650 | if (entry.offset != offset) { |
| 651 | LTRACEF("\"ptable\" in the wrong location! (expected %lld got %lld)\n", |
| 652 | offset, entry.offset); |
| 653 | BAIL(ERR_BAD_STATE); |
| 654 | } |
| 655 | } |
| 656 | |
| 657 | /* append the crc */ |
| 658 | crc = crc32(crc, (void *)&entry, sizeof(entry)); |
| 659 | |
| 660 | /* Move on to the next entry */ |
| 661 | off += sizeof(struct ptable_entry); |
| 662 | } |
| 663 | |
| 664 | if (header.crc32 != crc) { |
| 665 | LTRACEF("failed crc check at the end (0x%08x != 0x%08x)\n", header.crc32, crc); |
| 666 | BAIL(ERR_CRC_FAIL); |
| 667 | } |
| 668 | |
| 669 | if (!found_ptable) { |
| 670 | LTRACEF("\"ptable\" partition not found\n"); |
| 671 | BAIL(ERR_NOT_FOUND); |
| 672 | } |
| 673 | |
| 674 | err = NO_ERROR; |
| 675 | |
| 676 | bailout: |
| 677 | if (err < 0) |
| 678 | ptable_reset(); |
| 679 | |
| 680 | return (status_t)err; |
| 681 | } |
| 682 | |
| 683 | bool ptable_found_valid(void) |
| 684 | { |
| 685 | return (NULL != ptable.bdev); |
| 686 | } |
| 687 | |
/* Return the backing block device, or NULL when no ptable is mounted. */
bdev_t *ptable_get_device(void)
{
    return ptable.bdev;
}
| 692 | |
| 693 | status_t ptable_find(const char *name, struct ptable_entry *_entry) |
| 694 | { |
| 695 | struct ptable_mem_entry *mentry; |
| 696 | list_for_every_entry(&ptable.list, mentry, struct ptable_mem_entry, node) { |
| 697 | const struct ptable_entry *entry = &mentry->entry; |
| 698 | if (strcmp(name, (void *)entry->name) == 0) { |
| 699 | /* copy the entry to the passed in pointer */ |
| 700 | if (_entry) { |
| 701 | memcpy(_entry, entry, sizeof(struct ptable_entry)); |
| 702 | } |
| 703 | |
| 704 | return NO_ERROR; |
| 705 | } |
| 706 | } |
| 707 | |
| 708 | return ERR_NOT_FOUND; |
| 709 | } |
| 710 | |
| 711 | status_t ptable_create_default(const char* bdev_name, uint64_t offset) |
| 712 | { |
| 713 | DEBUG_ASSERT(bdev_name); |
| 714 | |
| 715 | /* Reset the system */ |
| 716 | ptable_reset(); |
| 717 | ptable.bdev = bio_open(bdev_name); |
| 718 | if (!ptable.bdev) { |
| 719 | LTRACEF("Failed to open \"%s\"\n", bdev_name); |
| 720 | return ERR_NOT_FOUND; |
| 721 | } |
| 722 | |
| 723 | /* See if we can put the partition table partition at the requested |
| 724 | * location, and determine the size needed based on program block size and |
| 725 | * erase block geometry. |
| 726 | */ |
| 727 | uint64_t len = ptable_length(PTABLE_MIN_ENTRIES); |
| 728 | status_t err = ptable_allocate_at(offset, &len); |
| 729 | if (err < 0) { |
| 730 | LTRACEF("Failed to allocate partition of len 0x%llx @ 0x%llx (err %d)\n", |
| 731 | len, offset, err); |
| 732 | goto bailout; |
| 733 | } |
| 734 | |
| 735 | /* Publish the ptable partition */ |
| 736 | struct ptable_entry ptable_entry; |
| 737 | memset(&ptable_entry, 0, sizeof(ptable_entry)); |
| 738 | ptable_entry.offset = offset; |
| 739 | ptable_entry.length = len; |
| 740 | ptable_entry.flags = 0; |
| 741 | |
| 742 | strlcpy((char *)ptable_entry.name, PTABLE_PART_NAME, sizeof(ptable_entry.name)); |
| 743 | err = ptable_publish(&ptable_entry); |
| 744 | if (err < 0) { |
| 745 | LTRACEF("Failed to publish ptable partition\n"); |
| 746 | goto bailout; |
| 747 | } |
| 748 | |
| 749 | /* Commit the partition table to storage */ |
| 750 | err = ptable_write(); |
| 751 | if (err < 0) { |
| 752 | LTRACEF("Failed to commit ptable\n"); |
| 753 | goto bailout; |
| 754 | } |
| 755 | |
| 756 | bailout: |
| 757 | /* if we failed, reset the system. */ |
| 758 | if (err < 0) |
| 759 | ptable_reset(); |
| 760 | |
| 761 | return err; |
| 762 | } |
| 763 | |
| 764 | status_t ptable_remove(const char *name) |
| 765 | { |
| 766 | DEBUG_ASSERT(ptable.bdev); |
| 767 | |
| 768 | LTRACEF("name %s\n", name); |
| 769 | |
| 770 | if (!ptable_found_valid()) |
| 771 | return ERR_NOT_MOUNTED; |
| 772 | |
| 773 | if (!name) |
| 774 | return ERR_INVALID_ARGS; |
| 775 | |
| 776 | if (!strcmp(name, "ptable")) |
| 777 | return ERR_NOT_ALLOWED; |
| 778 | |
| 779 | bool found = false; |
| 780 | struct ptable_mem_entry *mentry; |
| 781 | list_for_every_entry(&ptable.list, mentry, struct ptable_mem_entry, node) { |
| 782 | const struct ptable_entry *entry = &mentry->entry; |
| 783 | if (strcmp(name, (void *)entry->name) == 0) { |
| 784 | ptable_unpublish(mentry); |
| 785 | found = true; |
| 786 | break; |
| 787 | } |
| 788 | } |
| 789 | |
| 790 | if (!found) |
| 791 | return ERR_NOT_FOUND; |
| 792 | |
| 793 | /* rewrite the page table */ |
| 794 | status_t err = ptable_write(); |
| 795 | return err; |
| 796 | } |
| 797 | |
| 798 | status_t ptable_add(const char *name, uint64_t min_len, uint32_t flags) |
| 799 | { |
| 800 | LTRACEF("name %s min_len 0x%llx flags 0x%x\n", name, min_len, flags); |
| 801 | |
| 802 | if (!ptable_found_valid()) |
| 803 | return ERR_NOT_MOUNTED; |
| 804 | |
| 805 | /* see if the name is valid */ |
| 806 | if (strlen(name) > MAX_FLASH_PTABLE_NAME_LEN - 1) { |
| 807 | LTRACEF("Name too long\n"); |
| 808 | return ERR_INVALID_ARGS; |
| 809 | } |
| 810 | |
| 811 | // Find a place for the requested partition, adjust the length as needed |
| 812 | off_t part_loc = ptable_allocate(&min_len, flags); |
| 813 | if (part_loc < 0) { |
| 814 | LTRACEF("Failed to usable find location.\n"); |
| 815 | return (status_t)part_loc; |
| 816 | } |
| 817 | |
| 818 | /* Attempt to publish the partition */ |
| 819 | struct ptable_entry ptable_entry; |
| 820 | memset(&ptable_entry, 0, sizeof(ptable_entry)); |
| 821 | ptable_entry.offset = part_loc; |
| 822 | ptable_entry.length = min_len; |
| 823 | ptable_entry.flags = 0; |
| 824 | strlcpy((char *)ptable_entry.name, name, sizeof(ptable_entry.name)); |
| 825 | status_t err = ptable_publish(&ptable_entry); |
| 826 | if (err < 0) { |
| 827 | LTRACEF("Failed to publish\n"); |
| 828 | return err; |
| 829 | } |
| 830 | |
| 831 | /* Commit the partition table */ |
| 832 | err = ptable_write(); |
| 833 | if (err < 0) { |
| 834 | LTRACEF("Failed to commit ptable\n"); |
| 835 | } |
| 836 | |
| 837 | return err; |
| 838 | } |
| 839 | |
| 840 | void ptable_dump(void) |
| 841 | { |
| 842 | int i = 0; |
| 843 | struct ptable_mem_entry *mentry; |
| 844 | list_for_every_entry(&ptable.list, mentry, struct ptable_mem_entry, node) { |
| 845 | const struct ptable_entry *entry = &mentry->entry; |
| 846 | |
| 847 | printf("%d: %16s off 0x%016llx len 0x%016llx flags 0x%08x\n", |
| 848 | i, entry->name, entry->offset, entry->length, entry->flags); |
| 849 | i++; |
| 850 | } |
| 851 | } |
| 852 | |
| 853 | #if WITH_LIB_CONSOLE |
| 854 | |
| 855 | #include <lib/console.h> |
| 856 | |
/* Console front-end for the ptable API: dispatches the scan, default,
 * list, nuke, add, remove, alloc/allocend, and write subcommands.
 */
static int cmd_ptable(int argc, const cmd_args *argv)
{
    if (argc < 2) {
notenoughargs:
        printf("not enough arguments\n");
usage:
        printf("usage: %s scan <bio_device> <offset>\n", argv[0].str);
        printf("usage: %s default <bio_device> <offset>\n", argv[0].str);
        printf("usage: %s list\n", argv[0].str);
        printf("usage: %s add <name> <length> <flags>\n", argv[0].str);
        printf("usage: %s remove <name>\n", argv[0].str);
        printf("usage: %s alloc <len>\n", argv[0].str);
        printf("usage: %s allocend <len>\n", argv[0].str);
        printf("usage: %s write\n", argv[0].str);
        return -1;
    }

    /* NOTE(review): this 'err' is shadowed by fresh locals in the scan,
     * default, and nuke branches below; only the add branch uses it. */
    status_t err;
    if (!strcmp(argv[1].str, "scan")) {
        if (argc < 4) goto notenoughargs;
        status_t err = ptable_scan(argv[2].str, argv[3].u);
        printf("ptable_scan returns %d\n", err);
    } else if (!strcmp(argv[1].str, "default")) {
        if (argc < 4) goto notenoughargs;
        status_t err = ptable_create_default(argv[2].str, argv[3].u);
        printf("ptable_create_default returns %d\n", err);
    } else if (!strcmp(argv[1].str, "list")) {
        ptable_dump();
    } else if (!strcmp(argv[1].str, "nuke")) {
        /* erase the raw ptable subdevice, destroying the table on storage */
        bdev_t* ptable_dev = bio_open(PTABLE_PART_NAME);

        if (ptable_dev) {
            status_t err;
            err = bio_erase(ptable_dev, 0, ptable_dev->total_size);
            if (err < 0) {
                printf("ptable nuke failed (err %d)\n", err);
            } else {
                printf("ptable nuke OK\n");
            }
            bio_close(ptable_dev);
        } else {
            printf("Failed to find ptable device\n");
        }
    } else if (!strcmp(argv[1].str, "add")) {
        if (argc < 5) goto notenoughargs;
        err = ptable_add(argv[2].str, argv[3].u, argv[4].u);
        if (err < NO_ERROR)
            printf("ptable_add returns err %d\n", err);
    } else if (!strcmp(argv[1].str, "remove")) {
        if (argc < 3) goto notenoughargs;
        /* NOTE(review): the return value of ptable_remove is ignored here,
         * so a failed removal produces no console feedback. */
        ptable_remove(argv[2].str);
    } else if (!strcmp(argv[1].str, "alloc") ||
               !strcmp(argv[1].str, "allocend")) {
        if (argc < 3) goto notenoughargs;

        /* dry-run allocation: reports where a partition would land */
        uint flags = !strcmp(argv[1].str, "allocend") ? FLASH_PTABLE_ALLOC_END : 0;
        uint64_t len = argv[2].u;
        off_t off = ptable_allocate(&len, flags);

        if (off < 0) {
            printf("%s of 0x%lx failed (err %lld)\n",
                   argv[1].str, argv[2].u, off);
        } else {
            printf("%s of 0x%lx gives [0x%llx, 0x%llx)\n",
                   argv[1].str, argv[2].u, off, off + len);
        }
    } else if (!strcmp(argv[1].str, "write")) {
        printf("ptable_write result %d\n", ptable_write());
    } else {
        goto usage;
    }

    return 0;
}
| 931 | |
| 932 | STATIC_COMMAND_START |
| 933 | STATIC_COMMAND("ptable", "commands for manipulating the flash partition table", &cmd_ptable) |
| 934 | STATIC_COMMAND_END(ptable); |
| 935 | |
| 936 | #endif // WITH_LIB_CONSOLE |
| 937 | |