Chromium Code Reviews

Unified Diff: dm-bht.c

Issue 6880133: verity: change maybe_read_entries to maybe_read_entry (Closed)
Base URL: http://git.chromium.org/git/dm-verity.git@master
Patch Set: Created 9 years, 8 months ago
/*
 * Copyright (C) 2010 The Chromium OS Authors <chromium-os-dev@chromium.org>
 *
 * Device-Mapper block hash tree interface.
 * See Documentation/device-mapper/dm-bht.txt for details.
 *
 * This file is released under the GPL.
 */

#include <asm/atomic.h>
(...skipping 287 matching lines...)
        }

        /* Setup callback stubs */
        bht->read_cb = &dm_bht_read_callback_stub;
        bht->write_cb = &dm_bht_write_callback_stub;

        status = dm_bht_initialize_entries(bht);
        if (status)
                goto bad_entries_alloc;

+       /* We compute depth such that there is only 1 block at level 0. */
+       BUG_ON(bht->levels[0].count != 1);
+
        return 0;

bad_entries_alloc:
        while (bht->depth-- > 0)
                kfree(bht->levels[bht->depth].entries);
        kfree(bht->levels);
bad_node_count:
bad_level_alloc:
bad_block_count:
        kfree(bht->root_digest);
(...skipping 136 matching lines...)
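The BUG_ON added above encodes the invariant behind dm_bht_initialize_entries: the per-level counts are chosen so the tree narrows to a single block at level 0. A minimal user-space sketch of that arithmetic, with illustrative constants that are assumptions rather than values read from the code (128 32-byte digests per 4 KiB page):

#include <stdio.h>

/* Levels needed so the top of the tree collapses to one block. */
static unsigned int tree_depth(unsigned long long blocks,
                               unsigned int node_count)
{
        unsigned int depth = 0;

        while (blocks > 1) {
                blocks = (blocks + node_count - 1) / node_count; /* ceil */
                depth++;
        }
        return depth;
}

int main(void)
{
        /* 2 GiB of 4 KiB blocks: 524288 -> 4096 -> 32 -> 1, so depth 3. */
        printf("depth = %u\n", tree_depth(524288ULL, 128));
        return 0;
}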
{
        if (status) {
                DMCRIT("an I/O error occurred while writing entry");
                atomic_set(&entry->state, DM_BHT_ENTRY_ERROR_IO);
                /* entry->nodes will be freed later */
                return;
        }
}
EXPORT_SYMBOL(dm_bht_write_completed);

+/* dm_bht_maybe_read_entry
+ * Attempts to atomically acquire an entry, allocate any needed
+ * memory, and issue the I/O callback to load the hash from disk.
+ * The return value is negative on error. When positive, it is the
+ * state value.
+ */
+static int dm_bht_maybe_read_entry(struct dm_bht *bht, void *ctx,
+                                   unsigned int depth, unsigned int index)
+{
+        struct dm_bht_level *level = &bht->levels[depth];
+        struct dm_bht_entry *entry = &level->entries[index];
+        sector_t current_sector = level->sector + to_sector(index * PAGE_SIZE);
+        struct page *node_page;
+        int state;

-/* dm_bht_maybe_read_entries
- * Attempts to atomically acquire each entry, allocate any needed
- * memory, and issue I/O callbacks to load the hashes from disk.
- * Returns 0 if all entries are loaded and verified. On error, the
- * return value is negative. When positive, it is the state values
- * ORed together.
- */
-static int dm_bht_maybe_read_entries(struct dm_bht *bht, void *ctx,
-                                     unsigned int depth, unsigned int index,
-                                     unsigned int count, bool until_exist)
-{
-        struct dm_bht_level *level;
-        struct dm_bht_entry *entry, *last_entry;
-        sector_t current_sector;
-        int state = 0;
-        int status = 0;
-        struct page *node_page = NULL;
        BUG_ON(depth >= bht->depth);

-        level = &bht->levels[depth];
-        if (count > level->count - index) {
-                DMERR("dm_bht_maybe_read_entries(d=%u,ei=%u,count=%u): "
-                      "index+count exceeds available entries %u",
-                      depth, index, count, level->count);
-                return -EINVAL;
-        }
        /* XXX: hardcoding PAGE_SIZE means that a perfectly valid image
         * on one system may not work on a different kernel.
         * TODO(wad) abstract PAGE_SIZE with a bht->entry_size or
         *           at least a define and ensure bht->entry_size is
         *           sector aligned at least.
         */
-        current_sector = level->sector + to_sector(index * PAGE_SIZE);
-        for (entry = &level->entries[index], last_entry = entry + count;
-             entry < last_entry;
-             ++entry, current_sector += to_sector(PAGE_SIZE)) {
-                /* If the entry's state is UNALLOCATED, then we'll claim it
-                 * for allocation and loading.
-                 */
-                state = atomic_cmpxchg(&entry->state,
-                                       DM_BHT_ENTRY_UNALLOCATED,
-                                       DM_BHT_ENTRY_PENDING);
-                DMDEBUG("dm_bht_maybe_read_entries(d=%u,ei=%u,count=%u): "
-                        "ei=%lu, state=%d",
-                        depth, index, count,
-                        (unsigned long)(entry - level->entries), state);
-                if (state <= DM_BHT_ENTRY_ERROR) {
-                        DMCRIT("entry %u is in an error state", index);
-                        return state;
-                }
-
-                /* Currently, the verified state is unused. */
-                if (state == DM_BHT_ENTRY_VERIFIED) {
-                        if (until_exist)
-                                return 0;
-                        /* Makes 0 == verified. Is that ideal? */
-                        continue;
-                }
-
-                if (state != DM_BHT_ENTRY_UNALLOCATED) {
-                        /* PENDING, READY, ... */
-                        if (until_exist)
-                                return state;
-                        status |= state;
-                        continue;
-                }
-                /* Current entry is claimed for allocation and loading */
-                node_page = (struct page *) mempool_alloc(bht->entry_pool,
-                                                          GFP_NOIO);
-                if (!node_page) {
-                        DMCRIT("failed to allocate memory for "
-                               "entry->nodes from pool");
-                        return -ENOMEM;
-                }
-                /* dm-bht guarantees page-aligned memory for callbacks. */
-                entry->nodes = page_address(node_page);
-                /* Let the caller know that not all the data is yet available */
-                status |= DM_BHT_ENTRY_REQUESTED;
-                /* Issue the read callback */
-                /* TODO(wad) error check callback here too */
-                DMDEBUG("dm_bht_maybe_read_entries(d=%u,ei=%u,count=%u): "
-                        "reading %lu",
-                        depth, index, count,
-                        (unsigned long)(entry - level->entries));
-                bht->read_cb(ctx,               /* external context */
-                             current_sector,    /* starting sector */
-                             entry->nodes,      /* destination */
-                             to_sector(PAGE_SIZE),
-                             entry);            /* io context */
-        }
-        /* Should only be 0 if all entries were verified and not just ready */
-        return status;
+
+        /* If the entry's state is UNALLOCATED, then we'll claim it
+         * for allocation and loading.
+         */
+        state = atomic_cmpxchg(&entry->state,
+                               DM_BHT_ENTRY_UNALLOCATED,
+                               DM_BHT_ENTRY_PENDING);
+        DMDEBUG("dm_bht_maybe_read_entry(d=%u,ei=%u): ei=%lu, state=%d",
+                depth, index, (unsigned long)(entry - level->entries), state);
+
+        if (state != DM_BHT_ENTRY_UNALLOCATED)
+                goto out;
+
+        state = DM_BHT_ENTRY_REQUESTED;
+
+        /* Current entry is claimed for allocation and loading */
+        node_page = (struct page *) mempool_alloc(bht->entry_pool, GFP_NOIO);
+        if (!node_page)
+                goto nomem;
+        /* dm-bht guarantees page-aligned memory for callbacks. */
+        entry->nodes = page_address(node_page);
+
+        /* TODO(wad) error check callback here too */
+        DMDEBUG("dm_bht_maybe_read_entry(d=%u,ei=%u): reading %lu",
+                depth, index, (unsigned long)(entry - level->entries));
+        bht->read_cb(ctx, current_sector, entry->nodes,
+                     to_sector(PAGE_SIZE), entry);
+
+out:
+        if (state <= DM_BHT_ENTRY_ERROR)
+                DMCRIT("entry %u is in an error state", index);
+
+        return state;
+
+nomem:
+        DMCRIT("failed to allocate memory for entry->nodes from pool");
+        return -ENOMEM;

}
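The heart of the new function is the atomic_cmpxchg: exactly one caller wins the UNALLOCATED-to-PENDING transition and becomes responsible for allocating the page and issuing the read; every other concurrent caller merely observes whatever state is already installed. A stand-alone sketch of that claim pattern, using C11 atomics in place of the kernel's atomic_t (the state names and values here are illustrative, not dm-bht's):

#include <stdatomic.h>
#include <stdio.h>

enum { ENTRY_UNALLOCATED = 0, ENTRY_PENDING, ENTRY_READY };

static int maybe_claim(atomic_int *state)
{
        int expected = ENTRY_UNALLOCATED;

        /* Exactly one caller wins UNALLOCATED -> PENDING; losers are
         * handed the value some other caller already installed. */
        if (atomic_compare_exchange_strong(state, &expected, ENTRY_PENDING))
                return ENTRY_UNALLOCATED;   /* we own allocation + read */
        return expected;                    /* someone else does */
}

int main(void)
{
        atomic_int state = ENTRY_UNALLOCATED;

        printf("first caller sees %d\n", maybe_claim(&state));  /* 0: claimed */
        printf("second caller sees %d\n", maybe_claim(&state)); /* 1: pending */
        return 0;
}

This is why concurrent dm_bht_populate calls that race on the same entry cannot double-allocate from the mempool or issue the read callback twice.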

static int dm_bht_compare_hash(struct dm_bht *bht, u8 *known, u8 *computed)
{
        return memcmp(known, computed, bht->digest_size);
}

static int dm_bht_update_hash(struct dm_bht *bht, u8 *known, u8 *computed)
{
#ifdef CONFIG_DM_DEBUG
(...skipping 249 matching lines...)
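On the sector math in dm_bht_maybe_read_entry above: device-mapper's to_sector() converts a byte count into 512-byte sectors, so with 4 KiB entries each index advances the start by 8 sectors. A small user-space model of that computation (PAGE_SIZE and the level start are assumed example values):

#include <stdio.h>

typedef unsigned long long sector_t;

static inline sector_t to_sector(unsigned long long bytes)
{
        return bytes >> 9;      /* 512-byte sectors */
}

int main(void)
{
        unsigned long page_size = 4096; /* assumed PAGE_SIZE */
        sector_t level_start = 1024;    /* hypothetical level->sector */

        /* Entry 3 of this level starts 3 * 8 = 24 sectors in. */
        printf("%llu\n", level_start + to_sector(3 * page_size));
        return 0;
}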
{
        int depth, r;

        for (depth = bht->depth - 2; depth >= 0; depth--) {
                struct dm_bht_level *level = dm_bht_get_level(bht, depth);
                struct dm_bht_level *child_level = level + 1;
                struct dm_bht_entry *entry = level->entries;
                struct dm_bht_entry *child = child_level->entries;
                unsigned int i, j;

-               r = dm_bht_maybe_read_entries(bht, read_cb_ctx, depth,
-                                             0, level->count, true);
-               if (r < 0) {
-                       DMCRIT("an error occurred while reading entry");
-                       goto out;
-               }
-
                for (i = 0; i < level->count; i++, entry++) {
                        unsigned int count = bht->node_count;
+
+                       r = dm_bht_maybe_read_entry(bht, read_cb_ctx, depth, i);
+                       if (r < 0) {
+                               DMCRIT("an error occurred while reading entry");
+                               goto out;
+                       }
+
                        if (i == (level->count - 1))
                                count = child_level->count % bht->node_count;
                        if (count == 0)
                                count = bht->node_count;
                        for (j = 0; j < count; j++, child++) {
                                struct page *pg = virt_to_page(child->nodes);
                                u8 *digest = dm_bht_node(bht, entry, j);

                                r = dm_bht_compute_hash(bht, pg, 0, digest);
                                if (r) {
(...skipping 115 matching lines...)
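The count adjustment in the loop above deserves a worked example: only the last entry of a level may be partially filled, and when the children divide evenly the modulo yields 0 and must be bumped back to a full node_count. A user-space sketch of the same logic, with assumed example values:

#include <stdio.h>

static unsigned int nodes_in_entry(unsigned int i, unsigned int entries,
                                   unsigned int children,
                                   unsigned int node_count)
{
        unsigned int count = node_count;

        /* Only the last entry can be partially filled. */
        if (i == entries - 1)
                count = children % node_count;
        /* An even split leaves 0; restore the full count. */
        if (count == 0)
                count = node_count;
        return count;
}

int main(void)
{
        /* 130 children packed 128 per entry: entry 0 holds 128,
         * entry 1 (the last) holds 130 % 128 = 2. */
        printf("%u %u\n", nodes_in_entry(0, 2, 130, 128),
               nodes_in_entry(1, 2, 130, 128));
        return 0;
}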

        /* Load in all of level 0 if the root is unverified */
        root_state = atomic_read(&bht->root_state);
        /* TODO(wad) create a separate io object for the root request which
         * can continue on and be verified and stop making every request
         * check.
         */
        if (root_state != DM_BHT_ENTRY_VERIFIED) {
                DMDEBUG("root data is not yet loaded");
                /* If positive, it means some are pending. */
-               populated = dm_bht_maybe_read_entries(bht, read_cb_ctx, 0, 0,
-                                                     bht->levels[0].count,
-                                                     true);
+               populated = dm_bht_maybe_read_entry(bht, read_cb_ctx, 0, 0);
                if (populated < 0) {
                        DMCRIT("an error occurred while reading level[0]");
                        /* TODO(wad) define std error codes */
                        return populated;
                }
        }

        for (depth = 1; depth < bht->depth; ++depth) {
                level = dm_bht_get_level(bht, depth);
                entry_index = dm_bht_index_at_level(bht, depth,
                                                    block_index);
                DMDEBUG("populate for bi=%u on d=%d ei=%u (max=%u)",
                        block_index, depth, entry_index, level->count);

                /* Except for the root node case, we should only ever need
                 * to load one entry along the path.
                 */
-               read_status = dm_bht_maybe_read_entries(bht, read_cb_ctx,
-                                                       depth, entry_index,
-                                                       1, false);
+               read_status = dm_bht_maybe_read_entry(bht, read_cb_ctx,
+                                                     depth, entry_index);
                if (unlikely(read_status < 0)) {
                        DMCRIT("failure occurred reading entry %u depth %u",
                               entry_index, depth);
                        return read_status;
                }
                /* Accrue return code flags */
                populated |= read_status;
        }

        /* All nodes are ready. The hash for the block_index can be verified */
(...skipping 171 matching lines...)
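After this change, dm_bht_populate issues at most one read per tree level along the path to block_index. The per-level entry index comes from dm_bht_index_at_level, whose definition lies outside this diff; the sketch below is a plausible reconstruction under the assumption that node_count is a power of two, so each level up divides the index by node_count. The formula is an assumption for illustration, not dm-bht's actual helper:

#include <stdio.h>

/* Hypothetical model: which entry covers `block` at `depth`, for a
 * tree of tree_depth levels packing node_count = 1 << shift hashes
 * per entry. Level 0 always collapses to entry 0. */
static unsigned int index_at_level(unsigned int block, unsigned int depth,
                                   unsigned int tree_depth, unsigned int shift)
{
        return block >> ((tree_depth - depth) * shift);
}

int main(void)
{
        unsigned int depth, tree_depth = 3, shift = 7; /* 128 nodes/entry */

        /* Path for data block 300000: one entry per level.
         * Prints: depth 0 -> 0, depth 1 -> 18, depth 2 -> 2343. */
        for (depth = 0; depth < tree_depth; depth++)
                printf("depth %u -> entry %u\n",
                       depth, index_at_level(300000, depth, tree_depth, shift));
        return 0;
}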
        if (!bht->root_digest) {
                DMERR("no root digest exists to export");
                if (available > 0)
                        *hexdigest = 0;
                return -1;
        }
        dm_bht_bin_to_hex(bht->root_digest, hexdigest, bht->digest_size);
        return 0;
}
EXPORT_SYMBOL(dm_bht_root_hexdigest);
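dm_bht_bin_to_hex (defined outside this hunk) expands the binary root digest into a NUL-terminated hex string, which is why the caller's buffer must hold at least digest_size * 2 + 1 bytes. A stand-alone sketch of that expansion; the helper name and sample digest are assumptions for illustration:

#include <stdio.h>

static void bin_to_hex(const unsigned char *bin, char *hex, unsigned int len)
{
        static const char tab[] = "0123456789abcdef";
        unsigned int i;

        /* Each input byte becomes two hex characters. */
        for (i = 0; i < len; i++) {
                hex[2 * i]     = tab[bin[i] >> 4];
                hex[2 * i + 1] = tab[bin[i] & 0x0f];
        }
        hex[2 * len] = '\0';
}

int main(void)
{
        unsigned char digest[4] = { 0xde, 0xad, 0xbe, 0xef };
        char hex[2 * sizeof(digest) + 1];

        bin_to_hex(digest, hex, sizeof(digest));
        printf("%s\n", hex);    /* prints: deadbeef */
        return 0;
}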