Chromium Code Reviews

Side by Side Diff: drivers/md/dm-bht.c

Issue 6883252: CHROMIUM: verity: pull up maybe_read_entry (Closed)
Base URL: http://git.chromium.org/git/kernel-next.git@chromeos-2.6.38
Patch Set: Created 9 years, 8 months ago
1 /* 1 /*
2 * Copyright (C) 2010 The Chromium OS Authors <chromium-os-dev@chromium.org> 2 * Copyright (C) 2010 The Chromium OS Authors <chromium-os-dev@chromium.org>
3 * 3 *
4 * Device-Mapper block hash tree interface. 4 * Device-Mapper block hash tree interface.
5 * See Documentation/device-mapper/dm-bht.txt for details. 5 * See Documentation/device-mapper/dm-bht.txt for details.
6 * 6 *
7 * This file is released under the GPL. 7 * This file is released under the GPL.
8 */ 8 */
9 9
10 #include <asm/atomic.h> 10 #include <asm/atomic.h>
(...skipping 441 matching lines...)
452 { 452 {
453 if (status) { 453 if (status) {
454 DMCRIT("an I/O error occurred while writing entry"); 454 DMCRIT("an I/O error occurred while writing entry");
455 atomic_set(&entry->state, DM_BHT_ENTRY_ERROR_IO); 455 atomic_set(&entry->state, DM_BHT_ENTRY_ERROR_IO);
456 /* entry->nodes will be freed later */ 456 /* entry->nodes will be freed later */
457 return; 457 return;
458 } 458 }
459 } 459 }
460 EXPORT_SYMBOL(dm_bht_write_completed); 460 EXPORT_SYMBOL(dm_bht_write_completed);
461 461
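The hunk above shows the error path of dm_bht_write_completed(): a failed write flips the entry's state to DM_BHT_ENTRY_ERROR_IO and leaves entry->nodes to be freed later. As a rough illustration of how a caller might feed the block-layer status into it, here is a hypothetical bio completion handler. The endio signature matches 2.6.38-era kernels, but the assumptions that the entry pointer rides in bio->bi_private and that dm_bht_write_completed() takes (entry, status) are mine, not something this patch defines.

/* Hypothetical endio glue, not part of this patch. Assumes the write
 * submitter stored the dm_bht_entry pointer in bio->bi_private and that
 * dm_bht_write_completed() takes (entry, status). */
static void verity_write_endio(struct bio *bio, int error)
{
        struct dm_bht_entry *entry = bio->bi_private;

        /* On failure the hunk above marks the entry DM_BHT_ENTRY_ERROR_IO. */
        dm_bht_write_completed(entry, error);
        bio_put(bio);
}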
462 /* dm_bht_maybe_read_entry
463 * Attempts to atomically acquire an entry, allocate any needed
464 * memory, and issues the I/O callback to load the hash from disk.
465 * Return value is negative on error. When positive, it is the state
466 * value.
467 */
468 static int dm_bht_maybe_read_entry(struct dm_bht *bht, void *ctx,
469 unsigned int depth, unsigned int index)
470 {
471 struct dm_bht_level *level = &bht->levels[depth];
472 struct dm_bht_entry *entry = &level->entries[index];
473 sector_t current_sector = level->sector + to_sector(index * PAGE_SIZE);
474 struct page *node_page;
475 int state;
476
477 BUG_ON(depth >= bht->depth);
478
479 /* XXX: hardcoding PAGE_SIZE means that a perfectly valid image
480 * on one system may not work on a different kernel.
481 * TODO(wad) abstract PAGE_SIZE with a bht->entry_size or
482 * at least a define and ensure bht->entry_size is
483 * sector aligned at least.
484 */
485
486 /* If the entry's state is UNALLOCATED, then we'll claim it
487 * for allocation and loading.
488 */
489 state = atomic_cmpxchg(&entry->state,
490 DM_BHT_ENTRY_UNALLOCATED,
491 DM_BHT_ENTRY_PENDING);
492 DMDEBUG("dm_bht_maybe_read_entry(d=%u,ei=%u): ei=%lu, state=%d",
493 depth, index, (unsigned long)(entry - level->entries), state);
494
495 if (state != DM_BHT_ENTRY_UNALLOCATED)
496 goto out;
497
498 state = DM_BHT_ENTRY_REQUESTED;
499
500 /* Current entry is claimed for allocation and loading */
501 node_page = (struct page *) mempool_alloc(bht->entry_pool, GFP_NOIO);
502 if (!node_page)
503 goto nomem;
504 /* dm-bht guarantees page-aligned memory for callbacks. */
505 entry->nodes = page_address(node_page);
506
507 /* TODO(wad) error check callback here too */
508 DMDEBUG("dm_bht_maybe_read_entry(d=%u,ei=%u): reading %lu",
509 depth, index, (unsigned long)(entry - level->entries));
510 bht->read_cb(ctx, current_sector, entry->nodes,
511 to_sector(PAGE_SIZE), entry);
512
513 out:
514 if (state <= DM_BHT_ENTRY_ERROR)
515 DMCRIT("entry %u is in an error state", index);
516
517 return state;
518
519 nomem:
520 DMCRIT("failed to allocate memory for entry->nodes from pool");
521 return -ENOMEM;
522
523
524 }
525
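The removed dm_bht_maybe_read_entry() above is the heart of this change: it uses atomic_cmpxchg() to move an entry from DM_BHT_ENTRY_UNALLOCATED to DM_BHT_ENTRY_PENDING so that exactly one requester allocates the node page and issues the read, while every other caller simply observes the state it found. The same claim-then-load pattern is what gets open-coded into dm_bht_populate() below. A condensed sketch of the pattern, using the helpers and constants visible in this file but otherwise illustrative only:

/* Condensed, illustrative version of the claim-then-load pattern;
 * not part of the patch. */
static int claim_and_load(struct dm_bht *bht, void *ctx,
                          struct dm_bht_level *level,
                          struct dm_bht_entry *entry, unsigned int index)
{
        struct page *pg;
        int state = atomic_cmpxchg(&entry->state,
                                   DM_BHT_ENTRY_UNALLOCATED,
                                   DM_BHT_ENTRY_PENDING);

        /* Lost the race, or the entry is already loaded or errored:
         * just report the state we saw. */
        if (state != DM_BHT_ENTRY_UNALLOCATED)
                return state;

        /* We own the entry: back it with a page and start the read. */
        pg = mempool_alloc(bht->entry_pool, GFP_NOIO);
        if (!pg)
                return -ENOMEM;
        entry->nodes = page_address(pg);
        bht->read_cb(ctx, level->sector + to_sector(index * PAGE_SIZE),
                     entry->nodes, to_sector(PAGE_SIZE), entry);
        return DM_BHT_ENTRY_REQUESTED;
}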
526 /* dm_bht_verify_path 462 /* dm_bht_verify_path
527 * Verifies the path. Returns 0 on ok. 463 * Verifies the path. Returns 0 on ok.
528 */ 464 */
529 static int dm_bht_verify_path(struct dm_bht *bht, unsigned int block_index, 465 static int dm_bht_verify_path(struct dm_bht *bht, unsigned int block_index,
530 struct page *pg, unsigned int offset) 466 struct page *pg, unsigned int offset)
531 { 467 {
532 unsigned int depth = bht->depth; 468 unsigned int depth = bht->depth;
533 u8 digest[DM_BHT_MAX_DIGEST_SIZE]; 469 u8 digest[DM_BHT_MAX_DIGEST_SIZE];
534 struct dm_bht_entry *entry; 470 struct dm_bht_entry *entry;
535 u8 *node; 471 u8 *node;
(...skipping 173 matching lines...)
709 645
710 for (depth = bht->depth - 2; depth >= 0; depth--) { 646 for (depth = bht->depth - 2; depth >= 0; depth--) {
711 struct dm_bht_level *level = dm_bht_get_level(bht, depth); 647 struct dm_bht_level *level = dm_bht_get_level(bht, depth);
712 struct dm_bht_level *child_level = level + 1; 648 struct dm_bht_level *child_level = level + 1;
713 struct dm_bht_entry *entry = level->entries; 649 struct dm_bht_entry *entry = level->entries;
714 struct dm_bht_entry *child = child_level->entries; 650 struct dm_bht_entry *child = child_level->entries;
715 unsigned int i, j; 651 unsigned int i, j;
716 652
717 for (i = 0; i < level->count; i++, entry++) { 653 for (i = 0; i < level->count; i++, entry++) {
718 unsigned int count = bht->node_count; 654 unsigned int count = bht->node_count;
655 struct page *pg;
719 656
720 » » » r = dm_bht_maybe_read_entry(bht, read_cb_ctx, depth, i); 657 » » » pg = (struct page *) mempool_alloc(bht->entry_pool,
721 » » » if (r < 0) { 658 » » » » » » » GFP_NOIO);
659 » » » if (!pg) {
722 DMCRIT("an error occurred while reading entry"); 660 DMCRIT("an error occurred while reading entry");
723 goto out; 661 goto out;
724 } 662 }
725 663
664 entry->nodes = page_address(pg);
665 atomic_set(&entry->state, DM_BHT_ENTRY_READY);
666
726 if (i == (level->count - 1)) 667 if (i == (level->count - 1))
727 count = child_level->count % bht->node_count; 668 count = child_level->count % bht->node_count;
728 if (count == 0) 669 if (count == 0)
729 count = bht->node_count; 670 count = bht->node_count;
730 for (j = 0; j < count; j++, child++) { 671 for (j = 0; j < count; j++, child++) {
731 struct page *pg = virt_to_page(child->nodes); 672 struct page *pg = virt_to_page(child->nodes);
732 u8 *digest = dm_bht_node(bht, entry, j); 673 u8 *digest = dm_bht_node(bht, entry, j);
733 674
734 r = dm_bht_compute_hash(bht, pg, 0, digest); 675 r = dm_bht_compute_hash(bht, pg, 0, digest);
735 if (r) { 676 if (r) {
(...skipping 84 matching lines...)
820 return false; 761 return false;
821 } 762 }
822 763
823 return true; 764 return true;
824 } 765 }
825 EXPORT_SYMBOL(dm_bht_is_populated); 766 EXPORT_SYMBOL(dm_bht_is_populated);
826 767
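In the precompute loop above, the last entry of each level usually has fewer than node_count children, which is what the modulo-with-fallback pair handles. A short worked example with illustrative numbers (not taken from the patch):

/* Illustrative only: bht->node_count = 128, child_level->count = 300.
 * Parents 0 and 1 hash 128 children each; the last parent hashes
 * 300 % 128 = 44. If the child count were an exact multiple (e.g. 256),
 * the modulo would yield 0 and the fallback keeps count at 128. */
unsigned int count = child_level->count % bht->node_count;
if (count == 0)
        count = bht->node_count;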
827 /** 768 /**
828 * dm_bht_populate - reads entries from disk needed to verify a given block 769 * dm_bht_populate - reads entries from disk needed to verify a given block
829 * @bht: pointer to a dm_bht_create()d bht 770 * @bht: pointer to a dm_bht_create()d bht
830 * @read_cb_ctx:context used for all read_cb calls on this request 771 * @ctx: context used for all read_cb calls on this request
831 * @block_index:specific block data is expected from 772 * @block_index:specific block data is expected from
832 * 773 *
833 * Returns negative value on error. 774 * Returns negative value on error. Returns 0 on success.
834 */ 775 */
835 int dm_bht_populate(struct dm_bht *bht, void *read_cb_ctx, 776 int dm_bht_populate(struct dm_bht *bht, void *ctx,
836 unsigned int block_index) 777 unsigned int block_index)
837 { 778 {
838 » unsigned int depth, entry_index; 779 » unsigned int depth;
839 » int status, populated = 0; 780 » int state = 0;
840 781
841 BUG_ON(block_index >= bht->block_count); 782 BUG_ON(block_index >= bht->block_count);
842 783
843 DMDEBUG("dm_bht_populate(%u)", block_index); 784 DMDEBUG("dm_bht_populate(%u)", block_index);
844 785
845 for (depth = 0; depth < bht->depth; ++depth) { 786 for (depth = 0; depth < bht->depth; ++depth) {
846 » » entry_index = dm_bht_index_at_level(bht, depth, block_index); 787 » » struct dm_bht_level *level;
847 » » status = dm_bht_maybe_read_entry(bht, read_cb_ctx, depth, 788 » » struct dm_bht_entry *entry;
848 » » » » » » entry_index); 789 » » unsigned int index;
849 » » if (status < 0) 790 » » struct page *pg;
850 » » » goto read_error;
851 791
852 » » /* Accrue return code flags */ 792 » » entry = dm_bht_get_entry(bht, depth, block_index);
853 » » populated |= status; 793 » » state = atomic_cmpxchg(&entry->state,
794 » » » » DM_BHT_ENTRY_UNALLOCATED,
795 » » » » DM_BHT_ENTRY_PENDING);
796
797 » » if (state <= DM_BHT_ENTRY_ERROR)
798 » » » goto error_state;
799
800 » » if (state != DM_BHT_ENTRY_UNALLOCATED)
801 » » » continue;
802
803 » » /* Current entry is claimed for allocation and loading */
804 » » pg = (struct page *) mempool_alloc(bht->entry_pool, GFP_NOIO);
805 » » if (!pg)
806 » » » goto nomem;
807
808 » » /* dm-bht guarantees page-aligned memory for callbacks. */
809 » » entry->nodes = page_address(pg);
810
811 » » /* TODO(wad) error check callback here too */
812
813 » » level = &bht->levels[depth];
814 » » index = dm_bht_index_at_level(bht, depth, block_index);
815 » » bht->read_cb(ctx, level->sector + to_sector(index * PAGE_SIZE),
816 » » » entry->nodes, to_sector(PAGE_SIZE), entry);
854 } 817 }
855 818
856 » return populated; 819 » return 0;
857 820
858 read_error: 821 error_state:
859 » DMCRIT("failure reading entry %u depth %u", entry_index, depth); 822 » DMCRIT("block %u at depth %u is in an error state", block_index, depth);
Paul T 2011/07/19 15:00:51: Why not add the state to the error message?
860 » return status; 823 » return state;
861 824
825 nomem:
826 DMCRIT("failed to allocate memory for entry->nodes from pool");
827 return -ENOMEM;
862 } 828 }
863 EXPORT_SYMBOL(dm_bht_populate); 829 EXPORT_SYMBOL(dm_bht_populate);
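Regarding the review comment above about including the state in the error message: a possible follow-up, sketched here only and not part of this patch set, would fold the observed state into the DMCRIT while keeping the early return. block_index, depth, and state are all already in scope in the new dm_bht_populate().

error_state:
        /* Sketch for a follow-up: report which error state was observed. */
        DMCRIT("block %u at depth %u is in an error state: %d",
               block_index, depth, state);
        return state;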
864 830
865 831
866 /** 832 /**
867 * dm_bht_verify_block - checks that all nodes in the path for @block are valid 833 * dm_bht_verify_block - checks that all nodes in the path for @block are valid
868 * @bht: pointer to a dm_bht_create()d bht 834 * @bht: pointer to a dm_bht_create()d bht
869 * @block_index:specific block data is expected from 835 * @block_index:specific block data is expected from
870 * @block: virtual address of the block data in memory 836 * @block: virtual address of the block data in memory
871 * (must be aligned to block size) 837 * (must be aligned to block size)
(...skipping 139 matching lines...)
1011 if (!bht->root_digest) { 977 if (!bht->root_digest) {
1012 DMERR("no root digest exists to export"); 978 DMERR("no root digest exists to export");
1013 if (available > 0) 979 if (available > 0)
1014 *hexdigest = 0; 980 *hexdigest = 0;
1015 return -1; 981 return -1;
1016 } 982 }
1017 dm_bht_bin_to_hex(bht->root_digest, hexdigest, bht->digest_size); 983 dm_bht_bin_to_hex(bht->root_digest, hexdigest, bht->digest_size);
1018 return 0; 984 return 0;
1019 } 985 }
1020 EXPORT_SYMBOL(dm_bht_root_hexdigest); 986 EXPORT_SYMBOL(dm_bht_root_hexdigest);
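Taken together, the exported helpers suggest the sequence a user such as dm-verity would follow: populate the hash entries needed for a block, wait for the read callbacks to complete, then verify the block data against the tree. The sketch below is a hypothetical caller, not code from this change; the dm_bht_populate() call matches the definition above, the dm_bht_verify_block() parameter list is inferred from its kernel-doc, and everything prefixed io_ is made up for the example.

/* Hypothetical caller, not part of this patch. */
static int io_verify_one_block(struct dm_bht *bht, void *read_cb_ctx,
                               unsigned int block_index, void *block_data)
{
        int r;

        /* Kick off (or re-use) the hash page reads for this block's path. */
        r = dm_bht_populate(bht, read_cb_ctx, block_index);
        if (r < 0)
                return r;

        /* A real caller must wait until the read_cb completions have marked
         * the entries ready (e.g. by checking dm_bht_is_populated()) before
         * verifying; that wait is omitted here for brevity. */
        return dm_bht_verify_block(bht, block_index, block_data);
}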