| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2010 The Chromium OS Authors <chromium-os-dev@chromium.org> | 2 * Copyright (C) 2010 The Chromium OS Authors <chromium-os-dev@chromium.org> |
| 3 * | 3 * |
| 4 * Device-Mapper block hash tree interface. | 4 * Device-Mapper block hash tree interface. |
| 5 * See Documentation/device-mapper/dm-bht.txt for details. | 5 * See Documentation/device-mapper/dm-bht.txt for details. |
| 6 * | 6 * |
| 7 * This file is released under the GPL. | 7 * This file is released under the GPL. |
| 8 */ | 8 */ |
| 9 | 9 |
| 10 #include <asm/atomic.h> | 10 #include <asm/atomic.h> |
| (...skipping 247 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 258 | 258 |
| 259 /* Configure the tree */ | 259 /* Configure the tree */ |
| 260 bht->block_count = block_count; | 260 bht->block_count = block_count; |
| 261 DMDEBUG("Setting block_count %u", block_count); | 261 DMDEBUG("Setting block_count %u", block_count); |
| 262 if (block_count == 0) { | 262 if (block_count == 0) { |
| 263 DMERR("block_count must be non-zero"); | 263 DMERR("block_count must be non-zero"); |
| 264 status = -EINVAL; | 264 status = -EINVAL; |
| 265 goto bad_block_count; | 265 goto bad_block_count; |
| 266 } | 266 } |
| 267 | 267 |
| 268 bht->depth = depth; /* assignment below */ | |
| 269 DMDEBUG("Setting depth %u", depth); | |
| 270 if (!depth || depth > UINT_MAX / sizeof(struct dm_bht_level)) { | |
| 271 DMERR("bht depth is invalid: %u", depth); | |
| 272 status = -EINVAL; | |
| 273 goto bad_depth; | |
| 274 } | |
| 275 | |
| 276 /* Allocate levels. Each level of the tree may have an arbitrary number | |
| 277 * of dm_bht_entry structs. Each entry contains node_count nodes. | |
| 278 * Each node in the tree is a cryptographic digest of either node_count | |
| 279 * nodes on the subsequent level or of a specific block on disk. | |
| 280 */ | |
| 281 bht->levels = (struct dm_bht_level *) | |
| 282 kcalloc(depth, sizeof(struct dm_bht_level), GFP_KERNEL); | |
| 283 if (!bht->levels) { | |
| 284 DMERR("failed to allocate tree levels"); | |
| 285 status = -ENOMEM; | |
| 286 goto bad_level_alloc; | |
| 287 } | |
| 288 | |
| 289 /* Each dm_bht_entry->nodes is one page. The node code tracks | 268 /* Each dm_bht_entry->nodes is one page. The node code tracks |
| 290 * how many nodes fit into one entry where a node is a single | 269 * how many nodes fit into one entry where a node is a single |
| 291 * hash (message digest). | 270 * hash (message digest). |
| 292 */ | 271 */ |
| 293 bht->node_count_shift = fls(PAGE_SIZE / bht->digest_size) - 1; | 272 bht->node_count_shift = fls(PAGE_SIZE / bht->digest_size) - 1; |
| 294 /* Round down to the nearest power of two. This makes indexing | 273 /* Round down to the nearest power of two. This makes indexing |
| 295 * into the tree much less painful. | 274 * into the tree much less painful. |
| 296 */ | 275 */ |
| 297 bht->node_count = 1 << bht->node_count_shift; | 276 bht->node_count = 1 << bht->node_count_shift; |
| 298 | 277 |
| 299 /* This is unlikely to happen, but with 64k pages, who knows. */ | 278 /* This is unlikely to happen, but with 64k pages, who knows. */ |
| 300 if (bht->node_count > UINT_MAX / bht->digest_size) { | 279 if (bht->node_count > UINT_MAX / bht->digest_size) { |
| 301 DMERR("node_count * hash_len exceeds UINT_MAX!"); | 280 DMERR("node_count * hash_len exceeds UINT_MAX!"); |
| 302 status = -EINVAL; | 281 status = -EINVAL; |
| 303 goto bad_node_count; | 282 goto bad_node_count; |
| 304 } | 283 } |
| 284 |
| 285 /* if depth == 0, create a "regular" tree with a single root block */ |
| 286 if (depth == 0) |
| 287 depth = DIV_ROUND_UP(fls(block_count - 1), |
| 288 bht->node_count_shift); |
| 289 if (depth > UINT_MAX / sizeof(struct dm_bht_level)) { |
| 290 DMERR("bht depth is invalid: %u", depth); |
| 291 status = -EINVAL; |
| 292 goto bad_depth; |
| 293 } |
| 294 DMDEBUG("Setting depth to %u.", depth); |
| 295 bht->depth = depth; |
| 296 |
| 305 /* Ensure that we can safely shift by this value. */ | 297 /* Ensure that we can safely shift by this value. */ |
| 306 if (depth * bht->node_count_shift >= sizeof(unsigned int) * 8) { | 298 if (depth * bht->node_count_shift >= sizeof(unsigned int) * 8) { |
| 307 DMERR("specified depth and node_count_shift is too large"); | 299 DMERR("specified depth and node_count_shift is too large"); |
| 308 status = -EINVAL; | 300 status = -EINVAL; |
| 309 goto bad_node_count; | 301 goto bad_node_count; |
| 310 } | 302 } |
| 311 | 303 |
| 304 /* Allocate levels. Each level of the tree may have an arbitrary number |
| 305 * of dm_bht_entry structs. Each entry contains node_count nodes. |
| 306 * Each node in the tree is a cryptographic digest of either node_count |
| 307 * nodes on the subsequent level or of a specific block on disk. |
| 308 */ |
| 309 bht->levels = (struct dm_bht_level *) |
| 310 kcalloc(depth, sizeof(struct dm_bht_level), GFP_KERNEL); |
| 311 if (!bht->levels) { |
| 312 DMERR("failed to allocate tree levels"); |
| 313 status = -ENOMEM; |
| 314 goto bad_level_alloc; |
| 315 } |
| 316 |
| 312 /* Setup callback stubs */ | 317 /* Setup callback stubs */ |
| 313 bht->read_cb = &dm_bht_read_callback_stub; | 318 bht->read_cb = &dm_bht_read_callback_stub; |
| 314 bht->write_cb = &dm_bht_write_callback_stub; | 319 bht->write_cb = &dm_bht_write_callback_stub; |
| 315 | 320 |
| 316 status = dm_bht_initialize_entries(bht); | 321 status = dm_bht_initialize_entries(bht); |
| 317 if (status) | 322 if (status) |
| 318 goto bad_entries_alloc; | 323 goto bad_entries_alloc; |
| 319 | 324 |
| 320 return 0; | 325 return 0; |
| 321 | 326 |
| 322 bad_entries_alloc: | 327 bad_entries_alloc: |
| 323 while (bht->depth-- > 0) | 328 while (bht->depth-- > 0) |
| 324 kfree(bht->levels[bht->depth].entries); | 329 kfree(bht->levels[bht->depth].entries); |
| 330 kfree(bht->levels); |
| 325 bad_node_count: | 331 bad_node_count: |
| 326 kfree(bht->levels); | |
| 327 bad_level_alloc: | 332 bad_level_alloc: |
| 328 bad_block_count: | 333 bad_block_count: |
| 329 bad_depth: | 334 bad_depth: |
| 330 kfree(bht->root_digest); | 335 kfree(bht->root_digest); |
| 331 bad_root_digest_alloc: | 336 bad_root_digest_alloc: |
| 332 bad_digest_len: | 337 bad_digest_len: |
| 333 for (cpu = 0; cpu < nr_cpu_ids; ++cpu) | 338 for (cpu = 0; cpu < nr_cpu_ids; ++cpu) |
| 334 if (bht->hash_desc[cpu].tfm) | 339 if (bht->hash_desc[cpu].tfm) |
| 335 crypto_free_hash(bht->hash_desc[cpu].tfm); | 340 crypto_free_hash(bht->hash_desc[cpu].tfm); |
| 336 bad_hash_alg: | 341 bad_hash_alg: |
| (...skipping 848 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1185 if (!bht->root_digest) { | 1190 if (!bht->root_digest) { |
| 1186 DMERR("no root digest exists to export"); | 1191 DMERR("no root digest exists to export"); |
| 1187 if (available > 0) | 1192 if (available > 0) |
| 1188 *hexdigest = 0; | 1193 *hexdigest = 0; |
| 1189 return -1; | 1194 return -1; |
| 1190 } | 1195 } |
| 1191 dm_bht_bin_to_hex(bht->root_digest, hexdigest, bht->digest_size); | 1196 dm_bht_bin_to_hex(bht->root_digest, hexdigest, bht->digest_size); |
| 1192 return 0; | 1197 return 0; |
| 1193 } | 1198 } |
| 1194 EXPORT_SYMBOL(dm_bht_root_hexdigest); | 1199 EXPORT_SYMBOL(dm_bht_root_hexdigest); |
| OLD | NEW |