| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2010 The Chromium OS Authors <chromium-os-dev@chromium.org> | 2 * Copyright (C) 2010 The Chromium OS Authors <chromium-os-dev@chromium.org> |
| 3 * | 3 * |
| 4 * Device-Mapper block hash tree interface. | 4 * Device-Mapper block hash tree interface. |
| 5 * See Documentation/device-mapper/dm-bht.txt for details. | 5 * See Documentation/device-mapper/dm-bht.txt for details. |
| 6 * | 6 * |
| 7 * This file is released under the GPL. | 7 * This file is released under the GPL. |
| 8 */ | 8 */ |
| 9 | 9 |
| 10 #include <asm/atomic.h> | 10 #include <asm/atomic.h> |
| (...skipping 186 matching lines...) |
| 197 * @bht: pointer to a dm_bht_create()d bht | 197 * @bht: pointer to a dm_bht_create()d bht |
| 198 * @depth: tree depth without the root; including block hashes | 198 * @depth: tree depth without the root; including block hashes |
| 199 * @block_count: the number of block hashes / tree leaves | 199 * @block_count: the number of block hashes / tree leaves |
| 200 * @alg_name: crypto hash algorithm name | 200 * @alg_name: crypto hash algorithm name |
| 201 * | 201 * |
| 202 * Returns 0 on success. | 202 * Returns 0 on success. |
| 203 * | 203 * |
| 204 * Callers can offset into devices by storing the data in the io callbacks. | 204 * Callers can offset into devices by storing the data in the io callbacks. |
| 205 * TODO(wad) bust up into smaller helpers | 205 * TODO(wad) bust up into smaller helpers |
| 206 */ | 206 */ |
| 207 int dm_bht_create(struct dm_bht *bht, unsigned int depth, | 207 int dm_bht_create(struct dm_bht *bht, unsigned int block_count, |
| 208 » » unsigned int block_count, const char *alg_name) | 208 » » const char *alg_name) |
| 209 { | 209 { |
| 210 int status = 0; | 210 int status = 0; |
| 211 int cpu = 0; | 211 int cpu = 0; |
| 212 | 212 |
| 213 /* Allocate enough crypto contexts to be able to perform verifies | 213 /* Allocate enough crypto contexts to be able to perform verifies |
| 214 * on all available CPUs. | 214 * on all available CPUs. |
| 215 */ | 215 */ |
| 216 bht->hash_desc = (struct hash_desc *) | 216 bht->hash_desc = (struct hash_desc *) |
| 217 kcalloc(nr_cpu_ids, sizeof(struct hash_desc), GFP_KERNEL); | 217 kcalloc(nr_cpu_ids, sizeof(struct hash_desc), GFP_KERNEL); |
| 218 if (!bht->hash_desc) { | 218 if (!bht->hash_desc) { |
| (...skipping 56 matching lines...) |
| 275 */ | 275 */ |
| 276 bht->node_count = 1 << bht->node_count_shift; | 276 bht->node_count = 1 << bht->node_count_shift; |
| 277 | 277 |
| 278 /* This is unlikely to happen, but with 64k pages, who knows. */ | 278 /* This is unlikely to happen, but with 64k pages, who knows. */ |
| 279 if (bht->node_count > UINT_MAX / bht->digest_size) { | 279 if (bht->node_count > UINT_MAX / bht->digest_size) { |
| 280 DMERR("node_count * hash_len exceeds UINT_MAX!"); | 280 DMERR("node_count * hash_len exceeds UINT_MAX!"); |
| 281 status = -EINVAL; | 281 status = -EINVAL; |
| 282 goto bad_node_count; | 282 goto bad_node_count; |
| 283 } | 283 } |
| 284 | 284 |
| 285 » /* if depth == 0, create a "regular" trie with a single root block */ | 285 » bht->depth = DIV_ROUND_UP(fls(block_count - 1), bht->node_count_shift); |
| 286 » if (depth == 0) | 286 » DMDEBUG("Setting depth to %u.", bht->depth); |
| 287 » » depth = DIV_ROUND_UP(fls(block_count - 1), | |
| 288 » » » » bht->node_count_shift); | |
| 289 » if (depth > UINT_MAX / sizeof(struct dm_bht_level)) { | |
| 290 » » DMERR("bht depth is invalid: %u", depth); | |
| 291 » » status = -EINVAL; | |
| 292 » » goto bad_depth; | |
| 293 » } | |
| 294 » DMDEBUG("Setting depth to %u.", depth); | |
| 295 » bht->depth = depth; | |
| 296 | 287 |
| 297 /* Ensure that we can safely shift by this value. */ | 288 /* Ensure that we can safely shift by this value. */ |
| 298 » if (depth * bht->node_count_shift >= sizeof(unsigned int) * 8) { | 289 » if (bht->depth * bht->node_count_shift >= sizeof(unsigned int) * 8) { |
| 299 DMERR("specified depth and node_count_shift is too large"); | 290 DMERR("specified depth and node_count_shift is too large"); |
| 300 status = -EINVAL; | 291 status = -EINVAL; |
| 301 goto bad_node_count; | 292 goto bad_node_count; |
| 302 } | 293 } |
| 303 | 294 |
| 304 /* Allocate levels. Each level of the tree may have an arbitrary number | 295 /* Allocate levels. Each level of the tree may have an arbitrary number |
| 305 * of dm_bht_entry structs. Each entry contains node_count nodes. | 296 * of dm_bht_entry structs. Each entry contains node_count nodes. |
| 306 * Each node in the tree is a cryptographic digest of either node_count | 297 * Each node in the tree is a cryptographic digest of either node_count |
| 307 * nodes on the subsequent level or of a specific block on disk. | 298 * nodes on the subsequent level or of a specific block on disk. |
| 308 */ | 299 */ |
| 309 bht->levels = (struct dm_bht_level *) | 300 bht->levels = (struct dm_bht_level *) |
| 310 » » » kcalloc(depth, sizeof(struct dm_bht_level), GFP_KERNEL); | 301 » » » kcalloc(bht->depth, |
| 302 » » » » sizeof(struct dm_bht_level), GFP_KERNEL); |
| 311 if (!bht->levels) { | 303 if (!bht->levels) { |
| 312 DMERR("failed to allocate tree levels"); | 304 DMERR("failed to allocate tree levels"); |
| 313 status = -ENOMEM; | 305 status = -ENOMEM; |
| 314 goto bad_level_alloc; | 306 goto bad_level_alloc; |
| 315 } | 307 } |
| 316 | 308 |
| 317 /* Setup callback stubs */ | 309 /* Setup callback stubs */ |
| 318 bht->read_cb = &dm_bht_read_callback_stub; | 310 bht->read_cb = &dm_bht_read_callback_stub; |
| 319 bht->write_cb = &dm_bht_write_callback_stub; | 311 bht->write_cb = &dm_bht_write_callback_stub; |
| 320 | 312 |
| 321 status = dm_bht_initialize_entries(bht); | 313 status = dm_bht_initialize_entries(bht); |
| 322 if (status) | 314 if (status) |
| 323 goto bad_entries_alloc; | 315 goto bad_entries_alloc; |
| 324 | 316 |
| 325 return 0; | 317 return 0; |
| 326 | 318 |
| 327 bad_entries_alloc: | 319 bad_entries_alloc: |
| 328 while (bht->depth-- > 0) | 320 while (bht->depth-- > 0) |
| 329 kfree(bht->levels[bht->depth].entries); | 321 kfree(bht->levels[bht->depth].entries); |
| 330 kfree(bht->levels); | 322 kfree(bht->levels); |
| 331 bad_node_count: | 323 bad_node_count: |
| 332 bad_level_alloc: | 324 bad_level_alloc: |
| 333 bad_block_count: | 325 bad_block_count: |
| 334 bad_depth: | |
| 335 kfree(bht->root_digest); | 326 kfree(bht->root_digest); |
| 336 bad_root_digest_alloc: | 327 bad_root_digest_alloc: |
| 337 bad_digest_len: | 328 bad_digest_len: |
| 338 for (cpu = 0; cpu < nr_cpu_ids; ++cpu) | 329 for (cpu = 0; cpu < nr_cpu_ids; ++cpu) |
| 339 if (bht->hash_desc[cpu].tfm) | 330 if (bht->hash_desc[cpu].tfm) |
| 340 crypto_free_hash(bht->hash_desc[cpu].tfm); | 331 crypto_free_hash(bht->hash_desc[cpu].tfm); |
| 341 bad_hash_alg: | 332 bad_hash_alg: |
| 342 kfree(bht->hash_desc); | 333 kfree(bht->hash_desc); |
| 343 return status; | 334 return status; |
| 344 } | 335 } |
| (...skipping 845 matching lines...) |
| 1190 if (!bht->root_digest) { | 1181 if (!bht->root_digest) { |
| 1191 DMERR("no root digest exists to export"); | 1182 DMERR("no root digest exists to export"); |
| 1192 if (available > 0) | 1183 if (available > 0) |
| 1193 *hexdigest = 0; | 1184 *hexdigest = 0; |
| 1194 return -1; | 1185 return -1; |
| 1195 } | 1186 } |
| 1196 dm_bht_bin_to_hex(bht->root_digest, hexdigest, bht->digest_size); | 1187 dm_bht_bin_to_hex(bht->root_digest, hexdigest, bht->digest_size); |
| 1197 return 0; | 1188 return 0; |
| 1198 } | 1189 } |
| 1199 EXPORT_SYMBOL(dm_bht_root_hexdigest); | 1190 EXPORT_SYMBOL(dm_bht_root_hexdigest); |
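The core of this change is the removal of the caller-supplied `depth` argument: `dm_bht_create()` now derives the tree depth directly from `block_count`, since `fls(block_count - 1)` gives the number of bits needed to index any leaf and each level of the tree consumes `node_count_shift` of those bits. Below is a minimal userspace sketch of that computation, with stand-in definitions of `fls()` and `DIV_ROUND_UP()` matching the kernel's semantics; the sample `block_count` and `node_count_shift` values are illustrative, not taken from the patch.

```c
#include <stdio.h>

/* Stand-in for the kernel's fls(): returns one plus the index of the
 * most significant set bit, or 0 if no bits are set.
 */
static unsigned int fls(unsigned int x)
{
	unsigned int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

/* Stand-in for the kernel's DIV_ROUND_UP() from <linux/kernel.h>. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Illustrative values: 2^18 data blocks, 128 hashes per tree node
	 * (node_count_shift = 7, e.g. 4096-byte pages / 32-byte digests).
	 */
	unsigned int block_count = 262144;
	unsigned int node_count_shift = 7;

	/* Mirrors new line 285: 18 bits index every leaf, each level
	 * resolves 7 of them, so DIV_ROUND_UP(18, 7) = 3 hash levels.
	 */
	unsigned int depth = DIV_ROUND_UP(fls(block_count - 1),
					  node_count_shift);

	/* The retained guard still holds: 3 * 7 = 21 < 32, so shifting
	 * an unsigned int by depth * node_count_shift is safe.
	 */
	printf("depth = %u\n", depth);
	return 0;
}
```

With depth computed internally, the separate `depth > UINT_MAX / sizeof(struct dm_bht_level)` check and its `bad_depth` label become dead weight and are dropped; the remaining `depth * node_count_shift` shift-width check is sufficient, since any depth small enough to pass it is far below the `kcalloc()` overflow bound.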