| OLD | NEW |
| 1 /* Copyright (c) 2010 The Chromium OS Authors. All rights reserved. | 1 /* Copyright (c) 2010 The Chromium OS Authors. All rights reserved. |
| 2 * Use of this source code is governed by a BSD-style license that can be | 2 * Use of this source code is governed by a BSD-style license that can be |
| 3 * found in the LICENSE file. | 3 * found in the LICENSE file. |
| 4 * | 4 * |
| 5 * Functions for loading a kernel from disk. | 5 * Functions for loading a kernel from disk. |
| 6 * (Firmware portion) | 6 * (Firmware portion) |
| 7 */ | 7 */ |
| 8 | 8 |
| 9 #include "vboot_kernel.h" | 9 #include "vboot_kernel.h" |
| 10 | 10 |
| (...skipping 49 matching lines...) | |
| 60 /* Writes any changes to the GPT data back to the drive, then frees | 60 /* Writes any changes to the GPT data back to the drive, then frees |
| 61 * the buffers. | 61 * the buffers. |
| 62 * | 62 * |
| 63 * Returns 0 if successful, 1 if error. */ | 63 * Returns 0 if successful, 1 if error. */ |
| 64 int WriteAndFreeGptData(GptData* gptdata) { | 64 int WriteAndFreeGptData(GptData* gptdata) { |
| 65 | 65 |
| 66 uint64_t entries_sectors = TOTAL_ENTRIES_SIZE / gptdata->sector_bytes; | 66 uint64_t entries_sectors = TOTAL_ENTRIES_SIZE / gptdata->sector_bytes; |
| 67 | 67 |
| 68 if (gptdata->primary_header) { | 68 if (gptdata->primary_header) { |
| 69 if (gptdata->modified & GPT_MODIFIED_HEADER1) { | 69 if (gptdata->modified & GPT_MODIFIED_HEADER1) { |
| 70 debug("Updating GPT header 1\n"); | 70 VBDEBUG(("Updating GPT header 1\n")); |
| 71 if (0 != BootDeviceWriteLBA(1, 1, gptdata->primary_header)) | 71 if (0 != BootDeviceWriteLBA(1, 1, gptdata->primary_header)) |
| 72 return 1; | 72 return 1; |
| 73 } | 73 } |
| 74 Free(gptdata->primary_header); | 74 Free(gptdata->primary_header); |
| 75 } | 75 } |
| 76 | 76 |
| 77 if (gptdata->primary_entries) { | 77 if (gptdata->primary_entries) { |
| 78 if (gptdata->modified & GPT_MODIFIED_ENTRIES1) { | 78 if (gptdata->modified & GPT_MODIFIED_ENTRIES1) { |
| 79 debug("Updating GPT entries 1\n"); | 79 VBDEBUG(("Updating GPT entries 1\n")); |
| 80 if (0 != BootDeviceWriteLBA(2, entries_sectors, | 80 if (0 != BootDeviceWriteLBA(2, entries_sectors, |
| 81 gptdata->primary_entries)) | 81 gptdata->primary_entries)) |
| 82 return 1; | 82 return 1; |
| 83 } | 83 } |
| 84 Free(gptdata->primary_entries); | 84 Free(gptdata->primary_entries); |
| 85 } | 85 } |
| 86 | 86 |
| 87 if (gptdata->secondary_entries) { | 87 if (gptdata->secondary_entries) { |
| 88 if (gptdata->modified & GPT_MODIFIED_ENTRIES2) { | 88 if (gptdata->modified & GPT_MODIFIED_ENTRIES2) { |
| 89 debug("Updating GPT header 2\n"); | 89 VBDEBUG(("Updating GPT header 2\n")); |
| 90 if (0 != BootDeviceWriteLBA(gptdata->drive_sectors - entries_sectors - 1, | 90 if (0 != BootDeviceWriteLBA(gptdata->drive_sectors - entries_sectors - 1, |
| 91 entries_sectors, gptdata->secondary_entries)) | 91 entries_sectors, gptdata->secondary_entries)) |
| 92 return 1; | 92 return 1; |
| 93 } | 93 } |
| 94 Free(gptdata->secondary_entries); | 94 Free(gptdata->secondary_entries); |
| 95 } | 95 } |
| 96 | 96 |
| 97 if (gptdata->secondary_header) { | 97 if (gptdata->secondary_header) { |
| 98 if (gptdata->modified & GPT_MODIFIED_HEADER2) { | 98 if (gptdata->modified & GPT_MODIFIED_HEADER2) { |
| 99 debug("Updating GPT entries 2\n"); | 99 VBDEBUG(("Updating GPT entries 2\n")); |
| 100 if (0 != BootDeviceWriteLBA(gptdata->drive_sectors - 1, 1, | 100 if (0 != BootDeviceWriteLBA(gptdata->drive_sectors - 1, 1, |
| 101 gptdata->secondary_header)) | 101 gptdata->secondary_header)) |
| 102 return 1; | 102 return 1; |
| 103 } | 103 } |
| 104 Free(gptdata->secondary_header); | 104 Free(gptdata->secondary_header); |
| 105 } | 105 } |
| 106 | 106 |
| 107 /* Success */ | 107 /* Success */ |
| 108 return 0; | 108 return 0; |
| 109 } | 109 } |
| (...skipping 21 matching lines...) | |
| 131 | 131 |
| 132 /* Clear output params in case we fail */ | 132 /* Clear output params in case we fail */ |
| 133 params->partition_number = 0; | 133 params->partition_number = 0; |
| 134 params->bootloader_address = 0; | 134 params->bootloader_address = 0; |
| 135 params->bootloader_size = 0; | 135 params->bootloader_size = 0; |
| 136 | 136 |
| 137 /* Let the TPM know if we're in recovery mode */ | 137 /* Let the TPM know if we're in recovery mode */ |
| 138 if (BOOT_FLAG_RECOVERY & params->boot_flags) { | 138 if (BOOT_FLAG_RECOVERY & params->boot_flags) { |
| 139 if (0 != RollbackKernelRecovery(BOOT_FLAG_DEVELOPER & params->boot_flags | 139 if (0 != RollbackKernelRecovery(BOOT_FLAG_DEVELOPER & params->boot_flags |
| 140 ? 1 : 0)) { | 140 ? 1 : 0)) { |
| 141 debug("Error setting up TPM for recovery kernel\n"); | 141 VBDEBUG(("Error setting up TPM for recovery kernel\n")); |
| 142 return LOAD_KERNEL_RECOVERY; | 142 return LOAD_KERNEL_RECOVERY; |
| 143 } | 143 } |
| 144 } | 144 } |
| 145 | 145 |
| 146 if (is_normal) { | 146 if (is_normal) { |
| 147 /* Read current kernel key index from TPM. Assumes TPM is already | 147 /* Read current kernel key index from TPM. Assumes TPM is already |
| 148 * initialized. */ | 148 * initialized. */ |
| 149 if (0 != RollbackKernelRead(&tpm_key_version, &tpm_kernel_version)) { | 149 if (0 != RollbackKernelRead(&tpm_key_version, &tpm_kernel_version)) { |
| 150 debug("Unable to get kernel versions from TPM\n"); | 150 VBDEBUG(("Unable to get kernel versions from TPM\n")); |
| 151 return LOAD_KERNEL_RECOVERY; | 151 return LOAD_KERNEL_RECOVERY; |
| 152 } | 152 } |
| 153 } else if (is_dev) { | 153 } else if (is_dev) { |
| 154 /* In developer mode, we ignore the kernel subkey, and just use | 154 /* In developer mode, we ignore the kernel subkey, and just use |
| 155 * the SHA-512 hash to verify the key block. */ | 155 * the SHA-512 hash to verify the key block. */ |
| 156 kernel_subkey = NULL; | 156 kernel_subkey = NULL; |
| 157 } | 157 } |
| 158 | 158 |
| 159 do { | 159 do { |
| 160 /* Read GPT data */ | 160 /* Read GPT data */ |
| 161 gpt.sector_bytes = (uint32_t)blba; | 161 gpt.sector_bytes = (uint32_t)blba; |
| 162 gpt.drive_sectors = params->ending_lba + 1; | 162 gpt.drive_sectors = params->ending_lba + 1; |
| 163 if (0 != AllocAndReadGptData(&gpt)) { | 163 if (0 != AllocAndReadGptData(&gpt)) { |
| 164 debug("Unable to read GPT data\n"); | 164 VBDEBUG(("Unable to read GPT data\n")); |
| 165 break; | 165 break; |
| 166 } | 166 } |
| 167 | 167 |
| 168 /* Initialize GPT library */ | 168 /* Initialize GPT library */ |
| 169 if (GPT_SUCCESS != GptInit(&gpt)) { | 169 if (GPT_SUCCESS != GptInit(&gpt)) { |
| 170 debug("Error parsing GPT\n"); | 170 VBDEBUG(("Error parsing GPT\n")); |
| 171 break; | 171 break; |
| 172 } | 172 } |
| 173 | 173 |
| 174 /* Allocate kernel header buffers */ | 174 /* Allocate kernel header buffers */ |
| 175 kbuf = (uint8_t*)Malloc(KBUF_SIZE); | 175 kbuf = (uint8_t*)Malloc(KBUF_SIZE); |
| 176 if (!kbuf) | 176 if (!kbuf) |
| 177 break; | 177 break; |
| 178 | 178 |
| 179 /* Loop over candidate kernel partitions */ | 179 /* Loop over candidate kernel partitions */ |
| 180 while (GPT_SUCCESS == GptNextKernelEntry(&gpt, &part_start, &part_size)) { | 180 while (GPT_SUCCESS == GptNextKernelEntry(&gpt, &part_start, &part_size)) { |
| 181 VbKeyBlockHeader* key_block; | 181 VbKeyBlockHeader* key_block; |
| 182 VbKernelPreambleHeader* preamble; | 182 VbKernelPreambleHeader* preamble; |
| 183 RSAPublicKey* data_key; | 183 RSAPublicKey* data_key; |
| 184 uint64_t key_version; | 184 uint64_t key_version; |
| 185 uint64_t body_offset; | 185 uint64_t body_offset; |
| 186 | 186 |
| 187 debug("Found kernel entry at %" PRIu64 " size %" PRIu64 "\n", | 187 VBDEBUG(("Found kernel entry at %" PRIu64 " size %" PRIu64 "\n", |
| 188 part_start, part_size); | 188 part_start, part_size)); |
| 189 | 189 |
| 190 /* Found at least one kernel partition. */ | 190 /* Found at least one kernel partition. */ |
| 191 found_partitions++; | 191 found_partitions++; |
| 192 | 192 |
| 193 /* Read the first part of the kernel partition */ | 193 /* Read the first part of the kernel partition */ |
| 194 if (part_size < kbuf_sectors) | 194 if (part_size < kbuf_sectors) |
| 195 continue; | 195 continue; |
| 196 if (0 != BootDeviceReadLBA(part_start, kbuf_sectors, kbuf)) | 196 if (0 != BootDeviceReadLBA(part_start, kbuf_sectors, kbuf)) |
| 197 continue; | 197 continue; |
| 198 | 198 |
| 199 /* Verify the key block */ | 199 /* Verify the key block */ |
| 200 key_block = (VbKeyBlockHeader*)kbuf; | 200 key_block = (VbKeyBlockHeader*)kbuf; |
| 201 if ((0 != KeyBlockVerify(key_block, KBUF_SIZE, kernel_subkey))) { | 201 if ((0 != KeyBlockVerify(key_block, KBUF_SIZE, kernel_subkey))) { |
| 202 debug("Verifying key block failed.\n"); | 202 VBDEBUG(("Verifying key block failed.\n")); |
| 203 continue; | 203 continue; |
| 204 } | 204 } |
| 205 | 205 |
| 206 /* Check the key block flags against the current boot mode */ | 206 /* Check the key block flags against the current boot mode */ |
| 207 if (!(key_block->key_block_flags & | 207 if (!(key_block->key_block_flags & |
| 208 ((BOOT_FLAG_DEVELOPER & params->boot_flags) ? | 208 ((BOOT_FLAG_DEVELOPER & params->boot_flags) ? |
| 209 KEY_BLOCK_FLAG_DEVELOPER_1 : KEY_BLOCK_FLAG_DEVELOPER_0))) { | 209 KEY_BLOCK_FLAG_DEVELOPER_1 : KEY_BLOCK_FLAG_DEVELOPER_0))) { |
| 210 debug("Developer flag mismatch.\n"); | 210 VBDEBUG(("Developer flag mismatch.\n")); |
| 211 continue; | 211 continue; |
| 212 } | 212 } |
| 213 if (!(key_block->key_block_flags & | 213 if (!(key_block->key_block_flags & |
| 214 ((BOOT_FLAG_RECOVERY & params->boot_flags) ? | 214 ((BOOT_FLAG_RECOVERY & params->boot_flags) ? |
| 215 KEY_BLOCK_FLAG_RECOVERY_1 : KEY_BLOCK_FLAG_RECOVERY_0))) { | 215 KEY_BLOCK_FLAG_RECOVERY_1 : KEY_BLOCK_FLAG_RECOVERY_0))) { |
| 216 debug("Recovery flag mismatch.\n"); | 216 VBDEBUG(("Recovery flag mismatch.\n")); |
| 217 continue; | 217 continue; |
| 218 } | 218 } |
| 219 | 219 |
| 220 /* Check for rollback of key version. Note this is implicitly | 220 /* Check for rollback of key version. Note this is implicitly |
| 221 * skipped in recovery and developer modes because those set | 221 * skipped in recovery and developer modes because those set |
| 222 * key_version=0 above. */ | 222 * key_version=0 above. */ |
| 223 key_version = key_block->data_key.key_version; | 223 key_version = key_block->data_key.key_version; |
| 224 if (key_version < tpm_key_version) { | 224 if (key_version < tpm_key_version) { |
| 225 debug("Key version too old.\n"); | 225 VBDEBUG(("Key version too old.\n")); |
| 226 continue; | 226 continue; |
| 227 } | 227 } |
| 228 | 228 |
| 229 /* Get the key for preamble/data verification from the key block */ | 229 /* Get the key for preamble/data verification from the key block */ |
| 230 data_key = PublicKeyToRSA(&key_block->data_key); | 230 data_key = PublicKeyToRSA(&key_block->data_key); |
| 231 if (!data_key) | 231 if (!data_key) |
| 232 continue; | 232 continue; |
| 233 | 233 |
| 234 /* Verify the preamble, which follows the key block */ | 234 /* Verify the preamble, which follows the key block */ |
| 235 preamble = (VbKernelPreambleHeader*)(kbuf + key_block->key_block_size); | 235 preamble = (VbKernelPreambleHeader*)(kbuf + key_block->key_block_size); |
| 236 if ((0 != VerifyKernelPreamble2(preamble, | 236 if ((0 != VerifyKernelPreamble2(preamble, |
| 237 KBUF_SIZE - key_block->key_block_size, | 237 KBUF_SIZE - key_block->key_block_size, |
| 238 data_key))) { | 238 data_key))) { |
| 239 debug("Preamble verification failed.\n"); | 239 VBDEBUG(("Preamble verification failed.\n")); |
| 240 RSAPublicKeyFree(data_key); | 240 RSAPublicKeyFree(data_key); |
| 241 continue; | 241 continue; |
| 242 } | 242 } |
| 243 | 243 |
| 244 /* Check for rollback of kernel version. Note this is implicitly | 244 /* Check for rollback of kernel version. Note this is implicitly |
| 245 * skipped in recovery and developer modes because those set | 245 * skipped in recovery and developer modes because those set |
| 246 * key_version=0 and kernel_version=0 above. */ | 246 * key_version=0 and kernel_version=0 above. */ |
| 247 if (key_version == tpm_key_version && | 247 if (key_version == tpm_key_version && |
| 248 preamble->kernel_version < tpm_kernel_version) { | 248 preamble->kernel_version < tpm_kernel_version) { |
| 249 debug("Kernel version too low.\n"); | 249 VBDEBUG(("Kernel version too low.\n")); |
| 250 RSAPublicKeyFree(data_key); | 250 RSAPublicKeyFree(data_key); |
| 251 continue; | 251 continue; |
| 252 } | 252 } |
| 253 | 253 |
| 254 debug("Kernel preamble is good.\n"); | 254 VBDEBUG(("Kernel preamble is good.\n")); |
| 255 | 255 |
| 256 /* Check for lowest key version from a valid header. */ | 256 /* Check for lowest key version from a valid header. */ |
| 257 if (lowest_key_version > key_version) { | 257 if (lowest_key_version > key_version) { |
| 258 lowest_key_version = key_version; | 258 lowest_key_version = key_version; |
| 259 lowest_kernel_version = preamble->kernel_version; | 259 lowest_kernel_version = preamble->kernel_version; |
| 260 } | 260 } |
| 261 else if (lowest_key_version == key_version && | 261 else if (lowest_key_version == key_version && |
| 262 lowest_kernel_version > preamble->kernel_version) { | 262 lowest_kernel_version > preamble->kernel_version) { |
| 263 lowest_kernel_version = preamble->kernel_version; | 263 lowest_kernel_version = preamble->kernel_version; |
| 264 } | 264 } |
| 265 | 265 |
| 266 /* If we already have a good kernel, no need to read another | 266 /* If we already have a good kernel, no need to read another |
| 267 * one; we only needed to look at the versions to check for | 267 * one; we only needed to look at the versions to check for |
| 268 * rollback. */ | 268 * rollback. */ |
| 269 if (-1 != good_partition) | 269 if (-1 != good_partition) |
| 270 continue; | 270 continue; |
| 271 | 271 |
| 272 /* Verify body load address matches what we expect */ | 272 /* Verify body load address matches what we expect */ |
| 273 if ((preamble->body_load_address != (size_t)params->kernel_buffer) && | 273 if ((preamble->body_load_address != (size_t)params->kernel_buffer) && |
| 274 !(params->boot_flags & BOOT_FLAG_SKIP_ADDR_CHECK)) { | 274 !(params->boot_flags & BOOT_FLAG_SKIP_ADDR_CHECK)) { |
| 275 debug("Wrong body load address.\n"); | 275 VBDEBUG(("Wrong body load address.\n")); |
| 276 RSAPublicKeyFree(data_key); | 276 RSAPublicKeyFree(data_key); |
| 277 continue; | 277 continue; |
| 278 } | 278 } |
| 279 | 279 |
| 280 /* Verify kernel body starts at a multiple of the sector size. */ | 280 /* Verify kernel body starts at a multiple of the sector size. */ |
| 281 body_offset = key_block->key_block_size + preamble->preamble_size; | 281 body_offset = key_block->key_block_size + preamble->preamble_size; |
| 282 if (0 != body_offset % blba) { | 282 if (0 != body_offset % blba) { |
| 283 debug("Kernel body not at multiple of sector size.\n"); | 283 VBDEBUG(("Kernel body not at multiple of sector size.\n")); |
| 284 RSAPublicKeyFree(data_key); | 284 RSAPublicKeyFree(data_key); |
| 285 continue; | 285 continue; |
| 286 } | 286 } |
| 287 | 287 |
| 288 /* Verify kernel body fits in the partition */ | 288 /* Verify kernel body fits in the partition */ |
| 289 if (body_offset + preamble->body_signature.data_size > | 289 if (body_offset + preamble->body_signature.data_size > |
| 290 part_size * blba) { | 290 part_size * blba) { |
| 291 debug("Kernel body doesn't fit in partition.\n"); | 291 VBDEBUG(("Kernel body doesn't fit in partition.\n")); |
| 292 RSAPublicKeyFree(data_key); | 292 RSAPublicKeyFree(data_key); |
| 293 continue; | 293 continue; |
| 294 } | 294 } |
| 295 | 295 |
| 296 /* Read the kernel data */ | 296 /* Read the kernel data */ |
| 297 if (0 != BootDeviceReadLBA( | 297 if (0 != BootDeviceReadLBA( |
| 298 part_start + (body_offset / blba), | 298 part_start + (body_offset / blba), |
| 299 (preamble->body_signature.data_size + blba - 1) / blba, | 299 (preamble->body_signature.data_size + blba - 1) / blba, |
| 300 params->kernel_buffer)) { | 300 params->kernel_buffer)) { |
| 301 debug("Unable to read kernel data.\n"); | 301 VBDEBUG(("Unable to read kernel data.\n")); |
| 302 RSAPublicKeyFree(data_key); | 302 RSAPublicKeyFree(data_key); |
| 303 continue; | 303 continue; |
| 304 } | 304 } |
| 305 | 305 |
| 306 /* Verify kernel data */ | 306 /* Verify kernel data */ |
| 307 if (0 != VerifyData((const uint8_t*)params->kernel_buffer, | 307 if (0 != VerifyData((const uint8_t*)params->kernel_buffer, |
| 308 &preamble->body_signature, data_key)) { | 308 &preamble->body_signature, data_key)) { |
| 309 debug("Kernel data verification failed.\n"); | 309 VBDEBUG(("Kernel data verification failed.\n")); |
| 310 RSAPublicKeyFree(data_key); | 310 RSAPublicKeyFree(data_key); |
| 311 continue; | 311 continue; |
| 312 } | 312 } |
| 313 | 313 |
| 314 /* Done with the kernel signing key, so can free it now */ | 314 /* Done with the kernel signing key, so can free it now */ |
| 315 RSAPublicKeyFree(data_key); | 315 RSAPublicKeyFree(data_key); |
| 316 | 316 |
| 317 /* If we're still here, the kernel is valid. */ | 317 /* If we're still here, the kernel is valid. */ |
| 318 /* Save the first good partition we find; that's the one we'll boot */ | 318 /* Save the first good partition we find; that's the one we'll boot */ |
| 319 debug("Partiton is good.\n"); | 319 VBDEBUG(("Partiton is good.\n")); |
| 320 /* TODO: GPT partitions start at 1, but cgptlib starts them at 0. | 320 /* TODO: GPT partitions start at 1, but cgptlib starts them at 0. |
| 321 * Adjust here, until cgptlib is fixed. */ | 321 * Adjust here, until cgptlib is fixed. */ |
| 322 good_partition = gpt.current_kernel + 1; | 322 good_partition = gpt.current_kernel + 1; |
| 323 params->partition_number = gpt.current_kernel + 1; | 323 params->partition_number = gpt.current_kernel + 1; |
| 324 params->bootloader_address = preamble->bootloader_address; | 324 params->bootloader_address = preamble->bootloader_address; |
| 325 params->bootloader_size = preamble->bootloader_size; | 325 params->bootloader_size = preamble->bootloader_size; |
| 326 /* If we're in developer or recovery mode, there's no rollback | 326 /* If we're in developer or recovery mode, there's no rollback |
| 327 * protection, so we can stop at the first valid kernel. */ | 327 * protection, so we can stop at the first valid kernel. */ |
| 328 if (!is_normal) { | 328 if (!is_normal) { |
| 329 debug("Boot_flags = !is_normal\n"); | 329 VBDEBUG(("Boot_flags = !is_normal\n")); |
| 330 break; | 330 break; |
| 331 } | 331 } |
| 332 | 332 |
| 333 /* Otherwise, we're in normal boot mode, so we do care about the | 333 /* Otherwise, we're in normal boot mode, so we do care about the |
| 334 * key index in the TPM. If the good partition's key version is | 334 * key index in the TPM. If the good partition's key version is |
| 335 * the same as the tpm, then the TPM doesn't need updating; we | 335 * the same as the tpm, then the TPM doesn't need updating; we |
| 336 * can stop now. Otherwise, we'll check all the other headers | 336 * can stop now. Otherwise, we'll check all the other headers |
| 337 * to see if they contain a newer key. */ | 337 * to see if they contain a newer key. */ |
| 338 if (key_version == tpm_key_version && | 338 if (key_version == tpm_key_version && |
| 339 preamble->kernel_version == tpm_kernel_version) { | 339 preamble->kernel_version == tpm_kernel_version) { |
| 340 debug("Same key version\n"); | 340 VBDEBUG(("Same key version\n")); |
| 341 break; | 341 break; |
| 342 } | 342 } |
| 343 } /* while(GptNextKernelEntry) */ | 343 } /* while(GptNextKernelEntry) */ |
| 344 } while(0); | 344 } while(0); |
| 345 | 345 |
| 346 /* Free kernel buffer */ | 346 /* Free kernel buffer */ |
| 347 if (kbuf) | 347 if (kbuf) |
| 348 Free(kbuf); | 348 Free(kbuf); |
| 349 | 349 |
| 350 /* Write and free GPT data */ | 350 /* Write and free GPT data */ |
| 351 WriteAndFreeGptData(&gpt); | 351 WriteAndFreeGptData(&gpt); |
| 352 | 352 |
| 353 /* Handle finding a good partition */ | 353 /* Handle finding a good partition */ |
| 354 if (good_partition >= 0) { | 354 if (good_partition >= 0) { |
| 355 debug("Good_partition >= 0\n"); | 355 VBDEBUG(("Good_partition >= 0\n")); |
| 356 | 356 |
| 357 /* See if we need to update the TPM */ | 357 /* See if we need to update the TPM */ |
| 358 if (is_normal) { | 358 if (is_normal) { |
| 359 /* We only update the TPM in normal boot mode. In developer | 359 /* We only update the TPM in normal boot mode. In developer |
| 360 * mode, the kernel is self-signed by the developer, so we can't | 360 * mode, the kernel is self-signed by the developer, so we can't |
| 361 * trust the key version and wouldn't want to roll the TPM | 361 * trust the key version and wouldn't want to roll the TPM |
| 362 * forward. In recovery mode, the TPM stays PP-unlocked, so | 362 * forward. In recovery mode, the TPM stays PP-unlocked, so |
| 363 * anything we write gets blown away by the firmware when we go | 363 * anything we write gets blown away by the firmware when we go |
| 364 * back to normal mode. */ | 364 * back to normal mode. */ |
| 365 debug("Boot_flags = is_normal\n"); | 365 VBDEBUG(("Boot_flags = is_normal\n")); |
| 366 if ((lowest_key_version > tpm_key_version) || | 366 if ((lowest_key_version > tpm_key_version) || |
| 367 (lowest_key_version == tpm_key_version && | 367 (lowest_key_version == tpm_key_version && |
| 368 lowest_kernel_version > tpm_kernel_version)) { | 368 lowest_kernel_version > tpm_kernel_version)) { |
| 369 if (0 != RollbackKernelWrite((uint16_t)lowest_key_version, | 369 if (0 != RollbackKernelWrite((uint16_t)lowest_key_version, |
| 370 (uint16_t)lowest_kernel_version)) { | 370 (uint16_t)lowest_kernel_version)) { |
| 371 debug("Error writing kernel versions to TPM.\n"); | 371 VBDEBUG(("Error writing kernel versions to TPM.\n")); |
| 372 return LOAD_KERNEL_RECOVERY; | 372 return LOAD_KERNEL_RECOVERY; |
| 373 } | 373 } |
| 374 } | 374 } |
| 375 } | 375 } |
| 376 | 376 |
| 377 /* Lock the kernel versions, since we're about to boot the kernel */ | 377 /* Lock the kernel versions, since we're about to boot the kernel */ |
| 378 if (0 != RollbackKernelLock()) { | 378 if (0 != RollbackKernelLock()) { |
| 379 debug("Error locking kernel versions.\n"); | 379 VBDEBUG(("Error locking kernel versions.\n")); |
| 380 return LOAD_KERNEL_RECOVERY; | 380 return LOAD_KERNEL_RECOVERY; |
| 381 } | 381 } |
| 382 | 382 |
| 383 /* Success! */ | 383 /* Success! */ |
| 384 return LOAD_KERNEL_SUCCESS; | 384 return LOAD_KERNEL_SUCCESS; |
| 385 } | 385 } |
| 386 | 386 |
| 387 /* Handle error cases */ | 387 /* Handle error cases */ |
| 388 if (found_partitions) | 388 if (found_partitions) |
| 389 return LOAD_KERNEL_INVALID; | 389 return LOAD_KERNEL_INVALID; |
| 390 else | 390 else |
| 391 return LOAD_KERNEL_NOT_FOUND; | 391 return LOAD_KERNEL_NOT_FOUND; |
| 392 } | 392 } |
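
The change shown above swaps every `debug(...)` call for `VBDEBUG((...))`. The double parentheses are the usual C89 workaround for the lack of variadic macros: the whole argument list travels as a single macro parameter, so a non-debug build can drop the call, format string and all, at preprocessing time. Below is a minimal sketch of such a macro; the `VBOOT_DEBUG` guard name and the stderr-based `debug()` printer are assumptions for illustration, not the exact definitions from vboot's utility header.

```c
/* Minimal sketch of a double-parenthesis debug macro. The VBOOT_DEBUG
 * guard name and this printf-style debug() printer are assumed for
 * illustration; the real definitions in vboot may differ. */
#include <stdarg.h>
#include <stdio.h>

static void debug(const char* format, ...) {
  va_list ap;
  va_start(ap, format);
  vfprintf(stderr, format, ap);
  va_end(ap);
}

#ifdef VBOOT_DEBUG
/* VBDEBUG(("x=%d\n", x)) expands to debug ("x=%d\n", x). */
#define VBDEBUG(params) debug params
#else
/* Debug output compiles away to nothing. */
#define VBDEBUG(params)
#endif

int main(void) {
  VBDEBUG(("Updating GPT header 1\n"));  /* No-op unless VBOOT_DEBUG is set. */
  return 0;
}
```

Built with `-DVBOOT_DEBUG` the message prints; without it, the call site compiles to an empty statement, which is why callers must wrap the argument list in its own set of parentheses.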
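
The TPM update decision in the `is_normal` branch is effectively a lexicographic comparison on the (key version, kernel version) pair: LoadKernel() tracks the lowest pair seen across all valid kernel partitions and rolls the stored versions forward only when that pair is strictly newer than what the TPM holds. The sketch below restates that ordering with a hypothetical packing helper; `CombinedVersion()` and `ShouldRollTpmForward()` are illustrative names, not functions from vboot_kernel.c.

```c
#include <stdint.h>

/* Hypothetical helper: pack a (key_version, kernel_version) pair into one
 * 32-bit value so the lexicographic comparison becomes a single integer
 * compare. */
static uint32_t CombinedVersion(uint16_t key_version, uint16_t kernel_version) {
  return ((uint32_t)key_version << 16) | kernel_version;
}

/* Mirrors the two-clause test in LoadKernel(): write the TPM only when
 * every valid kernel seen carries versions newer than what is stored. */
static int ShouldRollTpmForward(uint16_t lowest_key, uint16_t lowest_kernel,
                                uint16_t tpm_key, uint16_t tpm_kernel) {
  return CombinedVersion(lowest_key, lowest_kernel) >
         CombinedVersion(tpm_key, tpm_kernel);
}
```

Packing into a single integer is only a restatement of the same ordering; the production code keeps the two fields separate because RollbackKernelWrite() takes them as separate 16-bit values.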