OLD | NEW |
---|---|
1 /* Copyright (c) 2010 The Chromium OS Authors. All rights reserved. | 1 /* Copyright (c) 2010 The Chromium OS Authors. All rights reserved. |
2 * Use of this source code is governed by a BSD-style license that can be | 2 * Use of this source code is governed by a BSD-style license that can be |
3 * found in the LICENSE file. | 3 * found in the LICENSE file. |
4 * | 4 * |
5 * Functions for querying, manipulating and locking rollback indices | 5 * Functions for querying, manipulating and locking rollback indices |
6 * stored in the TPM NVRAM. | 6 * stored in the TPM NVRAM. |
7 */ | 7 */ |
8 | 8 |
9 #include "rollback_index.h" | 9 #include "rollback_index.h" |
10 | 10 |
11 #include "tlcl.h" | 11 #include "tlcl.h" |
12 #include "tss_constants.h" | 12 #include "tss_constants.h" |
13 #include "utility.h" | 13 #include "utility.h" |
14 | 14 |
15 static int g_rollback_recovery_mode = 0; | 15 static int g_rollback_recovery_mode = 0; |
16 | 16 |
17 /* disable MSVC warning on const logical expression (as in } while(0);) */ | 17 /* disable MSVC warning on const logical expression (as in } while(0);) */ |
18 __pragma(warning (disable: 4127)) | 18 __pragma(warning (disable: 4127)) |
19 | 19 |
20 #define RETURN_ON_FAILURE(tpm_command) do { \ | 20 #define RETURN_ON_FAILURE(tpm_command) do { \ |
21 uint32_t result; \ | 21 uint32_t result; \ |
22 if ((result = (tpm_command)) != TPM_SUCCESS) { \ | 22 if ((result = (tpm_command)) != TPM_SUCCESS) { \ |
23 VBDEBUG(("Rollback: %08x returned by " #tpm_command "\n", (int)result)); \ | 23 VBDEBUG(("Rollback: %08x returned by " #tpm_command "\n", (int)result)); \ |
24 return result; \ | 24 return result; \ |
25 } \ | 25 } \ |
26 } while (0) | 26 } while (0) |
27 | 27 |
28 | |
28 uint32_t TPMClearAndReenable(void) { | 29 uint32_t TPMClearAndReenable(void) { |
29 VBDEBUG(("TPM: Clear and re-enable\n")); | 30 VBDEBUG(("TPM: Clear and re-enable\n")); |
30 RETURN_ON_FAILURE(TlclForceClear()); | 31 RETURN_ON_FAILURE(TlclForceClear()); |
31 RETURN_ON_FAILURE(TlclSetEnable()); | 32 RETURN_ON_FAILURE(TlclSetEnable()); |
32 RETURN_ON_FAILURE(TlclSetDeactivated(0)); | 33 RETURN_ON_FAILURE(TlclSetDeactivated(0)); |
33 | 34 |
34 return TPM_SUCCESS; | 35 return TPM_SUCCESS; |
35 } | 36 } |
36 | 37 |
38 | |
37 /* Like TlclWrite(), but checks for write errors due to hitting the 64-write | 39 /* Like TlclWrite(), but checks for write errors due to hitting the 64-write |
38 * limit and clears the TPM when that happens. This can only happen when the | 40 * limit and clears the TPM when that happens. This can only happen when the |
39 * TPM is unowned, so it is OK to clear it (and we really have no choice). | 41 * TPM is unowned, so it is OK to clear it (and we really have no choice). |
40 * This is not expected to happen frequently, but it could happen. | 42 * This is not expected to happen frequently, but it could happen. |
41 */ | 43 */ |
42 static uint32_t SafeWrite(uint32_t index, uint8_t* data, uint32_t length) { | 44 static uint32_t SafeWrite(uint32_t index, const void* data, uint32_t length) { |
43 uint32_t result = TlclWrite(index, data, length); | 45 uint32_t result = TlclWrite(index, data, length); |
44 if (result == TPM_E_MAXNVWRITES) { | 46 if (result == TPM_E_MAXNVWRITES) { |
45 RETURN_ON_FAILURE(TPMClearAndReenable()); | 47 RETURN_ON_FAILURE(TPMClearAndReenable()); |
46 return TlclWrite(index, data, length); | 48 return TlclWrite(index, data, length); |
47 } else { | 49 } else { |
48 return result; | 50 return result; |
49 } | 51 } |
50 } | 52 } |
51 | 53 |
54 | |
52 /* Similarly to SafeWrite(), this ensures we don't fail a DefineSpace because | 55 /* Similarly to SafeWrite(), this ensures we don't fail a DefineSpace because |
53 * we hit the TPM write limit. This is even less likely to happen than with | 56 * we hit the TPM write limit. This is even less likely to happen than with |
54 * writes because we only define spaces once at initialization, but we'd rather | 57 * writes because we only define spaces once at initialization, but we'd rather |
55 * be paranoid about this. | 58 * be paranoid about this. |
56 */ | 59 */ |
57 static uint32_t SafeDefineSpace(uint32_t index, uint32_t perm, uint32_t size) { | 60 static uint32_t SafeDefineSpace(uint32_t index, uint32_t perm, uint32_t size) { |
58 uint32_t result = TlclDefineSpace(index, perm, size); | 61 uint32_t result = TlclDefineSpace(index, perm, size); |
59 if (result == TPM_E_MAXNVWRITES) { | 62 if (result == TPM_E_MAXNVWRITES) { |
60 RETURN_ON_FAILURE(TPMClearAndReenable()); | 63 RETURN_ON_FAILURE(TPMClearAndReenable()); |
61 return TlclDefineSpace(index, perm, size); | 64 return TlclDefineSpace(index, perm, size); |
62 } else { | 65 } else { |
63 return result; | 66 return result; |
64 } | 67 } |
65 } | 68 } |
66 | 69 |
67 static uint32_t InitializeKernelVersionsSpaces(void) { | 70 |
68 RETURN_ON_FAILURE(SafeDefineSpace(KERNEL_VERSIONS_NV_INDEX, | 71 /* Functions to read and write firmware and kernel spaces. */ |
69 TPM_NV_PER_PPWRITE, KERNEL_SPACE_SIZE)); | 72 static uint32_t ReadSpaceFirmware(RollbackSpaceFirmware *rsf) { |
70 RETURN_ON_FAILURE(SafeWrite(KERNEL_VERSIONS_NV_INDEX, KERNEL_SPACE_INIT_DATA, | 73 return TlclRead(FIRMWARE_NV_INDEX, rsf, sizeof(RollbackSpaceFirmware)); |
71 KERNEL_SPACE_SIZE)); | |
72 return TPM_SUCCESS; | |
73 } | 74 } |
74 | 75 |
75 /* When the return value is TPM_SUCCESS, this function sets *|initialized| to 1 | 76 static uint32_t WriteSpaceFirmware(const RollbackSpaceFirmware *rsf) { |
76 * if the spaces have been fully initialized, to 0 if not. Otherwise | 77 return SafeWrite(FIRMWARE_NV_INDEX, rsf, sizeof(RollbackSpaceFirmware)); |
77 * *|initialized| is not changed. | |
78 */ | |
79 uint32_t GetSpacesInitialized(int* initialized) { | |
80 uint32_t space_holder; | |
81 uint32_t result; | |
82 result = TlclRead(TPM_IS_INITIALIZED_NV_INDEX, | |
83 (uint8_t*) &space_holder, sizeof(space_holder)); | |
84 switch (result) { | |
85 case TPM_SUCCESS: | |
86 *initialized = 1; | |
87 break; | |
88 case TPM_E_BADINDEX: | |
89 *initialized = 0; | |
90 result = TPM_SUCCESS; | |
91 break; | |
92 } | |
93 return result; | |
94 } | 78 } |
95 | 79 |
96 /* Creates the NVRAM spaces, and sets their initial values as needed. | 80 static uint32_t ReadSpaceKernel(RollbackSpaceKernel *rsk) { |
97 */ | 81 return TlclRead(KERNEL_NV_INDEX, rsk, sizeof(RollbackSpaceKernel)); |
98 static uint32_t InitializeSpaces(void) { | 82 } |
99 uint32_t zero = 0; | 83 |
100 uint32_t firmware_perm = TPM_NV_PER_GLOBALLOCK | TPM_NV_PER_PPWRITE; | 84 static uint32_t WriteSpaceKernel(const RollbackSpaceKernel *rsk) { |
85 return SafeWrite(KERNEL_NV_INDEX, rsk, sizeof(RollbackSpaceKernel)); | |
86 } | |
87 | |
88 | |
89 | |
90 /* Creates the NVRAM spaces, and sets their initial values as needed. */ | |
91 static uint32_t InitializeSpaces(RollbackSpaceFirmware *rsf, | |
92 RollbackSpaceKernel *rsk) { | |
93 static const RollbackSpaceFirmware rsf_init = { | |
94 ROLLBACK_SPACE_FIRMWARE_VERSION, 0, 0, 0}; | |
95 static const RollbackSpaceKernel rsk_init = { | |
96 ROLLBACK_SPACE_KERNEL_VERSION, ROLLBACK_SPACE_KERNEL_UID, 0, 0}; | |
101 uint8_t nvlocked = 0; | 97 uint8_t nvlocked = 0; |
102 | 98 |
103 VBDEBUG(("TPM: Initializing spaces\n")); | 99 VBDEBUG(("TPM: Initializing spaces\n")); |
104 | 100 |
105 /* Force the TPM clear, in case it previously had an owner, so that we can | 101 /* The TPM will not enforce the NV authorization restrictions until the |
106 * redefine the NVRAM spaces. */ | |
107 RETURN_ON_FAILURE(TPMClearAndReenable()); | |
108 | |
109 /* The TPM will not enforce the NV authorization restrictions until the | |
110 * execution of a TPM_NV_DefineSpace with the handle of TPM_NV_INDEX_LOCK. | 102 * execution of a TPM_NV_DefineSpace with the handle of TPM_NV_INDEX_LOCK. |
111 * Create that space if it doesn't already exist. */ | 103 * Create that space if it doesn't already exist. */ |
112 RETURN_ON_FAILURE(TlclGetFlags(NULL, NULL, &nvlocked)); | 104 RETURN_ON_FAILURE(TlclGetFlags(NULL, NULL, &nvlocked)); |
113 VBDEBUG(("TPM: nvlocked=%d\n", nvlocked)); | 105 VBDEBUG(("TPM: nvlocked=%d\n", nvlocked)); |
114 if (!nvlocked) { | 106 if (!nvlocked) { |
115 VBDEBUG(("TPM: Enabling NV locking\n")); | 107 VBDEBUG(("TPM: Enabling NV locking\n")); |
116 RETURN_ON_FAILURE(TlclSetNvLocked()); | 108 RETURN_ON_FAILURE(TlclSetNvLocked()); |
117 } | 109 } |
118 | 110 |
119 RETURN_ON_FAILURE(SafeDefineSpace(FIRMWARE_VERSIONS_NV_INDEX, | 111 /* Initialize the firmware and kernel spaces */ |
120 firmware_perm, sizeof(uint32_t))); | 112 Memcpy(rsf, &rsf_init, sizeof(RollbackSpaceFirmware)); |
121 RETURN_ON_FAILURE(SafeWrite(FIRMWARE_VERSIONS_NV_INDEX, | 113 /* Initialize the backup copy of the kernel space to the same data |
122 (uint8_t*) &zero, sizeof(uint32_t))); | 114 * as the kernel space */ |
115 Memcpy(&rsf->kernel_backup, &rsk_init, sizeof(RollbackSpaceKernel)); | |
116 Memcpy(rsk, &rsk_init, sizeof(RollbackSpaceKernel)); | |
123 | 117 |
124 RETURN_ON_FAILURE(InitializeKernelVersionsSpaces()); | 118 /* Define and set firmware and kernel spaces */ |
125 | 119 RETURN_ON_FAILURE(SafeDefineSpace(FIRMWARE_NV_INDEX, |
126 /* The space KERNEL_VERSIONS_BACKUP_NV_INDEX is used to protect the kernel | 120 TPM_NV_PER_GLOBALLOCK | TPM_NV_PER_PPWRITE, |
127 * versions. The content of space KERNEL_MUST_USE_BACKUP determines whether | 121 sizeof(RollbackSpaceFirmware))); |
128 * only the backup value should be trusted. | 122 RETURN_ON_FAILURE(WriteSpaceFirmware(rsf)); |
129 */ | 123 RETURN_ON_FAILURE(SafeDefineSpace(KERNEL_NV_INDEX, TPM_NV_PER_PPWRITE, |
130 RETURN_ON_FAILURE(SafeDefineSpace(KERNEL_VERSIONS_BACKUP_NV_INDEX, | 124 sizeof(RollbackSpaceKernel))); |
131 firmware_perm, sizeof(uint32_t))); | 125 RETURN_ON_FAILURE(WriteSpaceKernel(rsk)); |
132 RETURN_ON_FAILURE(SafeWrite(KERNEL_VERSIONS_BACKUP_NV_INDEX, | |
133 (uint8_t*) &zero, sizeof(uint32_t))); | |
134 RETURN_ON_FAILURE(SafeDefineSpace(KERNEL_MUST_USE_BACKUP_NV_INDEX, | |
135 firmware_perm, sizeof(uint32_t))); | |
136 RETURN_ON_FAILURE(SafeWrite(KERNEL_MUST_USE_BACKUP_NV_INDEX, | |
137 (uint8_t*) &zero, sizeof(uint32_t))); | |
138 RETURN_ON_FAILURE(SafeDefineSpace(DEVELOPER_MODE_NV_INDEX, | |
139 firmware_perm, sizeof(uint32_t))); | |
140 RETURN_ON_FAILURE(SafeWrite(DEVELOPER_MODE_NV_INDEX, | |
141 (uint8_t*) &zero, sizeof(uint32_t))); | |
142 | |
143 /* The space TPM_IS_INITIALIZED_NV_INDEX is used to indicate that the TPM | |
144 * initialization has completed. Without it we cannot be sure that the last | |
145 * space to be created was also initialized (power could have been lost right | |
146 * after its creation). | |
147 */ | |
148 RETURN_ON_FAILURE(SafeDefineSpace(TPM_IS_INITIALIZED_NV_INDEX, | |
149 firmware_perm, sizeof(uint32_t))); | |
150 return TPM_SUCCESS; | 126 return TPM_SUCCESS; |
151 } | 127 } |
152 | 128 |
153 static uint32_t SetDistrustKernelSpaceAtNextBoot(uint32_t distrust) { | |
154 uint32_t must_use_backup; | |
155 RETURN_ON_FAILURE(TlclRead(KERNEL_MUST_USE_BACKUP_NV_INDEX, | |
156 (uint8_t*) &must_use_backup, sizeof(uint32_t))); | |
157 if (must_use_backup != distrust) { | |
158 RETURN_ON_FAILURE(SafeWrite(KERNEL_MUST_USE_BACKUP_NV_INDEX, | |
159 (uint8_t*) &distrust, sizeof(uint32_t))); | |
160 } | |
161 return TPM_SUCCESS; | |
162 } | |
163 | |
164 /* Checks if the kernel version space has been mucked with. If it has, | |
165 * reconstructs it using the backup value. | |
166 */ | |
167 uint32_t RecoverKernelSpace(void) { | |
168 uint32_t perms = 0; | |
169 uint8_t buffer[KERNEL_SPACE_SIZE]; | |
170 uint32_t backup_combined_versions; | |
171 uint32_t must_use_backup; | |
172 uint32_t zero = 0; | |
173 | |
174 VBDEBUG(("TPM: RecoverKernelSpace()\n")); | |
175 | |
176 RETURN_ON_FAILURE(TlclRead(KERNEL_MUST_USE_BACKUP_NV_INDEX, | |
177 (uint8_t*) &must_use_backup, sizeof(uint32_t))); | |
178 /* must_use_backup is true if the previous boot entered recovery mode. */ | |
179 | |
180 VBDEBUG(("TPM: must_use_backup = %d\n", must_use_backup)); | |
181 | |
182 /* If we can't read the kernel space, or it has the wrong permission, or it | |
183 * doesn't contain the right identifier, we give up. This will need to be | |
184 * fixed by the recovery kernel. We have to worry about this because at any | |
185 * time (even with PP turned off) the TPM owner can remove and redefine a | |
186 * PP-protected space (but not write to it). | |
187 */ | |
188 RETURN_ON_FAILURE(TlclRead(KERNEL_VERSIONS_NV_INDEX, (uint8_t*) &buffer, | |
189 KERNEL_SPACE_SIZE)); | |
190 RETURN_ON_FAILURE(TlclGetPermissions(KERNEL_VERSIONS_NV_INDEX, &perms)); | |
191 if (perms != TPM_NV_PER_PPWRITE || | |
192 Memcmp(buffer + sizeof(uint32_t), KERNEL_SPACE_UID, | |
193 KERNEL_SPACE_UID_SIZE) != 0) { | |
194 return TPM_E_CORRUPTED_STATE; | |
195 } | |
196 | |
197 if (must_use_backup) { | |
198 /* We must use the backup space because in the preceding boot cycle the | |
199 * primary space was left unlocked and cannot be trusted. | |
200 */ | |
201 RETURN_ON_FAILURE(TlclRead(KERNEL_VERSIONS_BACKUP_NV_INDEX, | |
202 (uint8_t*) &backup_combined_versions, | |
203 sizeof(uint32_t))); | |
204 RETURN_ON_FAILURE(SafeWrite(KERNEL_VERSIONS_NV_INDEX, | |
205 (uint8_t*) &backup_combined_versions, | |
206 sizeof(uint32_t))); | |
207 RETURN_ON_FAILURE(SafeWrite(KERNEL_MUST_USE_BACKUP_NV_INDEX, | |
208 (uint8_t*) &zero, 0)); | |
209 } | |
210 return TPM_SUCCESS; | |
211 } | |
212 | |
213 static uint32_t BackupKernelSpace(void) { | |
214 uint32_t kernel_versions; | |
215 uint32_t backup_versions; | |
216 VBDEBUG(("TPM: BackupKernelSpace()\n")); | |
217 RETURN_ON_FAILURE(TlclRead(KERNEL_VERSIONS_NV_INDEX, | |
218 (uint8_t*) &kernel_versions, sizeof(uint32_t))); | |
219 RETURN_ON_FAILURE(TlclRead(KERNEL_VERSIONS_BACKUP_NV_INDEX, | |
220 (uint8_t*) &backup_versions, sizeof(uint32_t))); | |
221 if (kernel_versions == backup_versions) { | |
222 return TPM_SUCCESS; | |
223 } else if (kernel_versions < backup_versions) { | |
224 /* This cannot happen. We're screwed. */ | |
225 return TPM_E_INTERNAL_INCONSISTENCY; | |
226 } | |
227 RETURN_ON_FAILURE(SafeWrite(KERNEL_VERSIONS_BACKUP_NV_INDEX, | |
228 (uint8_t*) &kernel_versions, sizeof(uint32_t))); | |
229 return TPM_SUCCESS; | |
230 } | |
231 | |
232 /* Checks for transitions between protected mode to developer mode. When going | |
233 * into or out of developer mode, clear the TPM. | |
234 */ | |
235 static uint32_t CheckDeveloperModeTransition(uint32_t current_developer) { | |
236 uint32_t past_developer; | |
237 RETURN_ON_FAILURE(TlclRead(DEVELOPER_MODE_NV_INDEX, | |
238 (uint8_t*) &past_developer, | |
239 sizeof(past_developer))); | |
240 if (past_developer != current_developer) { | |
241 RETURN_ON_FAILURE(TPMClearAndReenable()); | |
242 RETURN_ON_FAILURE(SafeWrite(DEVELOPER_MODE_NV_INDEX, | |
243 (uint8_t*) ¤t_developer, | |
244 sizeof(current_developer))); | |
245 } | |
246 return TPM_SUCCESS; | |
247 } | |
248 | 129 |
249 /* SetupTPM starts the TPM and establishes the root of trust for the | 130 /* SetupTPM starts the TPM and establishes the root of trust for the |
250 * anti-rollback mechanism. SetupTPM can fail for three reasons. 1 A bug. 2 a | 131 * anti-rollback mechanism. SetupTPM can fail for three reasons. 1 A bug. 2 a |
251 * TPM hardware failure. 3 An unexpected TPM state due to some attack. In | 132 * TPM hardware failure. 3 An unexpected TPM state due to some attack. In |
252 * general we cannot easily distinguish the kind of failure, so our strategy is | 133 * general we cannot easily distinguish the kind of failure, so our strategy is |
253 * to reboot in recovery mode in all cases. The recovery mode calls SetupTPM | 134 * to reboot in recovery mode in all cases. The recovery mode calls SetupTPM |
254 * again, which executes (almost) the same sequence of operations. There is a | 135 * again, which executes (almost) the same sequence of operations. There is a |
255 * good chance that, if recovery mode was entered because of a TPM failure, the | 136 * good chance that, if recovery mode was entered because of a TPM failure, the |
256 * failure will repeat itself. (In general this is impossible to guarantee | 137 * failure will repeat itself. (In general this is impossible to guarantee |
257 * because we have no way of creating the exact TPM initial state at the | 138 * because we have no way of creating the exact TPM initial state at the |
258 * previous boot.) In recovery mode, we ignore the failure and continue, thus | 139 * previous boot.) In recovery mode, we ignore the failure and continue, thus |
259 * giving the recovery kernel a chance to fix things (that's why we don't set | 140 * giving the recovery kernel a chance to fix things (that's why we don't set |
260 * bGlobalLock). The choice is between a knowingly insecure device and a | 141 * bGlobalLock). The choice is between a knowingly insecure device and a |
261 * bricked device. | 142 * bricked device. |
262 * | 143 * |
263 * As a side note, observe that we go through considerable hoops to avoid using | 144 * As a side note, observe that we go through considerable hoops to avoid using |
264 * the STCLEAR permissions for the index spaces. We do this to avoid writing | 145 * the STCLEAR permissions for the index spaces. We do this to avoid writing |
265 * to the TPM flashram at every reboot or wake-up, because of concerns about | 146 * to the TPM flashram at every reboot or wake-up, because of concerns about |
266 * the durability of the NVRAM. | 147 * the durability of the NVRAM. |
267 */ | 148 */ |
268 uint32_t SetupTPM(int recovery_mode, int developer_mode) { | 149 uint32_t SetupTPM(int recovery_mode, int developer_mode, |
150 RollbackSpaceFirmware *rsf) { | |
151 | |
152 RollbackSpaceKernel rsk; | |
153 int rsf_dirty = 0; | |
154 uint8_t new_flags = 0; | |
155 | |
269 uint8_t disable; | 156 uint8_t disable; |
270 uint8_t deactivated; | 157 uint8_t deactivated; |
271 uint32_t result; | 158 uint32_t result; |
159 uint32_t perms; | |
272 | 160 |
273 VBDEBUG(("TPM: SetupTPM(r%d, d%d)\n", recovery_mode, developer_mode)); | 161 VBDEBUG(("TPM: SetupTPM(r%d, d%d)\n", recovery_mode, developer_mode)); |
274 | 162 |
275 /* TODO: TlclLibInit() should be able to return failure */ | 163 /* TODO: TlclLibInit() should be able to return failure */ |
276 TlclLibInit(); | 164 TlclLibInit(); |
277 | 165 |
278 RETURN_ON_FAILURE(TlclStartup()); | 166 RETURN_ON_FAILURE(TlclStartup()); |
279 #ifdef USE_CONTINUE_SELF_TEST | 167 #ifdef USE_CONTINUE_SELF_TEST |
280 /* TODO: ContinueSelfTest() should be faster than SelfTestFull, but may also | 168 /* TODO: ContinueSelfTest() should be faster than SelfTestFull, but |
281 * not work properly in older TPM firmware. For now, do the full self test. */ | 169 * may also not work properly in older TPM firmware. For now, do |
170 * the full self test. */ | |
282 RETURN_ON_FAILURE(TlclContinueSelfTest()); | 171 RETURN_ON_FAILURE(TlclContinueSelfTest()); |
283 #else | 172 #else |
284 RETURN_ON_FAILURE(TlclSelfTestFull()); | 173 RETURN_ON_FAILURE(TlclSelfTestFull()); |
285 #endif | 174 #endif |
286 RETURN_ON_FAILURE(TlclAssertPhysicalPresence()); | 175 RETURN_ON_FAILURE(TlclAssertPhysicalPresence()); |
287 /* Checks that the TPM is enabled and activated. */ | 176 |
177 /* Check that the TPM is enabled and activated. */ | |
288 RETURN_ON_FAILURE(TlclGetFlags(&disable, &deactivated, NULL)); | 178 RETURN_ON_FAILURE(TlclGetFlags(&disable, &deactivated, NULL)); |
289 if (disable || deactivated) { | 179 if (disable || deactivated) { |
290 VBDEBUG(("TPM: disabled (%d) or deactivated (%d). Fixing...\n", disable, deactivated)); | 180 VBDEBUG(("TPM: disabled (%d) or deactivated (%d). Fixing...\n", |
181 disable, deactivated)); | |
291 RETURN_ON_FAILURE(TlclSetEnable()); | 182 RETURN_ON_FAILURE(TlclSetEnable()); |
292 RETURN_ON_FAILURE(TlclSetDeactivated(0)); | 183 RETURN_ON_FAILURE(TlclSetDeactivated(0)); |
293 VBDEBUG(("TPM: Must reboot to re-enable\n")); | 184 VBDEBUG(("TPM: Must reboot to re-enable\n")); |
294 return TPM_E_MUST_REBOOT; | 185 return TPM_E_MUST_REBOOT; |
295 } | 186 } |
296 result = RecoverKernelSpace(); | 187 |
297 if (result != TPM_SUCCESS) { | 188 /* Read the firmware space. */ |
298 /* Check if this is the first time we run and the TPM has not been | 189 result = ReadSpaceFirmware(rsf); |
299 * initialized yet. | 190 if (TPM_E_BADINDEX == result) { |
300 */ | 191 /* This is the first time we've run, and the TPM has not been |
301 int initialized = 0; | 192 * initialized. Initialize it. */ |
302 VBDEBUG(("TPM: RecoverKernelSpace() failed\n")); | 193 VBDEBUG(("TPM: Not initialized yet.\n")); |
303 RETURN_ON_FAILURE(GetSpacesInitialized(&initialized)); | 194 RETURN_ON_FAILURE(InitializeSpaces(rsf, &rsk)); |
304 if (initialized) { | 195 } else if (TPM_SUCCESS != result) { |
305 VBDEBUG(("TPM: Already initialized, so give up\n")); | 196 VBDEBUG(("TPM: Firmware space in a bad state; giving up.\n")); |
306 return result; | 197 return TPM_E_CORRUPTED_STATE; |
198 } | |
199 VBDEBUG(("TPM: Firmware space sv%d f%x v%x\n", | |
200 rsf->struct_version, rsf->flags, rsf->fw_versions)); | |
201 | |
202 /* Read the kernel space and verify its permissions. If the kernel | |
203 * space has the wrong permission, or it doesn't contain the right | |
204 * identifier, we give up. This will need to be fixed by the | |
205 * recovery kernel. We have to worry about this because at any time | |
206 * (even with PP turned off) the TPM owner can remove and redefine a | |
207 * PP-protected space (but not write to it). */ | |
208 RETURN_ON_FAILURE(ReadSpaceKernel(&rsk)); | |
209 RETURN_ON_FAILURE(TlclGetPermissions(KERNEL_NV_INDEX, &perms)); | |
210 if (TPM_NV_PER_PPWRITE != perms || ROLLBACK_SPACE_KERNEL_UID != rsk.uid) | |
211 return TPM_E_CORRUPTED_STATE; | |
212 VBDEBUG(("TPM: Kernel space sv%d v%x\n", | |
213 rsk.struct_version, rsk.kernel_versions)); | |
214 | |
215 /* If the kernel space and its backup are different, we need to copy | |
216 * one to the other. Which one we copy depends on whether the | |
217 * use-backup flag is set. */ | |
218 if (0 != Memcmp(&rsk, &rsf->kernel_backup, sizeof(RollbackSpaceKernel))) { | |
219 VBDEBUG(("TPM: kernel space and backup are different\n")); | |
220 | |
221 if (rsf->flags & FLAG_KERNEL_SPACE_USE_BACKUP) { | |
222 VBDEBUG(("TPM: use backup kernel space\n")); | |
223 Memcpy(&rsk, &rsf->kernel_backup, sizeof(RollbackSpaceKernel)); | |
224 RETURN_ON_FAILURE(WriteSpaceKernel(&rsk)); | |
225 } else if (rsk.kernel_versions < rsf->kernel_backup.kernel_versions) { | |
226 VBDEBUG(("TPM: kernel versions %x < backup versions %x\n", | |
227 rsk.kernel_versions, rsf->kernel_backup.kernel_versions)); | |
228 return TPM_E_INTERNAL_INCONSISTENCY; | |
307 } else { | 229 } else { |
308 VBDEBUG(("TPM: Need to initialize spaces.\n")); | 230 VBDEBUG(("TPM: copy kernel space to backup\n")); |
309 RETURN_ON_FAILURE(InitializeSpaces()); | 231 Memcpy(&rsf->kernel_backup, &rsk, sizeof(RollbackSpaceKernel)); |
310 VBDEBUG(("TPM: Retrying RecoverKernelSpace() now that spaces are initialized.\n")); | 232 rsf_dirty = 1; |
311 RETURN_ON_FAILURE(RecoverKernelSpace()); | |
312 } | 233 } |
313 } | 234 } |
314 RETURN_ON_FAILURE(BackupKernelSpace()); | |
315 RETURN_ON_FAILURE(SetDistrustKernelSpaceAtNextBoot(recovery_mode)); | |
316 RETURN_ON_FAILURE(CheckDeveloperModeTransition(developer_mode)); | |
317 | 235 |
236 /* Clear ownership if developer flag has toggled */ | |
237 if ((developer_mode ? FLAG_LAST_BOOT_DEVELOPER : 0) != | |
238 (rsf->flags & FLAG_LAST_BOOT_DEVELOPER)) { | |
239 VBDEBUG(("TPM: Developer flag changed; clearing owner.\n")); | |
240 RETURN_ON_FAILURE(TPMClearAndReenable()); | |
241 } | |
242 | |
243 /* Update flags */ | |
244 if (developer_mode) | |
245 new_flags |= FLAG_LAST_BOOT_DEVELOPER; | |
318 if (recovery_mode) { | 246 if (recovery_mode) { |
319 /* In recovery mode global variables are usable. */ | 247 new_flags |= FLAG_KERNEL_SPACE_USE_BACKUP; |
320 g_rollback_recovery_mode = 1; | 248 g_rollback_recovery_mode = 1; /* Global variables are usable in |
249 * recovery mode */ | |
321 } | 250 } |
251 if (rsf->flags != new_flags) { | |
252 rsf->flags = new_flags; | |
253 rsf_dirty = 1; | |
254 } | |
255 | |
256 /* If firmware space is dirty, flush it back to the TPM */ | |
257 if (rsf_dirty) { | |
258 VBDEBUG(("TPM: Updating firmware space.\n")); | |
259 RETURN_ON_FAILURE(WriteSpaceFirmware(rsf)); | |
260 } | |
261 | |
322 VBDEBUG(("TPM: SetupTPM() succeeded\n")); | 262 VBDEBUG(("TPM: SetupTPM() succeeded\n")); |
323 return TPM_SUCCESS; | 263 return TPM_SUCCESS; |
324 } | 264 } |
325 | 265 |
326 /* disable MSVC warnings on unused arguments */ | 266 /* disable MSVC warnings on unused arguments */ |
327 __pragma(warning (disable: 4100)) | 267 __pragma(warning (disable: 4100)) |
328 | 268 |
329 | 269 |
330 #ifdef DISABLE_ROLLBACK_TPM | 270 #ifdef DISABLE_ROLLBACK_TPM |
331 | 271 |
332 /* Dummy implementations which don't support TPM rollback protection */ | 272 /* Dummy implementations which don't support TPM rollback protection */ |
333 | 273 |
334 uint32_t RollbackFirmwareSetup(int developer_mode) { | 274 uint32_t RollbackFirmwareSetup(int developer_mode, |
275 uint16_t* key_version, uint16_t* version) { | |
335 #ifndef CHROMEOS_ENVIRONMENT | 276 #ifndef CHROMEOS_ENVIRONMENT |
336 /* Initialize the TPM, but ignore return codes. In ChromeOS | 277 /* Initialize the TPM, but ignore return codes. In ChromeOS |
337 * environment, don't even talk to the TPM. */ | 278 * environment, don't even talk to the TPM. */ |
338 TlclLibInit(); | 279 TlclLibInit(); |
339 TlclStartup(); | 280 TlclStartup(); |
340 TlclSelfTestFull(); | 281 TlclSelfTestFull(); |
341 #endif | 282 #endif |
342 return TPM_SUCCESS; | |
343 } | |
344 | 283 |
345 uint32_t RollbackFirmwareRead(uint16_t* key_version, uint16_t* version) { | |
346 *key_version = *version = 0; | 284 *key_version = *version = 0; |
347 return TPM_SUCCESS; | 285 return TPM_SUCCESS; |
348 } | 286 } |
349 | 287 |
350 uint32_t RollbackFirmwareWrite(uint16_t key_version, uint16_t version) { | 288 uint32_t RollbackFirmwareWrite(uint16_t key_version, uint16_t version) { |
351 return TPM_SUCCESS; | 289 return TPM_SUCCESS; |
352 } | 290 } |
353 | 291 |
354 uint32_t RollbackFirmwareLock(void) { | 292 uint32_t RollbackFirmwareLock(void) { |
355 return TPM_SUCCESS; | 293 return TPM_SUCCESS; |
(...skipping 18 matching lines...)
374 uint32_t RollbackKernelWrite(uint16_t key_version, uint16_t version) { | 312 uint32_t RollbackKernelWrite(uint16_t key_version, uint16_t version) { |
375 return TPM_SUCCESS; | 313 return TPM_SUCCESS; |
376 } | 314 } |
377 | 315 |
378 uint32_t RollbackKernelLock(void) { | 316 uint32_t RollbackKernelLock(void) { |
379 return TPM_SUCCESS; | 317 return TPM_SUCCESS; |
380 } | 318 } |
381 | 319 |
382 #else | 320 #else |
383 | 321 |
384 uint32_t RollbackFirmwareSetup(int developer_mode) { | 322 uint32_t RollbackFirmwareSetup(int developer_mode, uint16_t* key_version, |
385 return SetupTPM(0, developer_mode); | 323 uint16_t* version) { |
386 } | 324 RollbackSpaceFirmware rsf; |
387 | 325 |
388 uint32_t RollbackFirmwareRead(uint16_t* key_version, uint16_t* version) { | 326 RETURN_ON_FAILURE(SetupTPM(0, developer_mode, &rsf)); |
389 uint32_t firmware_versions; | 327 *key_version = (uint16_t)(rsf.fw_versions >> 16); |
390 /* Gets firmware versions. */ | 328 *version = (uint16_t)(rsf.fw_versions & 0xffff); |
391 RETURN_ON_FAILURE(TlclRead(FIRMWARE_VERSIONS_NV_INDEX, | 329 |
392 (uint8_t*) &firmware_versions, | 330 VBDEBUG(("TPM: RollbackFirmwareSetup %x %x %x\n", (int)rsf.fw_versions, (int)*key_version, (int)*version)); |
393 sizeof(firmware_versions))); | 331 |
394 *key_version = (uint16_t) (firmware_versions >> 16); | |
395 *version = (uint16_t) (firmware_versions & 0xffff); | |
396 return TPM_SUCCESS; | 332 return TPM_SUCCESS; |
397 } | 333 } |
398 | 334 |
399 uint32_t RollbackFirmwareWrite(uint16_t key_version, uint16_t version) { | 335 uint32_t RollbackFirmwareWrite(uint16_t key_version, uint16_t version) { |
400 uint32_t combined_version = (key_version << 16) & version; | 336 RollbackSpaceFirmware rsf; |
401 return SafeWrite(FIRMWARE_VERSIONS_NV_INDEX, | 337 uint32_t new_versions = ((uint32_t)key_version << 16) | version; |
402 (uint8_t*) &combined_version, | 338 |
403 sizeof(uint32_t)); | 339 VBDEBUG(("TPM: RollbackFirmwareWrite(%d, %d)\n", (int)key_version, (int)version)); |
340 | |
341 RETURN_ON_FAILURE(ReadSpaceFirmware(&rsf)); | |
342 VBDEBUG(("TPM: RollbackFirmwareWrite %x --> %x\n", (int)rsf.fw_versions, (int)new_versions)); |
343 rsf.fw_versions = new_versions; | |
344 return WriteSpaceFirmware(&rsf); | |
404 } | 345 } |
405 | 346 |
406 uint32_t RollbackFirmwareLock(void) { | 347 uint32_t RollbackFirmwareLock(void) { |
407 return TlclSetGlobalLock(); | 348 return TlclSetGlobalLock(); |
408 } | 349 } |
409 | 350 |
410 uint32_t RollbackKernelRecovery(int developer_mode) { | 351 uint32_t RollbackKernelRecovery(int developer_mode) { |
411 uint32_t result = SetupTPM(1, developer_mode); | 352 RollbackSpaceFirmware rsf; |
353 uint32_t result = SetupTPM(1, developer_mode, &rsf); | |
412 /* In recovery mode we ignore TPM malfunctions or corruptions, and leave the | 354 /* In recovery mode we ignore TPM malfunctions or corruptions, and leave the |
413 * TPM completely unlocked if and only if the dev mode switch is ON. The | 355 * TPM completely unlocked if and only if the dev mode switch is ON. The |
414 * recovery kernel will fix the TPM (if needed) and lock it ASAP. We leave | 356 * recovery kernel will fix the TPM (if needed) and lock it ASAP. We leave |
415 * Physical Presence on in either case. | 357 * Physical Presence on in either case. */ |
416 */ | |
417 if (!developer_mode) { | 358 if (!developer_mode) { |
418 RETURN_ON_FAILURE(TlclSetGlobalLock()); | 359 RETURN_ON_FAILURE(TlclSetGlobalLock()); |
419 } | 360 } |
420 /* We still return the result of SetupTPM even though we expect the caller to | 361 /* We still return the result of SetupTPM even though we expect the caller to |
421 * ignore it. It's useful in unit testing. | 362 * ignore it. It's useful in unit testing. */ |
422 */ | |
423 return result; | 363 return result; |
424 } | 364 } |
425 | 365 |
426 uint32_t RollbackKernelRead(uint16_t* key_version, uint16_t* version) { | 366 uint32_t RollbackKernelRead(uint16_t* key_version, uint16_t* version) { |
427 uint32_t kernel_versions; | |
428 if (g_rollback_recovery_mode) { | 367 if (g_rollback_recovery_mode) { |
429 *key_version = 0; | 368 *key_version = 0; |
430 *version = 0; | 369 *version = 0; |
431 } else { | 370 } else { |
432 /* Reads kernel versions from TPM. */ | 371 RollbackSpaceKernel rsk; |
433 RETURN_ON_FAILURE(TlclRead(KERNEL_VERSIONS_NV_INDEX, | 372 RETURN_ON_FAILURE(ReadSpaceKernel(&rsk)); |
434 (uint8_t*) &kernel_versions, | 373 *key_version = (uint16_t)(rsk.kernel_versions >> 16); |
435 sizeof(kernel_versions))); | 374 *version = (uint16_t)(rsk.kernel_versions & 0xffff); |
436 *key_version = (uint16_t) (kernel_versions >> 16); | 375 VBDEBUG(("TPM: RollbackKernelRead %x %x %x\n", (int)rsk.kernel_versions, (int)*key_version, (int)*version)); |
437 *version = (uint16_t) (kernel_versions & 0xffff); | |
438 } | 376 } |
439 return TPM_SUCCESS; | 377 return TPM_SUCCESS; |
440 } | 378 } |
441 | 379 |
442 uint32_t RollbackKernelWrite(uint16_t key_version, uint16_t version) { | 380 uint32_t RollbackKernelWrite(uint16_t key_version, uint16_t version) { |
443 if (!g_rollback_recovery_mode) { | 381 |
444 uint32_t combined_version = (key_version << 16) & version; | 382 VBDEBUG(("TPM: RollbackKernelWrite(%d, %d)\n", (int)key_version, (int)version) ); |
445 return SafeWrite(KERNEL_VERSIONS_NV_INDEX, | 383 |
446 (uint8_t*) &combined_version, | 384 if (g_rollback_recovery_mode) { |
447 sizeof(uint32_t)); | 385 return TPM_SUCCESS; |
386 } else { | |
387 RollbackSpaceKernel rsk; | |
388 uint32_t new_versions = ((uint32_t)key_version << 16) | version; | |
389 | |
390 RETURN_ON_FAILURE(ReadSpaceKernel(&rsk)); | |
391 VBDEBUG(("TPM: RollbackKernelWrite %x --> %x\n", (int)rsk.kernel_versions, ( int)new_versions)); | |
392 rsk.kernel_versions = new_versions; | |
393 return WriteSpaceKernel(&rsk); | |
448 } | 394 } |
449 return TPM_SUCCESS; | |
450 } | 395 } |
451 | 396 |
452 uint32_t RollbackKernelLock(void) { | 397 uint32_t RollbackKernelLock(void) { |
453 if (!g_rollback_recovery_mode) { | 398 if (g_rollback_recovery_mode) { |
399 return TPM_SUCCESS; | |
400 } else { | |
454 return TlclLockPhysicalPresence(); | 401 return TlclLockPhysicalPresence(); |
455 } else { | |
456 return TPM_SUCCESS; | |
457 } | 402 } |
458 } | 403 } |
459 | 404 |
460 #endif // DISABLE_ROLLBACK_TPM | 405 #endif // DISABLE_ROLLBACK_TPM |
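
Note on the version packing changed by this diff: the old RollbackFirmwareWrite()/RollbackKernelWrite() (old lines 400 and 444) combined the two 16-bit fields with (key_version << 16) & version, which always yields zero because the shifted key version and the 16-bit version share no bits; the new code (new lines 337 and 388) packs with a shift-and-OR and unpacks with a shift and mask, as in RollbackFirmwareSetup() and RollbackKernelRead(). Below is a minimal standalone sketch of that packing; the helper names PackVersions and UnpackVersions are illustrative and not part of the source.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack a 16-bit key version and a 16-bit version into one 32-bit field,
 * matching the shift-and-OR used by the new RollbackFirmwareWrite()/
 * RollbackKernelWrite(). */
static uint32_t PackVersions(uint16_t key_version, uint16_t version) {
  return ((uint32_t)key_version << 16) | version;
}

/* Unpack the combined field, matching RollbackFirmwareSetup()/
 * RollbackKernelRead(). */
static void UnpackVersions(uint32_t combined,
                           uint16_t* key_version, uint16_t* version) {
  *key_version = (uint16_t)(combined >> 16);
  *version = (uint16_t)(combined & 0xffff);
}

int main(void) {
  uint16_t key_version, version;
  uint32_t combined = PackVersions(3, 7);     /* 0x00030007 */
  uint32_t old_buggy = (3 << 16) & 7;         /* old '&' combination: always 0 */
  UnpackVersions(combined, &key_version, &version);
  assert(key_version == 3 && version == 7);
  printf("combined=%08x old_buggy=%08x key=%u version=%u\n",
         (unsigned)combined, (unsigned)old_buggy, key_version, version);
  return 0;
}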