OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/test/launcher/unit_test_launcher.h" | 5 #include "base/test/launcher/unit_test_launcher.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/callback_helpers.h" | 8 #include "base/callback_helpers.h" |
9 #include "base/command_line.h" | 9 #include "base/command_line.h" |
10 #include "base/compiler_specific.h" | 10 #include "base/compiler_specific.h" |
(...skipping 210 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
221 Unretained(this), | 221 Unretained(this), |
222 callback_state)); | 222 callback_state)); |
223 } | 223 } |
224 | 224 |
  // Callback invoked when a child process running a batch of tests exits.
  // Processes the batch's XML results, relaunches any tests that did not get
  // a result (e.g. because an earlier test in the batch crashed), and cleans
  // up the temporary results file.
  //
  // |callback_state| carries the launcher, the test names that were run, and
  // the path of the temporary XML output file for this batch.
  // |exit_code| is the child process exit code; |was_timeout| is true if the
  // child was killed after exceeding the batch timeout.
  // |output| is the captured stdio of the child process.
  // NOTE(review): |elapsed_time| is not used in this body — presumably kept
  // to match the launch-callback signature; confirm against the caller.
  void GTestCallback(const GTestCallbackState& callback_state,
                     int exit_code,
                     const TimeDelta& elapsed_time,
                     bool was_timeout,
                     const std::string& output) {
    DCHECK(thread_checker_.CalledOnValidThread());
    std::vector<std::string> tests_to_relaunch;
    ProcessTestResults(callback_state.test_launcher,
                       callback_state.test_names,
                       callback_state.output_file,
                       output,
                       exit_code,
                       was_timeout,
                       &tests_to_relaunch);

    // Relaunch requested tests in parallel, but use only one test per batch
    // for more precise results (e.g. crashes, or a test that passes while the
    // process exits with a non-zero code): with a single test per process
    // there is no ambiguity about which test caused the failure.
    for (size_t i = 0; i < tests_to_relaunch.size(); i++) {
      std::vector<std::string> batch;
      batch.push_back(tests_to_relaunch[i]);
      RunBatch(callback_state.test_launcher, batch);
    }

    // The temporary file's directory is also temporary; delete it recursively
    // now that the results have been consumed.
    DeleteFile(callback_state.output_file.DirName(), true);
  }
246 | 252 |
  // Callback invoked when a child process running a SINGLE test (serial mode)
  // exits. Processes the result, cleans up the temporary results file, and
  // posts a task to continue running the remaining tests serially.
  //
  // |callback_state| carries the launcher, the (single) test name that was
  // run, and the path of the temporary XML output file.
  // |test_names| is the queue of tests still to be run serially after this
  // one.
  // |exit_code|, |was_timeout| and |output| describe the child process run.
  // NOTE(review): |elapsed_time| is not used in this body — presumably kept
  // to match the launch-callback signature; confirm against the caller.
  void SerialGTestCallback(const GTestCallbackState& callback_state,
                           const std::vector<std::string>& test_names,
                           int exit_code,
                           const TimeDelta& elapsed_time,
                           bool was_timeout,
                           const std::string& output) {
    DCHECK(thread_checker_.CalledOnValidThread());
    std::vector<std::string> tests_to_relaunch;
    bool called_any_callbacks =
        ProcessTestResults(callback_state.test_launcher,
                           callback_state.test_names,
                           callback_state.output_file,
                           output,
                           exit_code,
                           was_timeout,
                           &tests_to_relaunch);

    // There is only one test, there cannot be other tests to relaunch
    // due to a crash.
    DCHECK(tests_to_relaunch.empty());

    // There is only one test, we should have called back with its result.
    DCHECK(called_any_callbacks);

    // The temporary file's directory is also temporary; delete it recursively
    // now that the result has been consumed.
    DeleteFile(callback_state.output_file.DirName(), true);

    // Continue the serial run asynchronously rather than recursing directly,
    // so the stack does not grow with the number of tests.
    // Unretained(this) is safe only if this delegate outlives the message
    // loop's pending tasks — NOTE(review): confirm the delegate's lifetime
    // against the launcher shutdown sequence.
    MessageLoop::current()->PostTask(
        FROM_HERE,
        Bind(&UnitTestLauncherDelegate::RunSerially,
             Unretained(this),
             callback_state.test_launcher,
             test_names));
  }
281 | 287 |
282 static bool ProcessTestResults( | 288 static bool ProcessTestResults( |
283 TestLauncher* test_launcher, | 289 TestLauncher* test_launcher, |
284 const std::vector<std::string>& test_names, | 290 const std::vector<std::string>& test_names, |
285 const base::FilePath& output_file, | 291 const base::FilePath& output_file, |
286 const std::string& output, | 292 const std::string& output, |
287 int exit_code, | 293 int exit_code, |
288 bool was_timeout, | 294 bool was_timeout, |
289 std::vector<std::string>* tests_to_relaunch_after_interruption) { | 295 std::vector<std::string>* tests_to_relaunch) { |
290 std::vector<TestResult> test_results; | 296 std::vector<TestResult> test_results; |
291 bool crashed = false; | 297 bool crashed = false; |
292 bool have_test_results = | 298 bool have_test_results = |
293 ProcessGTestOutput(output_file, &test_results, &crashed); | 299 ProcessGTestOutput(output_file, &test_results, &crashed); |
294 | 300 |
295 bool called_any_callback = false; | 301 bool called_any_callback = false; |
296 | 302 |
297 if (have_test_results) { | 303 if (have_test_results) { |
298 // TODO(phajdan.jr): Check for duplicates and mismatches between | 304 // TODO(phajdan.jr): Check for duplicates and mismatches between |
299 // the results we got from XML file and tests we intended to run. | 305 // the results we got from XML file and tests we intended to run. |
300 std::map<std::string, TestResult> results_map; | 306 std::map<std::string, TestResult> results_map; |
301 for (size_t i = 0; i < test_results.size(); i++) | 307 for (size_t i = 0; i < test_results.size(); i++) |
302 results_map[test_results[i].full_name] = test_results[i]; | 308 results_map[test_results[i].full_name] = test_results[i]; |
303 | 309 |
304 bool had_interrupted_test = false; | 310 bool had_interrupted_test = false; |
305 | 311 |
| 312 // Results to be reported back to the test launcher. |
| 313 std::vector<TestResult> final_results; |
| 314 |
306 for (size_t i = 0; i < test_names.size(); i++) { | 315 for (size_t i = 0; i < test_names.size(); i++) { |
307 if (ContainsKey(results_map, test_names[i])) { | 316 if (ContainsKey(results_map, test_names[i])) { |
308 TestResult test_result = results_map[test_names[i]]; | 317 TestResult test_result = results_map[test_names[i]]; |
309 if (test_result.status == TestResult::TEST_CRASH) { | 318 if (test_result.status == TestResult::TEST_CRASH) { |
310 had_interrupted_test = true; | 319 had_interrupted_test = true; |
311 | 320 |
312 if (was_timeout) { | 321 if (was_timeout) { |
313 // Fix up the test status: we forcibly kill the child process | 322 // Fix up the test status: we forcibly kill the child process |
314 // after the timeout, so from XML results it looks just like | 323 // after the timeout, so from XML results it looks just like |
315 // a crash. | 324 // a crash. |
316 test_result.status = TestResult::TEST_TIMEOUT; | 325 test_result.status = TestResult::TEST_TIMEOUT; |
317 } | 326 } |
318 } else if (test_result.status == TestResult::TEST_SUCCESS || | 327 } else if (test_result.status == TestResult::TEST_SUCCESS || |
319 test_result.status == TestResult::TEST_FAILURE) { | 328 test_result.status == TestResult::TEST_FAILURE) { |
320 // We run multiple tests in a batch with a timeout applied | 329 // We run multiple tests in a batch with a timeout applied |
321 // to the entire batch. It is possible that with other tests | 330 // to the entire batch. It is possible that with other tests |
322 // running quickly some tests take longer than the per-test timeout. | 331 // running quickly some tests take longer than the per-test timeout. |
323 // For consistent handling of tests independent of order and other | 332 // For consistent handling of tests independent of order and other |
324 // factors, mark them as timing out. | 333 // factors, mark them as timing out. |
325 if (test_result.elapsed_time > | 334 if (test_result.elapsed_time > |
326 TestTimeouts::test_launcher_timeout()) { | 335 TestTimeouts::test_launcher_timeout()) { |
327 test_result.status = TestResult::TEST_TIMEOUT; | 336 test_result.status = TestResult::TEST_TIMEOUT; |
328 } | 337 } |
329 } | 338 } |
330 test_result.output_snippet = | 339 test_result.output_snippet = |
331 GetTestOutputSnippet(test_result, output); | 340 GetTestOutputSnippet(test_result, output); |
332 test_launcher->OnTestFinished(test_result); | 341 final_results.push_back(test_result); |
333 called_any_callback = true; | |
334 } else if (had_interrupted_test) { | 342 } else if (had_interrupted_test) { |
335 tests_to_relaunch_after_interruption->push_back(test_names[i]); | 343 tests_to_relaunch->push_back(test_names[i]); |
336 } else { | 344 } else { |
337 // TODO(phajdan.jr): Explicitly pass the info that the test didn't | 345 // TODO(phajdan.jr): Explicitly pass the info that the test didn't |
338 // run for a mysterious reason. | 346 // run for a mysterious reason. |
339 LOG(ERROR) << "no test result for " << test_names[i]; | 347 LOG(ERROR) << "no test result for " << test_names[i]; |
340 TestResult test_result; | 348 TestResult test_result; |
341 test_result.full_name = test_names[i]; | 349 test_result.full_name = test_names[i]; |
342 test_result.status = TestResult::TEST_UNKNOWN; | 350 test_result.status = TestResult::TEST_UNKNOWN; |
343 test_result.output_snippet = | 351 test_result.output_snippet = |
344 GetTestOutputSnippet(test_result, output); | 352 GetTestOutputSnippet(test_result, output); |
345 test_launcher->OnTestFinished(test_result); | 353 final_results.push_back(test_result); |
346 called_any_callback = true; | |
347 } | 354 } |
348 } | 355 } |
349 | 356 |
350 // TODO(phajdan.jr): Handle the case where processing XML output | 357 // TODO(phajdan.jr): Handle the case where processing XML output |
351 // indicates a crash but none of the test results is marked as crashing. | 358 // indicates a crash but none of the test results is marked as crashing. |
352 | 359 |
353 // TODO(phajdan.jr): Handle the case where the exit code is non-zero | 360 if (final_results.empty()) |
354 // but results file indicates that all tests passed (e.g. crash during | 361 return false; |
355 // shutdown). | 362 |
| 363 bool has_non_success_test = false; |
| 364 for (size_t i = 0; i < final_results.size(); i++) { |
| 365 if (final_results[i].status != TestResult::TEST_SUCCESS) { |
| 366 has_non_success_test = true; |
| 367 break; |
| 368 } |
| 369 } |
| 370 |
| 371 if (!has_non_success_test && exit_code != 0) { |
| 372 // This is a bit surprising case: all tests are marked as successful, |
| 373 // but the exit code was not zero. This can happen e.g. under memory |
| 374 // tools that report leaks this way. |
| 375 |
| 376 if (final_results.size() == 1) { |
| 377 // Easy case. One test only so we know the non-zero exit code |
| 378 // was caused by that one test. |
| 379 final_results[0].status = TestResult::TEST_FAILURE_ON_EXIT; |
| 380 } else { |
| 381 // Harder case. Discard the results and request relaunching all |
| 382 // tests without batching. This will trigger above branch on |
| 383 // relaunch leading to more precise results. |
| 384 LOG(WARNING) << "Not sure which test caused non-zero exit code, " |
| 385 << "relaunching all of them without batching."; |
| 386 |
| 387 for (size_t i = 0; i < final_results.size(); i++) |
| 388 tests_to_relaunch->push_back(final_results[i].full_name); |
| 389 |
| 390 return false; |
| 391 } |
| 392 } |
| 393 |
| 394 for (size_t i = 0; i < final_results.size(); i++) { |
| 395 // Fix the output snippet after possible changes to the test result. |
| 396 final_results[i].output_snippet = |
| 397 GetTestOutputSnippet(final_results[i], output); |
| 398 test_launcher->OnTestFinished(final_results[i]); |
| 399 called_any_callback = true; |
| 400 } |
356 } else { | 401 } else { |
357 fprintf(stdout, | 402 fprintf(stdout, |
358 "Failed to get out-of-band test success data, " | 403 "Failed to get out-of-band test success data, " |
359 "dumping full stdio below:\n%s\n", | 404 "dumping full stdio below:\n%s\n", |
360 output.c_str()); | 405 output.c_str()); |
361 fflush(stdout); | 406 fflush(stdout); |
362 | 407 |
363 // We do not have reliable details about test results (parsing test | 408 // We do not have reliable details about test results (parsing test |
364 // stdout is known to be unreliable), apply the executable exit code | 409 // stdout is known to be unreliable), apply the executable exit code |
365 // to all tests. | 410 // to all tests. |
(...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
443 | 488 |
444 fprintf(stdout, | 489 fprintf(stdout, |
445 "Tests took %" PRId64 " seconds.\n", | 490 "Tests took %" PRId64 " seconds.\n", |
446 (base::TimeTicks::Now() - start_time).InSeconds()); | 491 (base::TimeTicks::Now() - start_time).InSeconds()); |
447 fflush(stdout); | 492 fflush(stdout); |
448 | 493 |
449 return (success ? 0 : 1); | 494 return (success ? 0 : 1); |
450 } | 495 } |
451 | 496 |
452 } // namespace base | 497 } // namespace base |
OLD | NEW |