Chromium Code Reviews
| Index: media/cdm/ppapi/cdm_file_io_test.cc |
| diff --git a/media/cdm/ppapi/cdm_file_io_test.cc b/media/cdm/ppapi/cdm_file_io_test.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..1b1986319a5c291c23bdfa5b6c7f45a86e800350 |
| --- /dev/null |
| +++ b/media/cdm/ppapi/cdm_file_io_test.cc |
| @@ -0,0 +1,431 @@ |
| +// Copyright 2013 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +#include "media/cdm/ppapi/cdm_file_io_test.h" |
| + |
| +#include <algorithm>  // For std::equal(). |
| + |
| +#include "base/callback_helpers.h" |
| +#include "base/logging.h" |
| +#include "media/base/bind_to_loop.h" |
| + |
| +namespace media { |
| + |
| +#define FILE_IO_DVLOG(level) DVLOG(level) << "File IO Test: " |
ddorwin 2013/12/16 18:16:52: I think you'll need to replace DVLOG and probably
xhwang 2013/12/16 23:04:29: This is in ClearKeyCdm code, where it's okay to us
| + |
| +const uint8_t kData[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, |
| + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }; |
| +const int kDataSize = arraysize(kData); |
| + |
| +const uint8_t kBigData[] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, |
| + 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, |
| + 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, |
| + 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, |
| + 0x00 }; |
| +const int kBigDataSize = arraysize(kBigData); |
| + |
| +const int kLargeDataSize = 9 * 1024 + 7; // > kReadSize in cdm_file_io_impl.cc. |
| + |
| +// Macros to help add test cases/steps. |
| +#define START_TEST_CASE(test_name) \ |
| + do { \ |
| + FileIOTest test_case(create_file_io_cb_, "FileIOTest." test_name); \ |
| + CREATE_FILE_IO // Create FileIO for each test case. |
| + |
| +#define ADD_TEST_STEP(type, status, data, data_size) \ |
| + test_case.AddTestStep(FileIOTest::type, cdm::FileIOClient::status, \ |
ddorwin 2013/12/16 18:16:52: Does this really need to be a macro? Can you just
xhwang 2013/12/16 23:04:29: Using can only be used for class type. So I can't
| + (data), (data_size)); |
| + |
| +#define END_TEST_CASE \ |
| + tests_.push_back(test_case); \ |
| + } while (0); |
| + |
| +#define CREATE_FILE_IO \ |
ddorwin 2013/12/16 18:16:52: Could these all be functions? (If they were functi
xhwang 2013/12/16 23:04:29: To use function, I have to use real types. So the
| + ADD_TEST_STEP(ACTION_CREATE, kSuccess, NULL, 0) |
| + |
| +#define OPEN_FILE \ |
| + ADD_TEST_STEP(ACTION_OPEN, kSuccess, NULL, 0) |
| + |
| +#define EXPECT_FILE_OPENED(status) \ |
| + ADD_TEST_STEP(RESULT_OPEN, status, NULL, 0) |
| + |
| +#define READ_FILE \ |
| + ADD_TEST_STEP(ACTION_READ, kSuccess, NULL, 0) |
| + |
| +#define EXPECT_FILE_READ(status, data, data_size) \ |
| + ADD_TEST_STEP(RESULT_READ, status, data, data_size) |
| + |
| +#define WRITE_FILE(data, data_size) \ |
| + ADD_TEST_STEP(ACTION_WRITE, kSuccess, data, data_size) |
| + |
| +#define EXPECT_FILE_WRITTEN(status) \ |
| + ADD_TEST_STEP(RESULT_WRITE, status, NULL, 0) |
| + |
| +#define CLOSE_FILE \ |
| + ADD_TEST_STEP(ACTION_CLOSE, kSuccess, NULL, 0) |
| + |
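For readers following the macro discussion above, this is roughly what one macro-built test case expands to (a sketch derived only from the macro definitions in this patch; the FileIOTest step types and cdm::FileIOClient status values are declared in cdm_file_io_test.h and the CDM headers):

  // START_TEST_CASE("ReadBeforeOpeningFile") READ_FILE
  // EXPECT_FILE_READ(kError, NULL, 0) END_TEST_CASE expands to:
  do {
    FileIOTest test_case(create_file_io_cb_, "FileIOTest." "ReadBeforeOpeningFile");
    // CREATE_FILE_IO, added implicitly by START_TEST_CASE.
    test_case.AddTestStep(
        FileIOTest::ACTION_CREATE, cdm::FileIOClient::kSuccess, (NULL), (0));
    // READ_FILE
    test_case.AddTestStep(
        FileIOTest::ACTION_READ, cdm::FileIOClient::kSuccess, (NULL), (0));
    // EXPECT_FILE_READ(kError, NULL, 0)
    test_case.AddTestStep(
        FileIOTest::RESULT_READ, cdm::FileIOClient::kError, (NULL), (0));
    tests_.push_back(test_case);
  } while (0);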
| +// FileIOTestRunner implementation. |
| + |
| +FileIOTestRunner::FileIOTestRunner(const CreateFileIOCB& create_file_io_cb) |
| + : create_file_io_cb_(create_file_io_cb), |
| + num_tests_(0), |
| + num_passed_tests_(0) { |
| + // Generate |large_data_|. |
| + large_data_.resize(kLargeDataSize); |
| + for (size_t i = 0; i < large_data_.size(); ++i) |
| + large_data_[i] = i % kuint8max; |
| + |
| + AddTests(); |
| +} |
| + |
| +FileIOTestRunner::~FileIOTestRunner() { |
| + if (tests_.empty()) |
| + return; |
| + |
| + DCHECK_LT(num_passed_tests_, num_tests_); |
| + FILE_IO_DVLOG(1) << "Not Finished (probably due to timeout). " |
ddorwin 2013/12/16 18:16:52: Does a timeout really get here vs. killing the pro
xhwang 2013/12/16 23:04:29: It doesn't crash the process. Instead, it tears do
| + << num_passed_tests_ << " passed and " |
| + << (num_tests_ - num_passed_tests_) << " failed in " |
ddorwin 2013/12/16 18:16:52: failed or not run, right? Do we need to do the mat
xhwang 2013/12/16 23:04:29: Done.
| + << num_tests_ << " tests."; |
| +} |
| + |
| +// Note: Consecutive expectations (EXPECT*) can happen in any order. |
| +void FileIOTestRunner::AddTests() { |
| + START_TEST_CASE("ReadBeforeOpeningFile") |
| + READ_FILE |
| + EXPECT_FILE_READ(kError, NULL, 0) |
| + END_TEST_CASE |
| + |
| + START_TEST_CASE("WriteBeforeOpeningFile") |
| + WRITE_FILE(kData, kDataSize) |
| + EXPECT_FILE_WRITTEN(kError) |
| + END_TEST_CASE |
| + |
| + START_TEST_CASE("ReadBeforeFileOpened") |
| + OPEN_FILE |
| + READ_FILE |
| + EXPECT_FILE_READ(kError, NULL, 0) |
ddorwin 2013/12/16 18:16:52: This seems to contradict Note above - you will get
xhwang 2013/12/16 23:04:29: Done.
| + EXPECT_FILE_OPENED(kSuccess) |
| + END_TEST_CASE |
| + |
| + START_TEST_CASE("WriteBeforeFileOpened") |
| + OPEN_FILE |
| + WRITE_FILE(kData, kDataSize) |
| + EXPECT_FILE_WRITTEN(kError) |
| + EXPECT_FILE_OPENED(kSuccess) |
| + END_TEST_CASE |
| + |
| + START_TEST_CASE("ReadDuringPendingRead") |
| + OPEN_FILE |
| + EXPECT_FILE_OPENED(kSuccess) |
| + WRITE_FILE(kData, kDataSize) |
| + EXPECT_FILE_WRITTEN(kSuccess) |
| + READ_FILE |
| + READ_FILE |
| + EXPECT_FILE_READ(kInUse, NULL, 0) |
| + EXPECT_FILE_READ(kSuccess, kData, kDataSize) |
| + END_TEST_CASE |
| + |
| + START_TEST_CASE("ReadDuringPendingWrite") |
| + OPEN_FILE |
| + EXPECT_FILE_OPENED(kSuccess) |
| + WRITE_FILE(kData, kDataSize) |
| + READ_FILE |
| + EXPECT_FILE_READ(kInUse, NULL, 0) |
| + EXPECT_FILE_WRITTEN(kSuccess) |
| + END_TEST_CASE |
| + |
| + START_TEST_CASE("WriteDuringPendingRead") |
| + OPEN_FILE |
| + EXPECT_FILE_OPENED(kSuccess) |
| + READ_FILE |
| + WRITE_FILE(kData, kDataSize) |
| + EXPECT_FILE_WRITTEN(kInUse) |
| + EXPECT_FILE_READ(kSuccess, NULL, 0) |
| + END_TEST_CASE |
| + |
| + START_TEST_CASE("WriteDuringPendingWrite") |
| + OPEN_FILE |
| + EXPECT_FILE_OPENED(kSuccess) |
| + WRITE_FILE(kData, kDataSize) |
| + WRITE_FILE(kBigData, kBigDataSize) |
| + EXPECT_FILE_WRITTEN(kInUse) |
| + EXPECT_FILE_WRITTEN(kSuccess) |
| + END_TEST_CASE |
| + |
| + START_TEST_CASE("ReadEmptyFile") |
ddorwin 2013/12/16 18:16:52: Are empty and non-existent the same thing?
xhwang 2013/12/16 23:04:29: The result is the same since we specify PP_FILEOPE
ddorwin 2013/12/17 00:17:04: Okay, maybe we should note in the interface that o
| + OPEN_FILE |
| + EXPECT_FILE_OPENED(kSuccess) |
| + READ_FILE |
| + EXPECT_FILE_READ(kSuccess, NULL, 0) |
| + END_TEST_CASE |
| + |
| + START_TEST_CASE("WriteAndRead") |
| + OPEN_FILE |
| + EXPECT_FILE_OPENED(kSuccess) |
| + WRITE_FILE(kData, kDataSize) |
| + EXPECT_FILE_WRITTEN(kSuccess) |
| + READ_FILE |
| + EXPECT_FILE_READ(kSuccess, kData, kDataSize) |
| + END_TEST_CASE |
| + |
| + START_TEST_CASE("WriteZeroBytes") |
| + OPEN_FILE |
| + EXPECT_FILE_OPENED(kSuccess) |
| + WRITE_FILE(NULL, 0) |
| + EXPECT_FILE_WRITTEN(kSuccess) |
| + READ_FILE |
| + EXPECT_FILE_READ(kSuccess, NULL, 0) |
| + END_TEST_CASE |
| + |
| + START_TEST_CASE("WriteAndReadLargeData") |
| + OPEN_FILE |
| + EXPECT_FILE_OPENED(kSuccess) |
| + WRITE_FILE(&large_data_[0], kLargeDataSize) |
| + EXPECT_FILE_WRITTEN(kSuccess) |
| + READ_FILE |
| + EXPECT_FILE_READ(kSuccess, &large_data_[0], kLargeDataSize) |
| + END_TEST_CASE |
| + |
| + START_TEST_CASE("OverwriteZeroBytes") |
| + OPEN_FILE |
| + EXPECT_FILE_OPENED(kSuccess) |
| + WRITE_FILE(kData, kDataSize) |
| + EXPECT_FILE_WRITTEN(kSuccess) |
| + READ_FILE |
| + EXPECT_FILE_READ(kSuccess, kData, kDataSize) |
| + WRITE_FILE(NULL, 0) |
| + EXPECT_FILE_WRITTEN(kSuccess) |
| + READ_FILE |
| + EXPECT_FILE_READ(kSuccess, NULL, 0) |
| + END_TEST_CASE |
| + |
| + START_TEST_CASE("OverwriteWithSmallerData") |
| + OPEN_FILE |
| + EXPECT_FILE_OPENED(kSuccess) |
| + WRITE_FILE(kBigData, kBigDataSize) |
| + EXPECT_FILE_WRITTEN(kSuccess) |
| + WRITE_FILE(kData, kDataSize) |
| + EXPECT_FILE_WRITTEN(kSuccess) |
| + READ_FILE |
| + EXPECT_FILE_READ(kSuccess, kData, kDataSize) |
| + END_TEST_CASE |
| + |
| + START_TEST_CASE("OverwriteWithLargerData") |
| + OPEN_FILE |
| + EXPECT_FILE_OPENED(kSuccess) |
| + WRITE_FILE(kData, kDataSize) |
| + EXPECT_FILE_WRITTEN(kSuccess) |
| + WRITE_FILE(kBigData, kBigDataSize) |
| + EXPECT_FILE_WRITTEN(kSuccess) |
| + READ_FILE |
| + EXPECT_FILE_READ(kSuccess, kBigData, kBigDataSize) |
| + END_TEST_CASE |
| + |
| + START_TEST_CASE("ReadExistingFile") |
| + OPEN_FILE |
| + EXPECT_FILE_OPENED(kSuccess) |
| + WRITE_FILE(kData, kDataSize) |
| + EXPECT_FILE_WRITTEN(kSuccess) |
| + CLOSE_FILE |
| + CREATE_FILE_IO |
| + OPEN_FILE |
| + EXPECT_FILE_OPENED(kSuccess) |
| + READ_FILE |
| + EXPECT_FILE_READ(kSuccess, kData, kDataSize) |
| + END_TEST_CASE |
| + |
| + START_TEST_CASE("ReopenFileInTheSameFileIO") |
| + OPEN_FILE |
| + OPEN_FILE |
| + EXPECT_FILE_OPENED(kError) // The second Open() failed. |
| + EXPECT_FILE_OPENED(kSuccess) // The first Open() succeeded. |
| + END_TEST_CASE |
| + |
| + // TODO(xhwang): This test should fail. But pp::FileIO doesn't support locking |
| + // of opened files. We need to either workaround this or fix pp::FileIO |
| + // implementation. |
| + START_TEST_CASE("ReopenFileInSeparateFileIO") |
| + OPEN_FILE |
| + EXPECT_FILE_OPENED(kSuccess) |
| + WRITE_FILE(kData, kDataSize) |
| + EXPECT_FILE_WRITTEN(kSuccess) |
| + CREATE_FILE_IO // Create a second FileIO without closing the first one. |
| + OPEN_FILE |
| + EXPECT_FILE_OPENED(kSuccess) |
| + READ_FILE |
| + EXPECT_FILE_READ(kSuccess, kData, kDataSize) |
| + WRITE_FILE(kBigData, kBigDataSize) |
| + EXPECT_FILE_WRITTEN(kSuccess) |
| + END_TEST_CASE |
| +} |
| + |
| +void FileIOTestRunner::RunAllTests(const CompletionCB& completion_cb) { |
| + completion_cb_ = BindToCurrentLoop(completion_cb); |
| + num_tests_ = tests_.size(); |
| + RunNextTest(); |
| +} |
| + |
| +void FileIOTestRunner::RunNextTest() { |
| + if (tests_.empty()) { |
| + FILE_IO_DVLOG(1) << num_passed_tests_ << " passed and " |
| + << (num_tests_ - num_passed_tests_) << " failed in " |
| + << num_tests_ << " tests."; |
| + bool success = (num_passed_tests_ == num_tests_); |
| + base::ResetAndReturn(&completion_cb_).Run(success); |
| + return; |
| + } |
| + |
| + tests_.front().Run( |
| + base::Bind(&FileIOTestRunner::OnTestComplete, base::Unretained(this))); |
| +} |
| + |
| +void FileIOTestRunner::OnTestComplete(bool success) { |
| + if (success) |
| + num_passed_tests_++; |
| + tests_.pop_front(); |
| + RunNextTest(); |
| +} |
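A hypothetical sketch of how the host CDM might drive FileIOTestRunner. The callback signatures are inferred from their use in this file (create_file_io_cb_.Run(this) returns a cdm::FileIO* given a cdm::FileIOClient*, and the completion callback takes a bool); ClearKeyCdm::CreateFileIO and ClearKeyCdm::OnFileIOTestComplete are illustrative names, not part of this patch:

  // Inside the CDM that owns the runner (illustrative wiring only).
  scoped_ptr<FileIOTestRunner> file_io_test_runner(new FileIOTestRunner(
      base::Bind(&ClearKeyCdm::CreateFileIO, base::Unretained(this))));
  file_io_test_runner->RunAllTests(
      base::Bind(&ClearKeyCdm::OnFileIOTestComplete, base::Unretained(this)));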
| + |
| +// FileIOTest implementation. |
| + |
| +FileIOTest::FileIOTest(const CreateFileIOCB& create_file_io_cb, |
| + const std::string& test_name) |
| + : create_file_io_cb_(create_file_io_cb), |
| + test_name_(test_name), |
| + file_io_(NULL), |
| + old_file_io_(NULL) {} |
| + |
| +FileIOTest::~FileIOTest() {} |
| + |
| +void FileIOTest::AddTestStep(StepType type, |
| + Status status, |
| + const uint8* data, |
| + uint32 data_size) { |
| + test_steps_.push_back(TestStep(type, status, data, data_size)); |
| +} |
| + |
| +void FileIOTest::Run(const CompletionCB& completion_cb) { |
| + FILE_IO_DVLOG(3) << "Run " << test_name_; |
| + completion_cb_ = BindToCurrentLoop(completion_cb); |
| + RunNextStep(); |
| +} |
| + |
| +void FileIOTest::OnOpenComplete(Status status) { |
| + OnResult(TestStep(RESULT_OPEN, status, NULL, 0)); |
| +} |
| + |
| +void FileIOTest::OnReadComplete(Status status, |
| + const uint8_t* data, |
| + uint32_t data_size) { |
| + OnResult(TestStep(RESULT_READ, status, data, data_size)); |
| +} |
| + |
| +void FileIOTest::OnWriteComplete(Status status) { |
| + OnResult(TestStep(RESULT_WRITE, status, NULL, 0)); |
| +} |
| + |
| +bool FileIOTest::IsResult(const TestStep& test_step) { |
| + return test_step.type == RESULT_OPEN || test_step.type == RESULT_READ || |
ddorwin 2013/12/16 18:16:52: Use a switch statement instead? (Including all cas
xhwang 2013/12/16 23:04:29: Done.
| + test_step.type == RESULT_WRITE; |
| +} |
| + |
| +bool FileIOTest::MatchesResult(const TestStep& a, const TestStep& b) { |
| + DCHECK(IsResult(a) && IsResult(b)); |
| + if (a.type != b.type || a.status != b.status) |
| + return false; |
| + |
| + if (a.type != RESULT_READ || a.status != cdm::FileIOClient::kSuccess) |
| + return true; |
| + |
| + return (a.data_size == b.data_size && |
| + std::equal(a.data, a.data + a.data_size, b.data)); |
| +} |
| + |
| +void FileIOTest::RunNextStep() { |
| + while (!test_steps_.empty()) { |
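| + // Run actions until the next step is an expected result (or until the steps |
| + // run out). When the pending action completes, OnResult() verifies the |
| + // result and calls RunNextStep() again to resume. |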
| + if (IsResult(test_steps_.front())) |
| + return; |
ddorwin 2013/12/16 18:16:52: This being first seems to assume this was called f
xhwang 2013/12/16 23:04:29: Added comments.
| + |
| + TestStep test_step = test_steps_.front(); |
| + test_steps_.pop_front(); |
| + |
| + switch (test_step.type) { |
| + case ACTION_CREATE: |
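| + // A test case can create a new FileIO without closing the previous one |
| + // (e.g. ReopenFileInSeparateFileIO). Keep at most two FileIO objects |
| + // alive: when another FileIO is created, close the oldest one first. |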
| + if (file_io_) { |
| + if (old_file_io_) |
ddorwin 2013/12/16 18:16:52: Please explain. Does this retire the oldest of 3 F
xhwang 2013/12/16 23:04:29: Done.
| + old_file_io_->Close(); |
| + old_file_io_ = file_io_; |
| + } |
| + file_io_ = create_file_io_cb_.Run(this); |
| + if (!file_io_) { |
| + FILE_IO_DVLOG(3) << "Cannot create FileIO object."; |
| + OnTestComplete(false); |
| + return; |
| + } |
| + break; |
| + case ACTION_OPEN: |
| + // Use test name as the test file name. |
ddorwin 2013/12/16 18:16:52: It might be good to mention this in a higher-level
xhwang 2013/12/16 23:04:29: Done.
| + file_io_->Open(test_name_.data(), test_name_.size()); |
| + break; |
| + case ACTION_READ: |
| + file_io_->Read(); |
| + break; |
| + case ACTION_WRITE: |
| + file_io_->Write(test_step.data, test_step.data_size); |
| + break; |
| + case ACTION_CLOSE: |
| + file_io_->Close(); |
| + file_io_ = NULL; |
| + break; |
| + default: |
| + NOTREACHED(); |
| + } |
| + } |
| + |
| + OnTestComplete(true); |
| +} |
| + |
| +void FileIOTest::OnResult(const TestStep& result) { |
| + DCHECK(IsResult(result)); |
| + if (!CheckResult(result)) { |
| + OnTestComplete(false); |
| + return; |
| + } |
| + |
| + RunNextStep(); |
| +} |
| + |
| +bool FileIOTest::CheckResult(const TestStep& result) { |
| + if (test_steps_.empty() || !IsResult(test_steps_.front())) |
| + return false; |
| + |
| + // If there are multiple results expected, the order does not matter. |
| + std::list<TestStep>::iterator iter = test_steps_.begin(); |
| + for (; iter != test_steps_.end(); ++iter) { |
| + if (!IsResult(*iter)) |
| + return false; |
| + |
| + if (!MatchesResult(*iter, result)) |
| + continue; |
| + |
| + test_steps_.erase(iter); |
| + return true; |
| + } |
| + |
| + return false; |
| +} |
| + |
| +void FileIOTest::OnTestComplete(bool success) { |
| + FILE_IO_DVLOG(3) << test_name_ << (success ? " PASSED" : " FAILED"); |
| + base::ResetAndReturn(&completion_cb_).Run(success); |
| + if (old_file_io_) { |
| + old_file_io_->Close(); |
| + old_file_io_ = NULL; |
| + } |
| + if (file_io_) { |
| + file_io_->Close(); |
| + file_io_ = NULL; |
| + } |
| +} |
| + |
| +} // namespace media |