Index: content/browser/blob_storage/blob_storage_context_unittest.cc |
diff --git a/content/browser/blob_storage/blob_storage_context_unittest.cc b/content/browser/blob_storage/blob_storage_context_unittest.cc |
index 166509e7c53c2f1f808ff9b3e85404819fcb806e..b80d2f0d6acf6ae190642f866b67b739c3a0b9ec 100644 |
--- a/content/browser/blob_storage/blob_storage_context_unittest.cc |
+++ b/content/browser/blob_storage/blob_storage_context_unittest.cc |
@@ -99,12 +99,18 @@ void SaveBlobStatusAndFiles(BlobStatus* status_ptr, |
std::vector<FileCreationInfo>* files_ptr, |
BlobStatus status, |
std::vector<FileCreationInfo> files) { |
+ EXPECT_FALSE(BlobStatusIsError(status)); |
*status_ptr = status; |
for (FileCreationInfo& info : files) { |
files_ptr->push_back(std::move(info)); |
} |
} |
+void IncrementPointer(size_t* number, BlobStatus status) { |
kinuko
2016/12/01 05:10:04
nit: well, it's incrementing the pointed value rather than the pointer. |
dmurph
2016/12/01 20:41:00
Done.
|
+ EXPECT_EQ(BlobStatus::DONE, status); |
+ *number = *number + 1; |
+} |
+ |
} // namespace |
class BlobStorageContextTest : public testing::Test { |
@@ -112,7 +118,16 @@ class BlobStorageContextTest : public testing::Test { |
BlobStorageContextTest() {} |
~BlobStorageContextTest() override {} |
- void SetUp() override { context_ = base::MakeUnique<BlobStorageContext>(); } |
+ void SetUp() override { |
+ ASSERT_TRUE(temp_dir_.CreateUniqueTempDir()); |
+ context_ = base::MakeUnique<BlobStorageContext>(); |
+ } |
+ |
+ void TearDown() override { |
+ base::RunLoop().RunUntilIdle(); |
+ file_runner_->RunPendingTasks(); |
+ ASSERT_TRUE(temp_dir_.Delete()); |
+ } |
std::unique_ptr<BlobDataHandle> SetupBasicBlob(const std::string& id) { |
BlobDataBuilder builder(id); |
@@ -141,6 +156,8 @@ class BlobStorageContextTest : public testing::Test { |
} |
std::vector<FileCreationInfo> files_; |
+ base::ScopedTempDir temp_dir_; |
+ scoped_refptr<TestSimpleTaskRunner> file_runner_ = new TestSimpleTaskRunner(); |
base::MessageLoop fake_io_message_loop_; |
std::unique_ptr<BlobStorageContext> context_; |
@@ -655,6 +672,150 @@ TEST_F(BlobStorageContextTest, TestUnknownBrokenAndBuildingBlobReference) { |
EXPECT_FALSE(context_->registry().HasEntry(kReferencingId)); |
} |
-// TODO(michaeln): tests for the depcrecated url stuff |
+TEST_F(BlobStorageContextTest, BuildBlobCombinations) { |
+ const std::string kTestBlobData = "Test Blob Data"; |
+ const std::string kId("id"); |
+ const size_t kTotalRawBlobs = 200; |
+ const size_t kTotalSlicedBlobs = 100; |
+ scoped_refptr<BlobDataBuilder::DataHandle> disk_cache_data_handle = |
+ new EmptyDataHandle(); |
+ |
+ context_ = |
+ base::MakeUnique<BlobStorageContext>(temp_dir_.GetPath(), file_runner_); |
+ |
+ SetTestMemoryLimits(); |
+ std::unique_ptr<disk_cache::Backend> cache = CreateInMemoryDiskCache(); |
+ ASSERT_TRUE(cache); |
+ disk_cache::ScopedEntryPtr entry = |
+ CreateDiskCacheEntry(cache.get(), "test entry", kTestBlobData); |
+ |
+ // This tests mixed blob content with both synchronous and asynchronous |
+ // construction. Blobs should also be paged to disk during execution. |
+ std::vector<std::unique_ptr<BlobDataBuilder>> builders; |
+ std::vector<size_t> sizes; |
+ for (size_t i = 0; i < kTotalRawBlobs; i++) { |
+ builders.emplace_back(new BlobDataBuilder(base::SizeTToString(i))); |
+ auto& builder = *builders.back(); |
+ size_t size = 0; |
+ if (i % 2 != 0) { |
+ builder.AppendFutureData(5u); |
+ size += 5u; |
+ if (i % 3 == 1) { |
+ builder.AppendData("abcdefghij", 4u); |
+ size += 4u; |
+ } |
+ if (i % 3 == 0) { |
+ builder.AppendFutureData(1u); |
+ size += 1u; |
+ } |
kinuko
2016/12/01 05:10:04
Could we avoid using nested if for deciding test parameters? |
dmurph
2016/12/01 20:41:00
Done.
|
+ } else if (i % 3 == 0) { |
+ builder.AppendFutureFile(0lu, 3lu, 0); |
+ size += 3u; |
+ } |
+ if (i % 5 != 0) { |
+ builder.AppendFile(base::FilePath::FromUTF8Unsafe(base::SizeTToString(i)), |
+ 0ul, 20ul, base::Time::Max()); |
+ size += 20u; |
+ } |
+ builder.AppendDiskCacheEntry(disk_cache_data_handle, entry.get(), |
+ kTestDiskCacheStreamIndex); |
+ size += 14; |
kinuko
2016/12/01 05:10:04
14u for consistency
or use strlen(kTestBlobData)
dmurph
2016/12/01 20:41:00
Done.
|
+ EXPECT_NE(0u, size); |
+ sizes.push_back(size); |
+ } |
+ |
+ for (size_t i = 0; i < kTotalSlicedBlobs; i++) { |
+ builders.emplace_back( |
+ new BlobDataBuilder(base::SizeTToString(i + kTotalRawBlobs))); |
+ size_t source_size = sizes[i]; |
+ size_t offset = sizes[i] == 1 ? 0 : i % (source_size - 1); |
kinuko
2016/12/01 05:10:04
When could sizes[i] be 1? |
Could we consistently avoid that case? |
dmurph
2016/12/01 20:41:00
Done.
|
+ size_t size = (i % (source_size - offset)) + 1; |
+ builders.back()->AppendBlob(base::SizeTToString(i), offset, size); |
+ sizes.push_back(size); |
kinuko
2016/12/01 05:10:04
When are these sizes used after this line?
dmurph
2016/12/01 20:41:00
They aren't removed.
|
+ } |
+ |
+ size_t total_finished_blobs = 0; |
+ std::vector<std::unique_ptr<BlobDataHandle>> handles; |
+ std::vector<BlobStatus> statuses; |
+ std::vector<bool> populated; |
+ statuses.resize(kTotalRawBlobs, |
+ BlobStatus::ERR_INVALID_CONSTRUCTION_ARGUMENTS); |
+ populated.resize(kTotalRawBlobs, false); |
+ for (size_t i = 0; i < builders.size(); i++) { |
+ BlobDataBuilder& builder = *builders[i]; |
+ builder.set_content_type("text/plain"); |
+ bool has_pending_memory = i < kTotalRawBlobs && (i % 2 != 0 || i % 3 == 0); |
+ std::unique_ptr<BlobDataHandle> handle = context_->BuildBlob( |
+ builder, |
+ has_pending_memory |
+ ? base::Bind(&SaveBlobStatusAndFiles, &statuses[0] + i, &files_) |
+ : BlobStorageContext::TransportAllowedCallback()); |
+ handle->RunOnConstructionComplete( |
+ base::Bind(&IncrementPointer, &total_finished_blobs)); |
+ handles.push_back(std::move(handle)); |
+ } |
+ base::RunLoop().RunUntilIdle(); |
+ |
+ // We should be needing to send a page or two to disk. |
+ EXPECT_TRUE(file_runner_->HasPendingTask()); |
+ do { |
+ file_runner_->RunPendingTasks(); |
+ base::RunLoop().RunUntilIdle(); |
+ // Continue populating data for items that can fit. |
+ for (size_t i = 0; i < kTotalRawBlobs; i++) { |
+ auto& builder = *builders[i]; |
+ bool has_pending_memory = (i % 2 != 0 || i % 3 == 0); |
+ if (has_pending_memory && !populated[i] && |
+ statuses[i] == BlobStatus::PENDING_TRANSPORT) { |
+ if (i % 2 != 0) { |
+ builder.PopulateFutureData(0, "abcde", 0, 5); |
+ if (i % 3 == 0) { |
+ builder.PopulateFutureData(1, "z", 0, 1); |
+ } |
+ } else if (i % 3 == 0) { |
+ scoped_refptr<ShareableFileReference> file_ref = |
+ ShareableFileReference::GetOrCreate( |
+ base::FilePath::FromUTF8Unsafe( |
+ base::SizeTToString(i + kTotalRawBlobs)), |
+ ShareableFileReference::DONT_DELETE_ON_FINAL_RELEASE, |
+ file_runner_.get()); |
+ builder.PopulateFutureFile(0, file_ref, base::Time::Max()); |
+ } |
+ context_->NotifyTransportComplete(base::SizeTToString(i)); |
+ populated[i] = true; |
+ } |
+ } |
+ base::RunLoop().RunUntilIdle(); |
+ } while (file_runner_->HasPendingTask()); |
+ |
+ for (size_t i = 0; i < populated.size(); i++) { |
+ bool has_pending_memory = (i % 2 != 0 || i % 3 == 0); |
+ if (has_pending_memory) |
+ EXPECT_TRUE(populated[i]) << i; |
+ } |
+ base::RunLoop().RunUntilIdle(); |
+ |
+ // We should be completely built now. |
+ EXPECT_EQ(kTotalRawBlobs + kTotalSlicedBlobs, total_finished_blobs); |
+ for (std::unique_ptr<BlobDataHandle>& handle : handles) { |
+ EXPECT_EQ(BlobStatus::DONE, handle->GetBlobStatus()); |
+ } |
+ handles.clear(); |
+ base::RunLoop().RunUntilIdle(); |
+ files_.clear(); |
+ // We should have file cleanup tasks. |
+ EXPECT_TRUE(file_runner_->HasPendingTask()); |
+ file_runner_->RunPendingTasks(); |
+ base::RunLoop().RunUntilIdle(); |
+ for (size_t i = 0; i < kTotalRawBlobs; i++) { |
+ bool has_pending_memory = (i % 2 != 0 || i % 3 == 0); |
+ if (has_pending_memory) |
+ EXPECT_EQ(BlobStatus::PENDING_TRANSPORT, statuses[i]) << i; |
+ } |
+ EXPECT_EQ(0lu, context_->memory_controller().memory_usage()); |
+ EXPECT_EQ(0lu, context_->memory_controller().disk_usage()); |
+} |
+ |
+// TODO(michaeln): tests for the deprecated url stuff |
-} // namespace content |
+} // namespace storage |