Index: webkit/blob/blob_storage_controller.cc
===================================================================
--- webkit/blob/blob_storage_controller.cc (revision 105887)
+++ webkit/blob/blob_storage_controller.cc (working copy)
@@ -29,23 +29,37 @@
   return GURL(url.spec().substr(0, hash_pos));
 }
 
+static const int64 kMaxMemoryUsage = 1024 * 1024 * 1024;  // 1G
+
 }  // namespace
 
-BlobStorageController::BlobStorageController() {
+BlobStorageController::BlobStorageController()
+    : memory_usage_(0) {
 }
 
 BlobStorageController::~BlobStorageController() {
 }
 
-void BlobStorageController::RegisterBlobUrl(
-    const GURL& url, const BlobData* blob_data) {
+void BlobStorageController::StartBuildingBlob(const GURL& url) {
   DCHECK(url.SchemeIs("blob"));
   DCHECK(!BlobUrlHasRef(url));
+  BlobData* blob_data = new BlobData;
+  unfinalized_blob_map_[url.spec()] = blob_data;
+  IncrementBlobDataUsage(blob_data);
+}
 
-  scoped_refptr<BlobData> target_blob_data(new BlobData());
-  target_blob_data->set_content_type(blob_data->content_type());
-  target_blob_data->set_content_disposition(blob_data->content_disposition());
+void BlobStorageController::AppendBlobDataItem(
+    const GURL& url, const BlobData::Item& item) {
+  DCHECK(url.SchemeIs("blob"));
+  DCHECK(!BlobUrlHasRef(url));
+  BlobMap::iterator found = unfinalized_blob_map_.find(url.spec());
+  if (found == unfinalized_blob_map_.end())
+    return;
+  BlobData* target_blob_data = found->second;
+  DCHECK(target_blob_data);
 
+  memory_usage_ -= target_blob_data->GetMemoryUsage();
+
   // The blob data is stored in the "canonical" way. That is, it only contains a
   // list of Data and File items.
   // 1) The Data item is denoted by the raw data and the range.
@@ -54,40 +68,67 @@
   // All the Blob items in the passing blob data are resolved and expanded into
   // a set of Data and File items.
 
-  for (std::vector<BlobData::Item>::const_iterator iter =
-           blob_data->items().begin();
-       iter != blob_data->items().end(); ++iter) {
-    switch (iter->type()) {
-      case BlobData::TYPE_DATA: {
-        // WebBlobData does not allow partial data.
-        DCHECK(!(iter->offset()) && iter->length() == iter->data().size());
-        target_blob_data->AppendData(iter->data());
-        break;
-      }
-      case BlobData::TYPE_FILE:
-        AppendFileItem(target_blob_data,
-                       iter->file_path(),
-                       iter->offset(),
-                       iter->length(),
-                       iter->expected_modification_time());
-        break;
-      case BlobData::TYPE_BLOB: {
-        BlobData* src_blob_data = GetBlobDataFromUrl(iter->blob_url());
-        DCHECK(src_blob_data);
-        if (src_blob_data)
-          AppendStorageItems(target_blob_data.get(),
-                             src_blob_data,
-                             iter->offset(),
-                             iter->length());
-        break;
-      }
-    }
+  switch (item.type) {
+    case BlobData::TYPE_DATA:
+      // WebBlobData does not allow partial data.
+      DCHECK(!(item.offset) && item.length == item.data.size());
+      target_blob_data->AppendData(item.data.c_str(), item.data.size());
+      break;
+    case BlobData::TYPE_DATA_EXTERNAL:
+      DCHECK(!item.offset);
+      target_blob_data->AppendData(item.data_external, item.length);
+      break;
+    case BlobData::TYPE_FILE:
+      AppendFileItem(target_blob_data,
+                     item.file_path,
+                     item.offset,
+                     item.length,
+                     item.expected_modification_time);
+      break;
+    case BlobData::TYPE_BLOB:
+      BlobData* src_blob_data = GetBlobDataFromUrl(item.blob_url);
+      DCHECK(src_blob_data);
+      if (src_blob_data)
+        AppendStorageItems(target_blob_data,
+                           src_blob_data,
+                           item.offset,
+                           item.length);
+      break;
   }
 
-  blob_map_[url.spec()] = target_blob_data;
+  memory_usage_ += target_blob_data->GetMemoryUsage();
+
+  // If we're using too much memory, drop this blob.
+  // TODO(michaeln): Blob memory storage does not yet spill over to disk,
+  // until it does, we'll prevent memory usage over a max amount.
+  if (memory_usage_ > kMaxMemoryUsage)
+    RemoveBlob(url);
 }
 
-void BlobStorageController::RegisterBlobUrlFrom(
+void BlobStorageController::FinishBuildingBlob(
+    const GURL& url, const std::string& content_type) {
+  DCHECK(url.SchemeIs("blob"));
+  DCHECK(!BlobUrlHasRef(url));
+  BlobMap::iterator found = unfinalized_blob_map_.find(url.spec());
+  if (found == unfinalized_blob_map_.end())
+    return;
+  found->second->set_content_type(content_type);
+  blob_map_[url.spec()] = found->second;
+  unfinalized_blob_map_.erase(found);
+}
+
+void BlobStorageController::AddFinishedBlob(const GURL& url,
+                                            const BlobData* data) {
+  StartBuildingBlob(url);
+  for (std::vector<BlobData::Item>::const_iterator iter =
+           data->items().begin();
+       iter != data->items().end(); ++iter) {
+    AppendBlobDataItem(url, *iter);
+  }
+  FinishBuildingBlob(url, data->content_type());
+}
+
+void BlobStorageController::CloneBlob(
     const GURL& url, const GURL& src_url) {
   DCHECK(url.SchemeIs("blob"));
   DCHECK(!BlobUrlHasRef(url));
@@ -98,12 +139,29 @@
     return;
 
   blob_map_[url.spec()] = blob_data;
+  IncrementBlobDataUsage(blob_data);
 }
 
-void BlobStorageController::UnregisterBlobUrl(const GURL& url) {
-  blob_map_.erase(url.spec());
+void BlobStorageController::RemoveBlob(const GURL& url) {
+  DCHECK(url.SchemeIs("blob"));
+  DCHECK(!BlobUrlHasRef(url));
+
+  if (!RemoveFromMapHelper(&unfinalized_blob_map_, url))
+    RemoveFromMapHelper(&blob_map_, url);
 }
 
+bool BlobStorageController::RemoveFromMapHelper(
+    BlobMap* map, const GURL& url) {
+  BlobMap::iterator found = map->find(url.spec());
+  if (found == map->end())
+    return false;
+  if (DecrementBlobDataUsage(found->second))
+    memory_usage_ -= found->second->GetMemoryUsage();
+  map->erase(found);
+  return true;
+}
+
+
 BlobData* BlobStorageController::GetBlobDataFromUrl(const GURL& url) {
   BlobMap::iterator found = blob_map_.find(
       BlobUrlHasRef(url) ? ClearBlobUrlRef(url).spec() : url.spec());
@@ -123,7 +181,7 @@
     }
 
     // Find the referred blob data.
-    webkit_blob::BlobData* blob_data = GetBlobDataFromUrl(iter->blob_url());
+    BlobData* blob_data = GetBlobDataFromUrl(iter->blob_url());
     DCHECK(blob_data);
     if (!blob_data) {
       // TODO(jianli): We should probably fail uploading the data
@@ -146,22 +204,22 @@
     for (size_t i = blob_data->items().size(); i > 0; --i) {
       iter = uploads->insert(iter, net::UploadData::Element());
 
-      const webkit_blob::BlobData::Item& item = blob_data->items().at(i - 1);
-      switch (item.type()) {
-        case webkit_blob::BlobData::TYPE_DATA:
+      const BlobData::Item& item = blob_data->items().at(i - 1);
+      switch (item.type) {
+        case BlobData::TYPE_DATA:
           // TODO(jianli): Figure out how to avoid copying the data.
           iter->SetToBytes(
-              &item.data().at(0) + static_cast<int>(item.offset()),
-              static_cast<int>(item.length()));
+              &item.data.at(0) + static_cast<int>(item.offset),
+              static_cast<int>(item.length));
           break;
-        case webkit_blob::BlobData::TYPE_FILE:
+        case BlobData::TYPE_FILE:
           // TODO(michaeln): Ensure that any temp files survive till the
           // net::URLRequest is done with the upload.
           iter->SetToFilePathRange(
-              item.file_path(),
-              item.offset(),
-              item.length(),
-              item.expected_modification_time());
+              item.file_path,
+              item.offset,
+              item.length,
+              item.expected_modification_time);
           break;
         default:
           NOTREACHED();
@@ -181,27 +239,27 @@
       src_blob_data->items().begin();
   if (offset) {
     for (; iter != src_blob_data->items().end(); ++iter) {
-      if (offset >= iter->length())
-        offset -= iter->length();
+      if (offset >= iter->length)
+        offset -= iter->length;
       else
         break;
     }
   }
 
   for (; iter != src_blob_data->items().end() && length > 0; ++iter) {
-    uint64 current_length = iter->length() - offset;
+    uint64 current_length = iter->length - offset;
     uint64 new_length = current_length > length ? length : current_length;
-    if (iter->type() == BlobData::TYPE_DATA) {
-      target_blob_data->AppendData(iter->data(),
-                                   static_cast<uint32>(iter->offset() + offset),
-                                   static_cast<uint32>(new_length));
+    if (iter->type == BlobData::TYPE_DATA) {
+      target_blob_data->AppendData(
+          iter->data.c_str() + static_cast<size_t>(iter->offset + offset),
+          static_cast<uint32>(new_length));
     } else {
-      DCHECK(iter->type() == BlobData::TYPE_FILE);
+      DCHECK(iter->type == BlobData::TYPE_FILE);
       AppendFileItem(target_blob_data,
-                     iter->file_path(),
-                     iter->offset() + offset,
+                     iter->file_path,
+                     iter->offset + offset,
                      new_length,
-                     iter->expected_modification_time());
+                     iter->expected_modification_time);
     }
     length -= new_length;
     offset = 0;
@@ -222,4 +280,17 @@
   target_blob_data->AttachDeletableFileReference(deletable_file);
 }
 
+void BlobStorageController::IncrementBlobDataUsage(BlobData* blob_data) {
+  blob_data_usage_count_[blob_data] += 1;
+}
+
+bool BlobStorageController::DecrementBlobDataUsage(BlobData* blob_data) {
+  BlobDataUsageMap::iterator found = blob_data_usage_count_.find(blob_data);
+  DCHECK(found != blob_data_usage_count_.end());
+  if (--(found->second))
+    return false;  // Still in use
+  blob_data_usage_count_.erase(found);
+  return true;
+}
+
 }  // namespace webkit_blob
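
Usage note (reviewer sketch): this change replaces the one-shot RegisterBlobUrl()/RegisterBlobUrlFrom()/UnregisterBlobUrl() calls with a build-then-finalize flow. The snippet below is a minimal caller-side sketch of the new sequence; the blob URLs and the pre-populated BlobData::Item are assumptions made for illustration (in practice they arrive from the renderer over IPC), while the controller methods are the ones added in this patch.

  // Illustrative only; not part of this patch.
  #include <string>

  #include "googleurl/src/gurl.h"
  #include "webkit/blob/blob_data.h"
  #include "webkit/blob/blob_storage_controller.h"

  void BuildExampleBlob(webkit_blob::BlobStorageController* controller,
                        const webkit_blob::BlobData::Item& item) {
    GURL blob_url("blob:example-uuid");  // hypothetical URL, no #ref allowed

    // 1) Open an entry in unfinalized_blob_map_ and take a use-count ref.
    controller->StartBuildingBlob(blob_url);

    // 2) Append items one at a time; each call re-folds the item into the
    //    canonical Data/File list and updates memory_usage_. If the global
    //    cap is exceeded, the blob is silently dropped.
    controller->AppendBlobDataItem(blob_url, item);

    // 3) Seal the blob: it moves from unfinalized_blob_map_ to blob_map_
    //    and becomes resolvable via GetBlobDataFromUrl().
    controller->FinishBuildingBlob(blob_url, "text/plain");

    // A second URL can share the same underlying BlobData ...
    GURL alias_url("blob:example-alias");
    controller->CloneBlob(alias_url, blob_url);

    // ... and the backing memory is only uncharged when the last URL goes.
    controller->RemoveBlob(blob_url);
    controller->RemoveBlob(alias_url);
  }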
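
The subtle part of the bookkeeping is the interaction between blob_data_usage_count_ and memory_usage_: CloneBlob() bumps the per-BlobData use count without charging memory_usage_ again, and RemoveFromMapHelper() only subtracts GetMemoryUsage() once DecrementBlobDataUsage() reports that the last reference is gone. The standalone toy program below (plain C++, not Chromium code, all names invented) mirrors that rule.

  #include <cassert>
  #include <map>

  int main() {
    std::map<int, int> use_count;  // stand-in for blob_data_usage_count_
    long long memory_usage = 0;    // stand-in for memory_usage_

    const int kBlob = 1;           // stand-in for a BlobData*
    const long long kSize = 100;   // stand-in for GetMemoryUsage()

    // StartBuildingBlob + AppendBlobDataItem: charge the bytes once.
    use_count[kBlob] += 1;
    memory_usage += kSize;

    // CloneBlob: a second URL shares the data; no additional charge.
    use_count[kBlob] += 1;

    // RemoveBlob on the first URL: still referenced, nothing is released.
    if (--use_count[kBlob] == 0)
      memory_usage -= kSize;
    assert(memory_usage == kSize);

    // RemoveBlob on the last URL: the charge is finally released.
    if (--use_count[kBlob] == 0)
      memory_usage -= kSize;
    assert(memory_usage == 0);
    return 0;
  }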