| Index: third_party/WebKit/Source/platform/SharedBuffer.cpp
|
| diff --git a/third_party/WebKit/Source/platform/SharedBuffer.cpp b/third_party/WebKit/Source/platform/SharedBuffer.cpp
|
| index add05a7b0e6dc5f8669f456b1237af1db5fb9cce..a696a696f89486ac84913c79c6b5b52234ba4fb7 100644
|
| --- a/third_party/WebKit/Source/platform/SharedBuffer.cpp
|
| +++ b/third_party/WebKit/Source/platform/SharedBuffer.cpp
|
| @@ -32,32 +32,21 @@
|
|
|
| namespace blink {
|
|
|
| -static inline size_t SegmentIndex(size_t position) {
|
| - return position / SharedBuffer::kSegmentSize;
|
| +SharedBuffer::SharedBuffer() {
|
| + current_step_ = SharedBufferStep::Create();
|
| }
|
|
|
| -static inline size_t OffsetInSegment(size_t position) {
|
| - return position % SharedBuffer::kSegmentSize;
|
| +SharedBuffer::SharedBuffer(size_t size) {
|
| + current_step_ = SharedBufferStep::Create(size);
|
| }
|
|
|
| -static inline char* AllocateSegment() {
|
| - return static_cast<char*>(WTF::Partitions::FastMalloc(
|
| - SharedBuffer::kSegmentSize, "blink::SharedBuffer"));
|
| -}
|
| -
|
| -static inline void FreeSegment(char* p) {
|
| - WTF::Partitions::FastFree(p);
|
| -}
|
| -
|
| -SharedBuffer::SharedBuffer() : size_(0) {}
|
| -
|
| -SharedBuffer::SharedBuffer(size_t size) : size_(size), buffer_(size) {}
|
| -
|
| -SharedBuffer::SharedBuffer(const char* data, size_t size) : size_(0) {
|
| +SharedBuffer::SharedBuffer(const char* data, size_t size) {
|
| + current_step_ = SharedBufferStep::Create();
|
| AppendInternal(data, size);
|
| }
|
|
|
| -SharedBuffer::SharedBuffer(const unsigned char* data, size_t size) : size_(0) {
|
| +SharedBuffer::SharedBuffer(const unsigned char* data, size_t size) {
|
| + current_step_ = SharedBufferStep::Create();
|
| AppendInternal(reinterpret_cast<const char*>(data), size);
|
| }
|
|
|
| @@ -67,18 +56,157 @@ SharedBuffer::~SharedBuffer() {
|
|
|
| PassRefPtr<SharedBuffer> SharedBuffer::AdoptVector(Vector<char>& vector) {
|
| RefPtr<SharedBuffer> buffer = Create();
|
| - buffer->buffer_.swap(vector);
|
| - buffer->size_ = buffer->buffer_.size();
|
| + buffer->current_step_ = SharedBufferStep::AdoptVector(vector);
|
| return buffer.Release();
|
| }
|
|
|
| +SharedBuffer::ThreadSafeStepper::Steps::Steps() {
|
| + for (size_t index = 0; index < kStepArraySize; ++index) {
|
| + step_holder_arr[index].self_keep = false;
|
| + step_holder_arr[index].keep_ref = false;
|
| + step_holder_arr[index].keep_count = 0;
|
| + }
|
| + next = nullptr;
|
| +}
|
| +
|
| +SharedBuffer::ThreadSafeStepper::ThreadSafeStepper(
|
| + PassRefPtr<SharedBufferStep> step) {
|
| + DCHECK(step);
|
| + steps_head_ = steps_tail_ = new Steps();
|
| + Steps::StepHolder* step_holder_to_write = steps_head_->step_holder_arr;
|
| + step_holder_to_write->step = std::move(step);
|
| + step_holder_to_write->self_keep = true;
|
| + step_holder_to_write->keep_ref = true;
|
| + std::atomic_store_explicit(&step_holder_to_write->keep_count,
|
| + static_cast<size_t>(1), std::memory_order_relaxed);
|
| + steps_to_write_ = steps_head_;
|
| + to_write_index_ = 1;
|
| + step_holder_to_read_ = step_holder_to_write;
|
| +}
|
| +
|
| +SharedBuffer::ThreadSafeStepper::~ThreadSafeStepper() {
|
| + for (Steps* steps = steps_head_; steps;) {
|
| + Steps* del_steps = steps;
|
| + steps = steps->next;
|
| + delete del_steps;
|
| + }
|
| +}
|
| +
|
| +void SharedBuffer::ThreadSafeStepper::ClearStepHolderSelfKeep(
|
| + SharedBuffer::ThreadSafeStepper::Steps::StepHolder* step_holder) {
|
| + if (std::atomic_fetch_sub_explicit(&step_holder->keep_count,
|
| + static_cast<size_t>(1),
|
| + std::memory_order_relaxed) == 0) {
|
| + ClearStepHolder(step_holder);
|
| + }
|
| + step_holder->self_keep = false;
|
| +}
|
| +
|
// Tears down a holder slot whose reference count has reached zero: drops the
// RefPtr to the step (so the SharedBufferStep can be destroyed) and marks
// the slot as no longer holding a reference. self_keep is managed by the
// caller, not here.
void SharedBuffer::ThreadSafeStepper::ClearStepHolder(
    SharedBuffer::ThreadSafeStepper::Steps::StepHolder* step_holder) {
  DCHECK_EQ(step_holder->keep_ref, true);
  DCHECK_EQ(step_holder->keep_count, static_cast<size_t>(0));
  step_holder->step.Clear();
  step_holder->keep_ref = false;
}
|
| +
|
| +PassRefPtr<SharedBufferStep> SharedBuffer::ThreadSafeStepper::current_step() {
|
| + while (true) {
|
| + if (!step_holder_to_read_->self_keep || !step_holder_to_read_->keep_ref) {
|
| + continue;
|
| + }
|
| + size_t keep_count = std::atomic_load_explicit(
|
| + &step_holder_to_read_->keep_count, std::memory_order_relaxed);
|
| + if (keep_count == 0) {
|
| + continue;
|
| + }
|
| + if (!std::atomic_compare_exchange_weak_explicit(
|
| + &step_holder_to_read_->keep_count, &keep_count, keep_count + 1,
|
| + std::memory_order_relaxed, std::memory_order_relaxed)) {
|
| + continue;
|
| + }
|
| +
|
| + RefPtr<SharedBufferStep> step = step_holder_to_read_->step;
|
| + if (std::atomic_fetch_sub_explicit(&step_holder_to_read_->keep_count,
|
| + static_cast<size_t>(1),
|
| + std::memory_order_relaxed) == 0) {
|
| + ClearStepHolder(step_holder_to_read_);
|
| + }
|
| +
|
| + return step.Release();
|
| + };
|
| +
|
| + NOTREACHED();
|
| + return nullptr;
|
| +}
|
| +
|
// Publishes |step| as the new current step for readers. Writer-side only:
// picks the next free holder slot (growing the Steps chain when no slot is
// reusable), stores the step there, then retires the previously published
// slot.
void SharedBuffer::ThreadSafeStepper::set_current_step(
    PassRefPtr<SharedBufferStep> step) {
  // Candidate slot: the next index in the current node, else the first slot
  // of the next node in the chain, else wrap around to the head hoping a
  // retired slot can be reused.
  Steps::StepHolder* step_holder_to_write = nullptr;
  if (to_write_index_ < Steps::kStepArraySize) {
    step_holder_to_write = steps_to_write_->step_holder_arr + to_write_index_;
  } else if (steps_to_write_->next) {
    steps_to_write_ = steps_to_write_->next;
    to_write_index_ = 0;
    step_holder_to_write = steps_to_write_->step_holder_arr;
  } else {
    steps_to_write_ = steps_head_;
    to_write_index_ = 0;
    step_holder_to_write = steps_to_write_->step_holder_arr;
  }

  // If the candidate still holds a live reference (a reader may be using
  // it), append a brand-new Steps node at the tail and write there instead
  // of waiting.
  if (!step_holder_to_write || step_holder_to_write->keep_ref) {
    steps_tail_->next = new Steps();
    steps_tail_ = steps_tail_->next;
    steps_to_write_ = steps_tail_;
    to_write_index_ = 0;
    step_holder_to_write = steps_to_write_->step_holder_arr;
  }

  // Populate the slot; keep_count == 1 is the slot's own self-reference.
  step_holder_to_write->step = std::move(step);
  step_holder_to_write->self_keep = true;
  step_holder_to_write->keep_ref = true;
  std::atomic_store_explicit(&step_holder_to_write->keep_count,
                             static_cast<size_t>(1), std::memory_order_relaxed);
  to_write_index_ += 1;

  // Swing readers over to the new slot, then drop the old slot's
  // self-reference so it can be reclaimed once all readers let go.
  // NOTE(review): the slot fields above are plain (relaxed) stores; a reader
  // could observe the new step_holder_to_read_ before the slot's contents --
  // confirm the intended memory-ordering guarantees.
  Steps::StepHolder* step_holder_to_clear = step_holder_to_read_;
  step_holder_to_read_ = step_holder_to_write;
  ClearStepHolderSelfKeep(step_holder_to_clear);
}
|
| +
|
| +PassRefPtr<SharedBuffer::ThreadSafeStepper> SharedBuffer::thread_safe_stepper()
|
| + const {
|
| + if (thread_safe_stepper_) {
|
| + return thread_safe_stepper_;
|
| + }
|
| +
|
| + thread_safe_stepper_ = ThreadSafeStepper::Create(current_step_);
|
| + return thread_safe_stepper_;
|
| +}
|
| +
|
| +void SharedBuffer::SynchronizeStepWithThreadStepper() const {
|
| + if (!thread_safe_stepper_) {
|
| + return;
|
| + }
|
| +
|
| + if (current_step_ != thread_safe_stepper_->current_step()) {
|
| + thread_safe_stepper_->set_current_step(current_step_);
|
| + }
|
| +}
|
| +
|
// Total number of bytes in the buffer, as tracked by the current step.
size_t SharedBuffer::size() const {
  return current_step_->size();
}
|
|
|
// Returns a pointer to the buffer's bytes as one contiguous run, first
// merging any segmented storage into the step's flat buffer.
const char* SharedBuffer::Data() const {
  // presumably forward_step_if_needed() reports that the current step may be
  // observed by another thread, so mutation must fork a new step -- TODO
  // confirm against the header.
  if (forward_step_if_needed()) {
    // Merging produced a replacement step; republish it for other threads.
    current_step_ = current_step_->MergeSegmentsIntoBuffer(true);
    SynchronizeStepWithThreadStepper();
  } else {
    current_step_->MergeSegmentsIntoBuffer(false);
  }
  return current_step_->buffer()->data();
}
|
|
|
| void SharedBuffer::Append(PassRefPtr<SharedBuffer> data) {
|
| @@ -91,39 +219,11 @@ void SharedBuffer::Append(PassRefPtr<SharedBuffer> data) {
|
| }
|
|
|
// Appends |length| bytes from |data| to the buffer via the current step.
void SharedBuffer::AppendInternal(const char* data, size_t length) {
  // presumably forward_step_if_needed() means the step is shared with other
  // threads and must not be mutated in place -- TODO confirm.
  if (forward_step_if_needed()) {
    // Appending forked a replacement step; republish it for other threads.
    current_step_ = current_step_->Append(data, length, true);
    SynchronizeStepWithThreadStepper();
  } else {
    current_step_->Append(data, length, false);
  }
}
|
|
|
| @@ -132,122 +232,46 @@ void SharedBuffer::Append(const Vector<char>& data) {
|
| }
|
|
|
// Discards all buffered bytes.
void SharedBuffer::Clear() {
  if (forward_step_if_needed()) {
    // Clearing forked a fresh, empty step.
    current_step_ = current_step_->Clear(true);
    // NOTE(review): unlike Data()/AppendInternal(), this drops the cached
    // stepper entirely instead of calling SynchronizeStepWithThreadStepper()
    // -- confirm that is intentional.
    thread_safe_stepper_.Clear();
  } else {
    current_step_->Clear(false);
  }
}
|
|
|
// Returns a flat copy of the whole buffer, appending each stored segment in
// order.
Vector<char> SharedBuffer::Copy() const {
  Vector<char> buffer;
  // Reserve the final size up front so the appends below never reallocate.
  buffer.ReserveInitialCapacity(current_step_->size());

  ForEachSegment([&buffer](const char* segment, size_t segment_size,
                           size_t segment_offset) {
    buffer.Append(segment, segment_size);
  });

  DCHECK_EQ(buffer.size(), current_step_->size());
  return buffer;
}
|
|
|
| -void SharedBuffer::MergeSegmentsIntoBuffer() const {
|
| - size_t buffer_size = buffer_.size();
|
| - if (size_ > buffer_size) {
|
| - size_t bytes_left = size_ - buffer_size;
|
| - for (size_t i = 0; i < segments_.size(); ++i) {
|
| - size_t bytes_to_copy =
|
| - std::min(bytes_left, static_cast<size_t>(kSegmentSize));
|
| - buffer_.Append(segments_[i], bytes_to_copy);
|
| - bytes_left -= bytes_to_copy;
|
| - FreeSegment(segments_[i]);
|
| - }
|
| - segments_.clear();
|
| - }
|
| -}
|
| -
|
// Points |some_data| at the contiguous run containing |position| and returns
// that run's length; delegates the segment arithmetic to the current step.
size_t SharedBuffer::GetSomeDataInternal(const char*& some_data,
                                         size_t position) const {
  return current_step_->GetSomeData(some_data, position);
}
|
|
|
// Copies up to |byte_length| bytes into |dest|, returning whether the full
// length was satisfied; delegates to the current step.
bool SharedBuffer::GetBytesInternal(void* dest, size_t byte_length) const {
  return current_step_->GetBytes(dest, byte_length);
}
|
|
|
// Materializes the buffer's contents as an SkData blob via the current step.
sk_sp<SkData> SharedBuffer::GetAsSkData() const {
  return current_step_->GetAsSkData();
}
|
|
|
| void SharedBuffer::OnMemoryDump(const String& dump_prefix,
|
| WebProcessMemoryDump* memory_dump) const {
|
| - if (buffer_.size()) {
|
| + if (current_step_->buffer()->size()) {
|
| WebMemoryAllocatorDump* dump =
|
| memory_dump->CreateMemoryAllocatorDump(dump_prefix + "/shared_buffer");
|
| - dump->AddScalar("size", "bytes", buffer_.size());
|
| + dump->AddScalar("size", "bytes", current_step_->buffer()->size());
|
| memory_dump->AddSuballocation(
|
| dump->Guid(), String(WTF::Partitions::kAllocatedObjectPoolName));
|
| } else {
|
| @@ -255,7 +279,7 @@ void SharedBuffer::OnMemoryDump(const String& dump_prefix,
|
| // using fastMalloc.
|
| const String data_dump_name = dump_prefix + "/segments";
|
| auto dump = memory_dump->CreateMemoryAllocatorDump(data_dump_name);
|
| - dump->AddScalar("size", "bytes", size_);
|
| + dump->AddScalar("size", "bytes", size());
|
| memory_dump->AddSuballocation(
|
| dump->Guid(), String(WTF::Partitions::kAllocatedObjectPoolName));
|
| }
|
|
|