Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(255)

Side by Side Diff: third_party/WebKit/Source/platform/SharedBuffer.cpp

Issue 2918443003: Remove redundant reading and writing of data about SharedBuffer.
Patch Set: benchmark Created 3 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2006, 2008 Apple Inc. All rights reserved. 2 * Copyright (C) 2006, 2008 Apple Inc. All rights reserved.
3 * Copyright (C) Research In Motion Limited 2009-2010. All rights reserved. 3 * Copyright (C) Research In Motion Limited 2009-2010. All rights reserved.
4 * 4 *
5 * Redistribution and use in source and binary forms, with or without 5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions 6 * modification, are permitted provided that the following conditions
7 * are met: 7 * are met:
8 * 1. Redistributions of source code must retain the above copyright 8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright 10 * 2. Redistributions in binary form must reproduce the above copyright
(...skipping 14 matching lines...) Expand all
25 */ 25 */
26 26
27 #include "platform/SharedBuffer.h" 27 #include "platform/SharedBuffer.h"
28 28
29 #include "platform/instrumentation/tracing/web_process_memory_dump.h" 29 #include "platform/instrumentation/tracing/web_process_memory_dump.h"
30 #include "platform/wtf/text/UTF8.h" 30 #include "platform/wtf/text/UTF8.h"
31 #include "platform/wtf/text/Unicode.h" 31 #include "platform/wtf/text/Unicode.h"
32 32
33 namespace blink { 33 namespace blink {
34 34
35 static inline size_t SegmentIndex(size_t position) { 35 SharedBuffer::SharedBuffer() {
36 return position / SharedBuffer::kSegmentSize; 36 current_step_ = SharedBufferStep::Create();
37 } 37 }
38 38
39 static inline size_t OffsetInSegment(size_t position) { 39 SharedBuffer::SharedBuffer(size_t size) {
40 return position % SharedBuffer::kSegmentSize; 40 current_step_ = SharedBufferStep::Create(size);
41 } 41 }
42 42
43 static inline char* AllocateSegment() { 43 SharedBuffer::SharedBuffer(const char* data, size_t size) {
44 return static_cast<char*>(WTF::Partitions::FastMalloc( 44 current_step_ = SharedBufferStep::Create();
45 SharedBuffer::kSegmentSize, "blink::SharedBuffer"));
46 }
47
48 static inline void FreeSegment(char* p) {
49 WTF::Partitions::FastFree(p);
50 }
51
52 SharedBuffer::SharedBuffer() : size_(0) {}
53
54 SharedBuffer::SharedBuffer(size_t size) : size_(size), buffer_(size) {}
55
56 SharedBuffer::SharedBuffer(const char* data, size_t size) : size_(0) {
57 AppendInternal(data, size); 45 AppendInternal(data, size);
58 } 46 }
59 47
60 SharedBuffer::SharedBuffer(const unsigned char* data, size_t size) : size_(0) { 48 SharedBuffer::SharedBuffer(const unsigned char* data, size_t size) {
49 current_step_ = SharedBufferStep::Create();
61 AppendInternal(reinterpret_cast<const char*>(data), size); 50 AppendInternal(reinterpret_cast<const char*>(data), size);
62 } 51 }
63 52
64 SharedBuffer::~SharedBuffer() { 53 SharedBuffer::~SharedBuffer() {
65 Clear(); 54 Clear();
66 } 55 }
67 56
68 PassRefPtr<SharedBuffer> SharedBuffer::AdoptVector(Vector<char>& vector) { 57 PassRefPtr<SharedBuffer> SharedBuffer::AdoptVector(Vector<char>& vector) {
69 RefPtr<SharedBuffer> buffer = Create(); 58 RefPtr<SharedBuffer> buffer = Create();
70 buffer->buffer_.swap(vector); 59 buffer->current_step_ = SharedBufferStep::AdoptVector(vector);
71 buffer->size_ = buffer->buffer_.size();
72 return buffer.Release(); 60 return buffer.Release();
73 } 61 }
74 62
63 SharedBuffer::ThreadSafeStepper::Steps::Steps() {
64 for (size_t index = 0; index < kStepArraySize; ++index) {
65 step_holder_arr[index].self_keep = false;
66 step_holder_arr[index].keep_ref = false;
67 step_holder_arr[index].keep_count = 0;
68 }
69 next = nullptr;
70 }
71
72 SharedBuffer::ThreadSafeStepper::ThreadSafeStepper(
73 PassRefPtr<SharedBufferStep> step) {
74 DCHECK(step);
75 steps_head_ = steps_tail_ = new Steps();
76 Steps::StepHolder* step_holder_to_write = steps_head_->step_holder_arr;
77 step_holder_to_write->step = std::move(step);
78 step_holder_to_write->self_keep = true;
79 step_holder_to_write->keep_ref = true;
80 std::atomic_store_explicit(&step_holder_to_write->keep_count,
81 static_cast<size_t>(1), std::memory_order_relaxed);
82 steps_to_write_ = steps_head_;
83 to_write_index_ = 1;
84 step_holder_to_read_ = step_holder_to_write;
85 }
86
87 SharedBuffer::ThreadSafeStepper::~ThreadSafeStepper() {
88 for (Steps* steps = steps_head_; steps;) {
89 Steps* del_steps = steps;
90 steps = steps->next;
91 delete del_steps;
92 }
93 }
94
95 void SharedBuffer::ThreadSafeStepper::ClearStepHolderSelfKeep(
96 SharedBuffer::ThreadSafeStepper::Steps::StepHolder* step_holder) {
97 if (std::atomic_fetch_sub_explicit(&step_holder->keep_count,
98 static_cast<size_t>(1),
99 std::memory_order_relaxed) == 0) {
100 ClearStepHolder(step_holder);
101 }
102 step_holder->self_keep = false;
103 }
104
105 void SharedBuffer::ThreadSafeStepper::ClearStepHolder(
106 SharedBuffer::ThreadSafeStepper::Steps::StepHolder* step_holder) {
107 DCHECK_EQ(step_holder->keep_ref, true);
108 DCHECK_EQ(step_holder->keep_count, static_cast<size_t>(0));
109 step_holder->step.Clear();
110 step_holder->keep_ref = false;
111 }
112
113 PassRefPtr<SharedBufferStep> SharedBuffer::ThreadSafeStepper::current_step() {
114 while (true) {
115 if (!step_holder_to_read_->self_keep || !step_holder_to_read_->keep_ref) {
116 continue;
117 }
118 size_t keep_count = std::atomic_load_explicit(
119 &step_holder_to_read_->keep_count, std::memory_order_relaxed);
120 if (keep_count == 0) {
121 continue;
122 }
123 if (!std::atomic_compare_exchange_weak_explicit(
124 &step_holder_to_read_->keep_count, &keep_count, keep_count + 1,
125 std::memory_order_relaxed, std::memory_order_relaxed)) {
126 continue;
127 }
128
129 RefPtr<SharedBufferStep> step = step_holder_to_read_->step;
130 if (std::atomic_fetch_sub_explicit(&step_holder_to_read_->keep_count,
131 static_cast<size_t>(1),
132 std::memory_order_relaxed) == 0) {
133 ClearStepHolder(step_holder_to_read_);
134 }
135
136 return step.Release();
137 };
138
139 NOTREACHED();
140 return nullptr;
141 }
142
// Publishes |step| as the value readers of current_step() will observe.
// Writer-side only: picks the next free holder slot (advancing through the
// Steps list and growing it when every candidate slot is still referenced),
// fills it, repoints step_holder_to_read_, and finally drops the
// self-reference of the previously published holder.
void SharedBuffer::ThreadSafeStepper::set_current_step(
    PassRefPtr<SharedBufferStep> step) {
  // Step 1: choose a candidate slot.
  Steps::StepHolder* step_holder_to_write = nullptr;
  if (to_write_index_ < Steps::kStepArraySize) {
    // Room left in the current node.
    step_holder_to_write = steps_to_write_->step_holder_arr + to_write_index_;
  } else if (steps_to_write_->next) {
    // Current node exhausted; advance to the next node in the list.
    steps_to_write_ = steps_to_write_->next;
    to_write_index_ = 0;
    step_holder_to_write = steps_to_write_->step_holder_arr;
  } else {
    // End of the list; wrap to the head hoping earlier slots were released.
    steps_to_write_ = steps_head_;
    to_write_index_ = 0;
    step_holder_to_write = steps_to_write_->step_holder_arr;
  }

  // Step 2: if the candidate is still referenced (keep_ref set), append a
  // fresh Steps node and use its first slot instead.
  if (!step_holder_to_write || step_holder_to_write->keep_ref) {
    steps_tail_->next = new Steps();
    steps_tail_ = steps_tail_->next;
    steps_to_write_ = steps_tail_;
    to_write_index_ = 0;
    step_holder_to_write = steps_to_write_->step_holder_arr;
  }

  // Step 3: populate the slot.  keep_count starts at 1, representing the
  // holder's own self_keep reference.
  step_holder_to_write->step = std::move(step);
  step_holder_to_write->self_keep = true;
  step_holder_to_write->keep_ref = true;
  std::atomic_store_explicit(&step_holder_to_write->keep_count,
                             static_cast<size_t>(1), std::memory_order_relaxed);
  to_write_index_ += 1;

  // Step 4: publish the new slot to readers, then release the old one.
  // NOTE(review): step_holder_to_read_ is written here and read from other
  // threads in current_step() without an atomic type — presumably relying
  // on platform word-store atomicity; confirm this is intentional.
  Steps::StepHolder* step_holder_to_clear = step_holder_to_read_;
  step_holder_to_read_ = step_holder_to_write;
  ClearStepHolderSelfKeep(step_holder_to_clear);
}
177
178 PassRefPtr<SharedBuffer::ThreadSafeStepper> SharedBuffer::thread_safe_stepper()
179 const {
180 if (thread_safe_stepper_) {
181 return thread_safe_stepper_;
182 }
183
184 thread_safe_stepper_ = ThreadSafeStepper::Create(current_step_);
185 return thread_safe_stepper_;
186 }
187
188 void SharedBuffer::SynchronizeStepWithThreadStepper() const {
189 if (!thread_safe_stepper_) {
190 return;
191 }
192
193 if (current_step_ != thread_safe_stepper_->current_step()) {
194 thread_safe_stepper_->set_current_step(current_step_);
195 }
196 }
197
75 size_t SharedBuffer::size() const { 198 size_t SharedBuffer::size() const {
76 return size_; 199 return current_step_->size();
77 } 200 }
78 201
79 const char* SharedBuffer::Data() const { 202 const char* SharedBuffer::Data() const {
80 MergeSegmentsIntoBuffer(); 203 if (forward_step_if_needed()) {
81 return buffer_.data(); 204 current_step_ = current_step_->MergeSegmentsIntoBuffer(true);
205 SynchronizeStepWithThreadStepper();
206 } else {
207 current_step_->MergeSegmentsIntoBuffer(false);
208 }
209 return current_step_->buffer()->data();
82 } 210 }
83 211
84 void SharedBuffer::Append(PassRefPtr<SharedBuffer> data) { 212 void SharedBuffer::Append(PassRefPtr<SharedBuffer> data) {
85 const char* segment; 213 const char* segment;
86 size_t position = 0; 214 size_t position = 0;
87 while (size_t length = data->GetSomeDataInternal(segment, position)) { 215 while (size_t length = data->GetSomeDataInternal(segment, position)) {
88 Append(segment, length); 216 Append(segment, length);
89 position += length; 217 position += length;
90 } 218 }
91 } 219 }
92 220
93 void SharedBuffer::AppendInternal(const char* data, size_t length) { 221 void SharedBuffer::AppendInternal(const char* data, size_t length) {
94 if (!length) 222 if (forward_step_if_needed()) {
95 return; 223 current_step_ = current_step_->Append(data, length, true);
96 224 SynchronizeStepWithThreadStepper();
97 DCHECK_GE(size_, buffer_.size()); 225 } else {
98 size_t position_in_segment = OffsetInSegment(size_ - buffer_.size()); 226 current_step_->Append(data, length, false);
99 size_ += length;
100
101 if (size_ <= kSegmentSize) {
102 // No need to use segments for small resource data.
103 buffer_.Append(data, length);
104 return;
105 }
106
107 char* segment;
108 if (!position_in_segment) {
109 segment = AllocateSegment();
110 segments_.push_back(segment);
111 } else
112 segment = segments_.back() + position_in_segment;
113
114 size_t segment_free_space = kSegmentSize - position_in_segment;
115 size_t bytes_to_copy = std::min(length, segment_free_space);
116
117 for (;;) {
118 memcpy(segment, data, bytes_to_copy);
119 if (length == bytes_to_copy)
120 break;
121
122 length -= bytes_to_copy;
123 data += bytes_to_copy;
124 segment = AllocateSegment();
125 segments_.push_back(segment);
126 bytes_to_copy = std::min(length, static_cast<size_t>(kSegmentSize));
127 } 227 }
128 } 228 }
129 229
130 void SharedBuffer::Append(const Vector<char>& data) { 230 void SharedBuffer::Append(const Vector<char>& data) {
131 Append(data.data(), data.size()); 231 Append(data.data(), data.size());
132 } 232 }
133 233
134 void SharedBuffer::Clear() { 234 void SharedBuffer::Clear() {
135 for (size_t i = 0; i < segments_.size(); ++i) 235 if (forward_step_if_needed()) {
136 FreeSegment(segments_[i]); 236 current_step_ = current_step_->Clear(true);
137 237 thread_safe_stepper_.Clear();
138 segments_.clear(); 238 } else {
139 size_ = 0; 239 current_step_->Clear(false);
140 buffer_.clear(); 240 }
141 } 241 }
142 242
143 Vector<char> SharedBuffer::Copy() const { 243 Vector<char> SharedBuffer::Copy() const {
144 Vector<char> buffer; 244 Vector<char> buffer;
145 buffer.ReserveInitialCapacity(size_); 245 buffer.ReserveInitialCapacity(current_step_->size());
146 246
147 ForEachSegment([&buffer](const char* segment, size_t segment_size, 247 ForEachSegment([&buffer](const char* segment, size_t segment_size,
148 size_t segment_offset) { 248 size_t segment_offset) {
149 buffer.Append(segment, segment_size); 249 buffer.Append(segment, segment_size);
150 }); 250 });
151 251
152 DCHECK_EQ(buffer.size(), size_); 252 DCHECK_EQ(buffer.size(), current_step_->size());
153 return buffer; 253 return buffer;
154 } 254 }
155 255
156 void SharedBuffer::MergeSegmentsIntoBuffer() const {
157 size_t buffer_size = buffer_.size();
158 if (size_ > buffer_size) {
159 size_t bytes_left = size_ - buffer_size;
160 for (size_t i = 0; i < segments_.size(); ++i) {
161 size_t bytes_to_copy =
162 std::min(bytes_left, static_cast<size_t>(kSegmentSize));
163 buffer_.Append(segments_[i], bytes_to_copy);
164 bytes_left -= bytes_to_copy;
165 FreeSegment(segments_[i]);
166 }
167 segments_.clear();
168 }
169 }
170
171 size_t SharedBuffer::GetSomeDataInternal(const char*& some_data, 256 size_t SharedBuffer::GetSomeDataInternal(const char*& some_data,
172 size_t position) const { 257 size_t position) const {
173 size_t total_size = size(); 258 return current_step_->GetSomeData(some_data, position);
174 if (position >= total_size) {
175 some_data = 0;
176 return 0;
177 }
178
179 SECURITY_DCHECK(position < size_);
180 size_t consecutive_size = buffer_.size();
181 if (position < consecutive_size) {
182 some_data = buffer_.data() + position;
183 return consecutive_size - position;
184 }
185
186 position -= consecutive_size;
187 size_t segments = segments_.size();
188 size_t max_segmented_size = segments * kSegmentSize;
189 size_t segment = SegmentIndex(position);
190 if (segment < segments) {
191 size_t bytes_left = total_size - consecutive_size;
192 size_t segmented_size = std::min(max_segmented_size, bytes_left);
193
194 size_t position_in_segment = OffsetInSegment(position);
195 some_data = segments_[segment] + position_in_segment;
196 return segment == segments - 1 ? segmented_size - position
197 : kSegmentSize - position_in_segment;
198 }
199 NOTREACHED();
200 return 0;
201 } 259 }
202 260
203 bool SharedBuffer::GetBytesInternal(void* dest, size_t byte_length) const { 261 bool SharedBuffer::GetBytesInternal(void* dest, size_t byte_length) const {
204 if (!dest) 262 return current_step_->GetBytes(dest, byte_length);
205 return false;
206
207 const char* segment = nullptr;
208 size_t load_position = 0;
209 size_t write_position = 0;
210 while (byte_length > 0) {
211 size_t load_size = GetSomeDataInternal(segment, load_position);
212 if (load_size == 0)
213 break;
214
215 if (byte_length < load_size)
216 load_size = byte_length;
217 memcpy(static_cast<char*>(dest) + write_position, segment, load_size);
218 load_position += load_size;
219 write_position += load_size;
220 byte_length -= load_size;
221 }
222
223 return byte_length == 0;
224 } 263 }
225 264
226 sk_sp<SkData> SharedBuffer::GetAsSkData() const { 265 sk_sp<SkData> SharedBuffer::GetAsSkData() const {
227 size_t buffer_length = size(); 266 return current_step_->GetAsSkData();
228 sk_sp<SkData> data = SkData::MakeUninitialized(buffer_length);
229 char* buffer = static_cast<char*>(data->writable_data());
230 const char* segment = 0;
231 size_t position = 0;
232 while (size_t segment_size = GetSomeDataInternal(segment, position)) {
233 memcpy(buffer + position, segment, segment_size);
234 position += segment_size;
235 }
236
237 if (position != buffer_length) {
238 NOTREACHED();
239 // Don't return the incomplete SkData.
240 return nullptr;
241 }
242 return data;
243 } 267 }
244 268
245 void SharedBuffer::OnMemoryDump(const String& dump_prefix, 269 void SharedBuffer::OnMemoryDump(const String& dump_prefix,
246 WebProcessMemoryDump* memory_dump) const { 270 WebProcessMemoryDump* memory_dump) const {
247 if (buffer_.size()) { 271 if (current_step_->buffer()->size()) {
248 WebMemoryAllocatorDump* dump = 272 WebMemoryAllocatorDump* dump =
249 memory_dump->CreateMemoryAllocatorDump(dump_prefix + "/shared_buffer"); 273 memory_dump->CreateMemoryAllocatorDump(dump_prefix + "/shared_buffer");
250 dump->AddScalar("size", "bytes", buffer_.size()); 274 dump->AddScalar("size", "bytes", current_step_->buffer()->size());
251 memory_dump->AddSuballocation( 275 memory_dump->AddSuballocation(
252 dump->Guid(), String(WTF::Partitions::kAllocatedObjectPoolName)); 276 dump->Guid(), String(WTF::Partitions::kAllocatedObjectPoolName));
253 } else { 277 } else {
254 // If there is data in the segments, then it should have been allocated 278 // If there is data in the segments, then it should have been allocated
255 // using fastMalloc. 279 // using fastMalloc.
256 const String data_dump_name = dump_prefix + "/segments"; 280 const String data_dump_name = dump_prefix + "/segments";
257 auto dump = memory_dump->CreateMemoryAllocatorDump(data_dump_name); 281 auto dump = memory_dump->CreateMemoryAllocatorDump(data_dump_name);
258 dump->AddScalar("size", "bytes", size_); 282 dump->AddScalar("size", "bytes", size());
259 memory_dump->AddSuballocation( 283 memory_dump->AddSuballocation(
260 dump->Guid(), String(WTF::Partitions::kAllocatedObjectPoolName)); 284 dump->Guid(), String(WTF::Partitions::kAllocatedObjectPoolName));
261 } 285 }
262 } 286 }
263 287
264 } // namespace blink 288 } // namespace blink
OLDNEW
« no previous file with comments | « third_party/WebKit/Source/platform/SharedBuffer.h ('k') | third_party/WebKit/Source/platform/SharedBufferStep.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698