OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2006, 2008 Apple Inc. All rights reserved. | 2 * Copyright (C) 2006, 2008 Apple Inc. All rights reserved. |
3 * Copyright (C) Research In Motion Limited 2009-2010. All rights reserved. | 3 * Copyright (C) Research In Motion Limited 2009-2010. All rights reserved. |
4 * | 4 * |
5 * Redistribution and use in source and binary forms, with or without | 5 * Redistribution and use in source and binary forms, with or without |
6 * modification, are permitted provided that the following conditions | 6 * modification, are permitted provided that the following conditions |
7 * are met: | 7 * are met: |
8 * 1. Redistributions of source code must retain the above copyright | 8 * 1. Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * 2. Redistributions in binary form must reproduce the above copyright | 10 * 2. Redistributions in binary form must reproduce the above copyright |
(...skipping 105 matching lines...)
116 const char* segment; | 116 const char* segment; |
117 size_t position = 0; | 117 size_t position = 0; |
118 while (size_t length = data->getSomeDataInternal(segment, position)) { | 118 while (size_t length = data->getSomeDataInternal(segment, position)) { |
119 append(segment, length); | 119 append(segment, length); |
120 position += length; | 120 position += length; |
121 } | 121 } |
122 } | 122 } |
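For context, the loop above is the usual way to walk a SharedBuffer without flattening it: ask for the next contiguous chunk, then advance by the returned length. A minimal caller-side sketch of the same pattern using the public getSomeData(); the forEachChunk()/consumeBytes() names are illustrative and not part of this patch:

// Sketch: stream a SharedBuffer to a sink chunk by chunk. getSomeData() returns
// the length of the contiguous run starting at 'position' (0 once past the end),
// so the loop terminates after every byte has been visited exactly once.
static void forEachChunk(const blink::SharedBuffer& buffer,
                         void (*consumeBytes)(const char*, size_t))
{
    const char* chunk = nullptr;
    size_t position = 0;
    while (size_t length = buffer.getSomeData(chunk, position)) {
        consumeBytes(chunk, length); // 'chunk' stays valid while 'buffer' is alive and unmodified.
        position += length;
    }
}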
123 | 123 |
124 void SharedBuffer::appendInternal(const char* data, size_t length) | 124 void SharedBuffer::appendInternal(const char* data, size_t length) |
125 { | 125 { |
126 ASSERT(isLocked()); | |
127 if (!length) | 126 if (!length) |
128 return; | 127 return; |
129 | 128 |
130 ASSERT(m_size >= m_buffer.size()); | 129 ASSERT(m_size >= m_buffer.size()); |
131 size_t positionInSegment = offsetInSegment(m_size - m_buffer.size()); | 130 size_t positionInSegment = offsetInSegment(m_size - m_buffer.size()); |
132 m_size += length; | 131 m_size += length; |
133 | 132 |
134 if (m_size <= kSegmentSize) { | 133 if (m_size <= kSegmentSize) { |
135 // No need to use segments for small resource data. | 134 // No need to use segments for small resource data. |
136 m_buffer.append(data, length); | 135 m_buffer.append(data, length); |
(...skipping 66 matching lines...)
203 m_buffer.append(m_segments[i], bytesToCopy); | 202 m_buffer.append(m_segments[i], bytesToCopy); |
204 bytesLeft -= bytesToCopy; | 203 bytesLeft -= bytesToCopy; |
205 freeSegment(m_segments[i]); | 204 freeSegment(m_segments[i]); |
206 } | 205 } |
207 m_segments.clear(); | 206 m_segments.clear(); |
208 } | 207 } |
209 } | 208 } |
210 | 209 |
211 size_t SharedBuffer::getSomeDataInternal(const char*& someData, size_t position) const | 210 size_t SharedBuffer::getSomeDataInternal(const char*& someData, size_t position) const |
212 { | 211 { |
213 ASSERT(isLocked()); | |
214 size_t totalSize = size(); | 212 size_t totalSize = size(); |
215 if (position >= totalSize) { | 213 if (position >= totalSize) { |
216 someData = 0; | 214 someData = 0; |
217 return 0; | 215 return 0; |
218 } | 216 } |
219 | 217 |
220 ASSERT_WITH_SECURITY_IMPLICATION(position < m_size); | 218 ASSERT_WITH_SECURITY_IMPLICATION(position < m_size); |
221 size_t consecutiveSize = m_buffer.size(); | 219 size_t consecutiveSize = m_buffer.size(); |
222 if (position < consecutiveSize) { | 220 if (position < consecutiveSize) { |
223 someData = m_buffer.data() + position; | 221 someData = m_buffer.data() + position; |
(...skipping 52 matching lines...)
276 } | 274 } |
277 | 275 |
278 if (position != bufferLength) { | 276 if (position != bufferLength) { |
279 ASSERT_NOT_REACHED(); | 277 ASSERT_NOT_REACHED(); |
280 // Don't return the incomplete SkData. | 278 // Don't return the incomplete SkData. |
281 return nullptr; | 279 return nullptr; |
282 } | 280 } |
283 return data; | 281 return data; |
284 } | 282 } |
285 | 283 |
286 bool SharedBuffer::lock() | |
287 { | |
288 return m_buffer.lock(); | |
289 } | |
290 | |
291 void SharedBuffer::unlock() | |
292 { | |
293 mergeSegmentsIntoBuffer(); | |
294 m_buffer.unlock(); | |
295 } | |
296 | |
297 bool SharedBuffer::isLocked() const | |
298 { | |
299 return m_buffer.isLocked(); | |
300 } | |
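Note on the block deleted above: lock(), unlock() and isLocked() simply forwarded to the purgeable backing store, with unlock() also merging the segment list back into m_buffer, so callers previously had to bracket any read with a lock/unlock pair. A rough sketch of that old contract; readPayload() and the processing step are purely illustrative:

// Old usage pattern implied by the removed API: pin the backing store before
// touching the bytes, release it afterwards so the memory can be purged again.
static bool readPayload(blink::SharedBuffer& buffer)
{
    if (!buffer.lock())
        return false; // Backing store was purged; the data is gone.
    const char* segment = nullptr;
    size_t position = 0;
    while (size_t length = buffer.getSomeData(segment, position))
        position += length; // ... process 'segment' / 'length' here ...
    buffer.unlock(); // Also merged the segments back into one contiguous buffer.
    return true;
}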
301 | |
302 void SharedBuffer::onMemoryDump(const String& dumpPrefix, WebProcessMemoryDump* memoryDump) const | 284 void SharedBuffer::onMemoryDump(const String& dumpPrefix, WebProcessMemoryDump* memoryDump) const |
303 { | 285 { |
304 if (m_buffer.size()) { | 286 if (m_buffer.size()) { |
305 m_buffer.onMemoryDump(dumpPrefix + "/shared_buffer", memoryDump); | 287 m_buffer.onMemoryDump(dumpPrefix + "/shared_buffer", memoryDump); |
306 } else { | 288 } else { |
307 // If there is data in the segments, then it should have been allocated | 289 // If there is data in the segments, then it should have been allocated |
308 // using fastMalloc. | 290 // using fastMalloc. |
309 const String dataDumpName = dumpPrefix + "/segments"; | 291 const String dataDumpName = dumpPrefix + "/segments"; |
310 auto dump = memoryDump->createMemoryAllocatorDump(dataDumpName); | 292 auto dump = memoryDump->createMemoryAllocatorDump(dataDumpName); |
311 dump->addScalar("size", "bytes", m_size); | 293 dump->addScalar("size", "bytes", m_size); |
312 memoryDump->addSuballocation(dump->guid(), String(WTF::Partitions::kAllocatedObjectPoolName)); | 294 memoryDump->addSuballocation(dump->guid(), String(WTF::Partitions::kAllocatedObjectPoolName)); |
313 } | 295 } |
314 } | 296 } |
315 | 297 |
316 } // namespace blink | 298 } // namespace blink |