Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2006, 2008 Apple Inc. All rights reserved. | 2 * Copyright (C) 2006, 2008 Apple Inc. All rights reserved. |
| 3 * Copyright (C) Research In Motion Limited 2009-2010. All rights reserved. | 3 * Copyright (C) Research In Motion Limited 2009-2010. All rights reserved. |
| 4 * | 4 * |
| 5 * Redistribution and use in source and binary forms, with or without | 5 * Redistribution and use in source and binary forms, with or without |
| 6 * modification, are permitted provided that the following conditions | 6 * modification, are permitted provided that the following conditions |
| 7 * are met: | 7 * are met: |
| 8 * 1. Redistributions of source code must retain the above copyright | 8 * 1. Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * 2. Redistributions in binary form must reproduce the above copyright | 10 * 2. Redistributions in binary form must reproduce the above copyright |
| (...skipping 22 matching lines...) | |
| 33 #undef SHARED_BUFFER_STATS | 33 #undef SHARED_BUFFER_STATS |
| 34 | 34 |
| 35 #ifdef SHARED_BUFFER_STATS | 35 #ifdef SHARED_BUFFER_STATS |
| 36 #include "public/platform/Platform.h" | 36 #include "public/platform/Platform.h" |
| 37 #include "public/platform/WebTraceLocation.h" | 37 #include "public/platform/WebTraceLocation.h" |
| 38 #include "wtf/DataLog.h" | 38 #include "wtf/DataLog.h" |
| 39 #endif | 39 #endif |
| 40 | 40 |
| 41 namespace blink { | 41 namespace blink { |
| 42 | 42 |
| 43 static const unsigned segmentSize = 0x1000; | |
| 44 static const unsigned segmentPositionMask = 0x0FFF; | |
| 45 | |
| 46 static inline unsigned segmentIndex(unsigned position) | 43 static inline unsigned segmentIndex(unsigned position) |
| 47 { | 44 { |
| 48 return position / segmentSize; | 45 return position / SharedBuffer::kSegmentSize; |
| 49 } | 46 } |
| 50 | 47 |
| 51 static inline unsigned offsetInSegment(unsigned position) | 48 static inline unsigned offsetInSegment(unsigned position) |
| 52 { | 49 { |
| | 50 const unsigned segmentPositionMask = SharedBuffer::kSegmentSize - 1; |
| 53 return position & segmentPositionMask; | 51 return position & segmentPositionMask; |

Peter Kasting 2015/03/25 19:53:11:
This will be safer if you do:
return position % SharedBuffer::kSegmentSize;
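The suggestion is about robustness: `position & (kSegmentSize - 1)` only agrees with `position % kSegmentSize` when `kSegmentSize` is a power of two, while the modulo spelling stays correct for any value (and compilers lower it to the same mask when the divisor is a power-of-two constant). A standalone C++ sketch, not part of this CL, with illustrative values:

```cpp
#include <cassert>

int main()
{
    const unsigned kSegmentSize = 0x1000; // 4096, a power of two
    // For a power-of-two size, mask and modulo agree on every position.
    for (unsigned position = 0; position < 3 * kSegmentSize; ++position)
        assert((position & (kSegmentSize - 1)) == position % kSegmentSize);

    // For a non-power-of-two size they diverge: 4096 & 3999 == 0,
    // while 4096 % 4000 == 96, so the mask would compute a bogus offset.
    const unsigned oddSize = 4000;
    assert((4096u & (oddSize - 1)) != 4096u % oddSize);
    return 0;
}
```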
| 54 } | 52 } |
| 55 | 53 |
| 56 static inline char* allocateSegment() | 54 static inline char* allocateSegment() |
| 57 { | 55 { |
| 58 return static_cast<char*>(fastMalloc(segmentSize)); | 56 return static_cast<char*>(fastMalloc(SharedBuffer::kSegmentSize)); |
| 59 } | 57 } |
| 60 | 58 |
| 61 static inline void freeSegment(char* p) | 59 static inline void freeSegment(char* p) |
| 62 { | 60 { |
| 63 fastFree(p); | 61 fastFree(p); |
| 64 } | 62 } |
| 65 | 63 |
| 66 #ifdef SHARED_BUFFER_STATS | 64 #ifdef SHARED_BUFFER_STATS |
| 67 | 65 |
| 68 static Mutex& statsMutex() | 66 static Mutex& statsMutex() |
| (...skipping 171 matching lines...) | |
| 240 void SharedBuffer::append(const char* data, unsigned length) | 238 void SharedBuffer::append(const char* data, unsigned length) |
| 241 { | 239 { |
| 242 ASSERT(isLocked()); | 240 ASSERT(isLocked()); |
| 243 if (!length) | 241 if (!length) |
| 244 return; | 242 return; |
| 245 | 243 |
| 246 ASSERT(m_size >= m_buffer.size()); | 244 ASSERT(m_size >= m_buffer.size()); |
| 247 unsigned positionInSegment = offsetInSegment(m_size - m_buffer.size()); | 245 unsigned positionInSegment = offsetInSegment(m_size - m_buffer.size()); |
| 248 m_size += length; | 246 m_size += length; |
| 249 | 247 |
| 250 if (m_size <= segmentSize) { | 248 if (m_size <= kSegmentSize) { |
| 251 // No need to use segments for small resource data. | 249 // No need to use segments for small resource data. |
| 252 m_buffer.append(data, length); | 250 m_buffer.append(data, length); |
| 253 return; | 251 return; |
| 254 } | 252 } |
| 255 | 253 |
| 256 char* segment; | 254 char* segment; |
| 257 if (!positionInSegment) { | 255 if (!positionInSegment) { |
| 258 segment = allocateSegment(); | 256 segment = allocateSegment(); |
| 259 m_segments.append(segment); | 257 m_segments.append(segment); |
| 260 } else | 258 } else |
| 261 segment = m_segments.last() + positionInSegment; | 259 segment = m_segments.last() + positionInSegment; |
| 262 | 260 |
| 263 unsigned segmentFreeSpace = segmentSize - positionInSegment; | 261 unsigned segmentFreeSpace = kSegmentSize - positionInSegment; |
| 264 unsigned bytesToCopy = std::min(length, segmentFreeSpace); | 262 unsigned bytesToCopy = std::min(length, segmentFreeSpace); |
| 265 | 263 |
| 266 for (;;) { | 264 for (;;) { |
| 267 memcpy(segment, data, bytesToCopy); | 265 memcpy(segment, data, bytesToCopy); |
| 268 if (static_cast<unsigned>(length) == bytesToCopy) | 266 if (static_cast<unsigned>(length) == bytesToCopy) |
| 269 break; | 267 break; |
| 270 | 268 |
| 271 length -= bytesToCopy; | 269 length -= bytesToCopy; |
| 272 data += bytesToCopy; | 270 data += bytesToCopy; |
| 273 segment = allocateSegment(); | 271 segment = allocateSegment(); |
| 274 m_segments.append(segment); | 272 m_segments.append(segment); |
| 275 bytesToCopy = std::min(length, segmentSize); | 273 bytesToCopy = std::min(length, kSegmentSize); |
| 276 } | 274 } |
| 277 } | 275 } |
| 278 | 276 |
| 279 void SharedBuffer::append(const Vector<char>& data) | 277 void SharedBuffer::append(const Vector<char>& data) |
| 280 { | 278 { |
| 281 append(data.data(), data.size()); | 279 append(data.data(), data.size()); |
| 282 } | 280 } |
| 283 | 281 |
| 284 void SharedBuffer::clear() | 282 void SharedBuffer::clear() |
| 285 { | 283 { |
| (...skipping 22 matching lines...) | |
| 308 } | 306 } |
| 309 return clone.release(); | 307 return clone.release(); |
| 310 } | 308 } |
| 311 | 309 |
| 312 void SharedBuffer::mergeSegmentsIntoBuffer() const | 310 void SharedBuffer::mergeSegmentsIntoBuffer() const |
| 313 { | 311 { |
| 314 unsigned bufferSize = m_buffer.size(); | 312 unsigned bufferSize = m_buffer.size(); |
| 315 if (m_size > bufferSize) { | 313 if (m_size > bufferSize) { |
| 316 unsigned bytesLeft = m_size - bufferSize; | 314 unsigned bytesLeft = m_size - bufferSize; |
| 317 for (unsigned i = 0; i < m_segments.size(); ++i) { | 315 for (unsigned i = 0; i < m_segments.size(); ++i) { |
| 318 unsigned bytesToCopy = std::min(bytesLeft, segmentSize); | 316 unsigned bytesToCopy = std::min(bytesLeft, kSegmentSize); |
| 319 m_buffer.append(m_segments[i], bytesToCopy); | 317 m_buffer.append(m_segments[i], bytesToCopy); |
| 320 bytesLeft -= bytesToCopy; | 318 bytesLeft -= bytesToCopy; |
| 321 freeSegment(m_segments[i]); | 319 freeSegment(m_segments[i]); |
| 322 } | 320 } |
| 323 m_segments.clear(); | 321 m_segments.clear(); |
| 324 } | 322 } |
| 325 } | 323 } |
| 326 | 324 |
| 327 unsigned SharedBuffer::getSomeData(const char*& someData, unsigned position) const | 325 unsigned SharedBuffer::getSomeData(const char*& someData, unsigned position) const |
| 328 { | 326 { |
| 329 ASSERT(isLocked()); | 327 ASSERT(isLocked()); |
| 330 unsigned totalSize = size(); | 328 unsigned totalSize = size(); |
| 331 if (position >= totalSize) { | 329 if (position >= totalSize) { |
| 332 someData = 0; | 330 someData = 0; |
| 333 return 0; | 331 return 0; |
| 334 } | 332 } |
| 335 | 333 |
| 336 ASSERT_WITH_SECURITY_IMPLICATION(position < m_size); | 334 ASSERT_WITH_SECURITY_IMPLICATION(position < m_size); |
| 337 unsigned consecutiveSize = m_buffer.size(); | 335 unsigned consecutiveSize = m_buffer.size(); |
| 338 if (position < consecutiveSize) { | 336 if (position < consecutiveSize) { |
| 339 someData = m_buffer.data() + position; | 337 someData = m_buffer.data() + position; |
| 340 return consecutiveSize - position; | 338 return consecutiveSize - position; |
| 341 } | 339 } |
| 342 | 340 |
| 343 position -= consecutiveSize; | 341 position -= consecutiveSize; |
| 344 unsigned segments = m_segments.size(); | 342 unsigned segments = m_segments.size(); |
| 345 unsigned maxSegmentedSize = segments * segmentSize; | 343 unsigned maxSegmentedSize = segments * kSegmentSize; |
| 346 unsigned segment = segmentIndex(position); | 344 unsigned segment = segmentIndex(position); |
| 347 if (segment < segments) { | 345 if (segment < segments) { |
| 348 unsigned bytesLeft = totalSize - consecutiveSize; | 346 unsigned bytesLeft = totalSize - consecutiveSize; |
| 349 unsigned segmentedSize = std::min(maxSegmentedSize, bytesLeft); | 347 unsigned segmentedSize = std::min(maxSegmentedSize, bytesLeft); |
| 350 | 348 |
| 351 unsigned positionInSegment = offsetInSegment(position); | 349 unsigned positionInSegment = offsetInSegment(position); |
| 352 someData = m_segments[segment] + positionInSegment; | 350 someData = m_segments[segment] + positionInSegment; |
| 353 return segment == segments - 1 ? segmentedSize - position : segmentSize - positionInSegment; | 351 return segment == segments - 1 ? segmentedSize - position : kSegmentSize - positionInSegment; |
| 354 } | 352 } |
| 355 ASSERT_NOT_REACHED(); | 353 ASSERT_NOT_REACHED(); |
| 356 return 0; | 354 return 0; |
| 357 } | 355 } |
| 358 | 356 |
| 359 bool SharedBuffer::getAsBytes(void* dest, unsigned byteLength) const | 357 bool SharedBuffer::getAsBytes(void* dest, unsigned byteLength) const |
| 360 { | 358 { |
| 361 if (!dest || byteLength != size()) | 359 if (!dest || byteLength != size()) |
| 362 return false; | 360 return false; |
| 363 | 361 |
| (...skipping 43 matching lines...) | |
| 407 mergeSegmentsIntoBuffer(); | 405 mergeSegmentsIntoBuffer(); |
| 408 m_buffer.unlock(); | 406 m_buffer.unlock(); |
| 409 } | 407 } |
| 410 | 408 |
| 411 bool SharedBuffer::isLocked() const | 409 bool SharedBuffer::isLocked() const |
| 412 { | 410 { |
| 413 return m_buffer.isLocked(); | 411 return m_buffer.isLocked(); |
| 414 } | 412 } |
| 415 | 413 |
| 416 } // namespace blink | 414 } // namespace blink |
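For context on how callers consume the buffer this file builds: getSomeData() hands back one contiguous run per call, first the flat m_buffer prefix and then one kSegmentSize chunk at a time, so readers loop until it returns zero. A hedged sketch of that pattern; processBytes() and the include path are illustrative assumptions, not part of this CL:

```cpp
#include "platform/SharedBuffer.h" // assumed include path for this tree

namespace blink {

void processBytes(const char* data, unsigned length); // hypothetical consumer

// Walks every byte of a (locked) SharedBuffer without forcing a merge
// into one flat allocation: each getSomeData() call returns the length
// of the contiguous run at "position" and points "segment" at it.
void consumeAllData(const RefPtr<SharedBuffer>& buffer)
{
    const char* segment;
    unsigned position = 0;
    while (unsigned length = buffer->getSomeData(segment, position)) {
        processBytes(segment, length);
        position += length;
    }
    ASSERT(position == buffer->size());
}

} // namespace blink
```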