OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2006, 2008 Apple Inc. All rights reserved. | 2 * Copyright (C) 2006, 2008 Apple Inc. All rights reserved. |
3 * Copyright (C) Research In Motion Limited 2009-2010. All rights reserved. | 3 * Copyright (C) Research In Motion Limited 2009-2010. All rights reserved. |
4 * | 4 * |
5 * Redistribution and use in source and binary forms, with or without | 5 * Redistribution and use in source and binary forms, with or without |
6 * modification, are permitted provided that the following conditions | 6 * modification, are permitted provided that the following conditions |
7 * are met: | 7 * are met: |
8 * 1. Redistributions of source code must retain the above copyright | 8 * 1. Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * 2. Redistributions in binary form must reproduce the above copyright | 10 * 2. Redistributions in binary form must reproduce the above copyright |
(...skipping 22 matching lines...) Expand all Loading... |
33 #undef SHARED_BUFFER_STATS | 33 #undef SHARED_BUFFER_STATS |
34 | 34 |
35 #ifdef SHARED_BUFFER_STATS | 35 #ifdef SHARED_BUFFER_STATS |
36 #include "public/platform/Platform.h" | 36 #include "public/platform/Platform.h" |
37 #include "public/platform/WebTraceLocation.h" | 37 #include "public/platform/WebTraceLocation.h" |
38 #include "wtf/DataLog.h" | 38 #include "wtf/DataLog.h" |
39 #endif | 39 #endif |
40 | 40 |
41 namespace blink { | 41 namespace blink { |
42 | 42 |
43 static const unsigned segmentSize = 0x1000; | 43 STATIC_CONST_MEMBER_DEFINITION const unsigned SharedBuffer::kSegmentSize; |
44 static const unsigned segmentPositionMask = 0x0FFF; | |
45 | 44 |
46 static inline unsigned segmentIndex(unsigned position) | 45 static inline unsigned segmentIndex(unsigned position) |
47 { | 46 { |
48 return position / segmentSize; | 47 return position / SharedBuffer::kSegmentSize; |
49 } | 48 } |
50 | 49 |
51 static inline unsigned offsetInSegment(unsigned position) | 50 static inline unsigned offsetInSegment(unsigned position) |
52 { | 51 { |
53 return position & segmentPositionMask; | 52 return position % SharedBuffer::kSegmentSize; |
54 } | 53 } |
55 | 54 |
56 static inline char* allocateSegment() | 55 static inline char* allocateSegment() |
57 { | 56 { |
58 return static_cast<char*>(fastMalloc(segmentSize)); | 57 return static_cast<char*>(fastMalloc(SharedBuffer::kSegmentSize)); |
59 } | 58 } |
60 | 59 |
// Releases a segment previously obtained from allocateSegment().
static inline void freeSegment(char* p)
{
    fastFree(p);
}
65 | 64 |
66 #ifdef SHARED_BUFFER_STATS | 65 #ifdef SHARED_BUFFER_STATS |
67 | 66 |
68 static Mutex& statsMutex() | 67 static Mutex& statsMutex() |
(...skipping 171 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
240 void SharedBuffer::append(const char* data, unsigned length) | 239 void SharedBuffer::append(const char* data, unsigned length) |
241 { | 240 { |
242 ASSERT(isLocked()); | 241 ASSERT(isLocked()); |
243 if (!length) | 242 if (!length) |
244 return; | 243 return; |
245 | 244 |
246 ASSERT(m_size >= m_buffer.size()); | 245 ASSERT(m_size >= m_buffer.size()); |
247 unsigned positionInSegment = offsetInSegment(m_size - m_buffer.size()); | 246 unsigned positionInSegment = offsetInSegment(m_size - m_buffer.size()); |
248 m_size += length; | 247 m_size += length; |
249 | 248 |
250 if (m_size <= segmentSize) { | 249 if (m_size <= kSegmentSize) { |
251 // No need to use segments for small resource data. | 250 // No need to use segments for small resource data. |
252 m_buffer.append(data, length); | 251 m_buffer.append(data, length); |
253 return; | 252 return; |
254 } | 253 } |
255 | 254 |
256 char* segment; | 255 char* segment; |
257 if (!positionInSegment) { | 256 if (!positionInSegment) { |
258 segment = allocateSegment(); | 257 segment = allocateSegment(); |
259 m_segments.append(segment); | 258 m_segments.append(segment); |
260 } else | 259 } else |
261 segment = m_segments.last() + positionInSegment; | 260 segment = m_segments.last() + positionInSegment; |
262 | 261 |
263 unsigned segmentFreeSpace = segmentSize - positionInSegment; | 262 unsigned segmentFreeSpace = kSegmentSize - positionInSegment; |
264 unsigned bytesToCopy = std::min(length, segmentFreeSpace); | 263 unsigned bytesToCopy = std::min(length, segmentFreeSpace); |
265 | 264 |
266 for (;;) { | 265 for (;;) { |
267 memcpy(segment, data, bytesToCopy); | 266 memcpy(segment, data, bytesToCopy); |
268 if (static_cast<unsigned>(length) == bytesToCopy) | 267 if (static_cast<unsigned>(length) == bytesToCopy) |
269 break; | 268 break; |
270 | 269 |
271 length -= bytesToCopy; | 270 length -= bytesToCopy; |
272 data += bytesToCopy; | 271 data += bytesToCopy; |
273 segment = allocateSegment(); | 272 segment = allocateSegment(); |
274 m_segments.append(segment); | 273 m_segments.append(segment); |
275 bytesToCopy = std::min(length, segmentSize); | 274 bytesToCopy = std::min(length, kSegmentSize); |
276 } | 275 } |
277 } | 276 } |
278 | 277 |
279 void SharedBuffer::append(const Vector<char>& data) | 278 void SharedBuffer::append(const Vector<char>& data) |
280 { | 279 { |
281 append(data.data(), data.size()); | 280 append(data.data(), data.size()); |
282 } | 281 } |
283 | 282 |
284 void SharedBuffer::clear() | 283 void SharedBuffer::clear() |
285 { | 284 { |
(...skipping 22 matching lines...) Expand all Loading... |
308 } | 307 } |
309 return clone.release(); | 308 return clone.release(); |
310 } | 309 } |
311 | 310 |
312 void SharedBuffer::mergeSegmentsIntoBuffer() const | 311 void SharedBuffer::mergeSegmentsIntoBuffer() const |
313 { | 312 { |
314 unsigned bufferSize = m_buffer.size(); | 313 unsigned bufferSize = m_buffer.size(); |
315 if (m_size > bufferSize) { | 314 if (m_size > bufferSize) { |
316 unsigned bytesLeft = m_size - bufferSize; | 315 unsigned bytesLeft = m_size - bufferSize; |
317 for (unsigned i = 0; i < m_segments.size(); ++i) { | 316 for (unsigned i = 0; i < m_segments.size(); ++i) { |
318 unsigned bytesToCopy = std::min(bytesLeft, segmentSize); | 317 unsigned bytesToCopy = std::min(bytesLeft, kSegmentSize); |
319 m_buffer.append(m_segments[i], bytesToCopy); | 318 m_buffer.append(m_segments[i], bytesToCopy); |
320 bytesLeft -= bytesToCopy; | 319 bytesLeft -= bytesToCopy; |
321 freeSegment(m_segments[i]); | 320 freeSegment(m_segments[i]); |
322 } | 321 } |
323 m_segments.clear(); | 322 m_segments.clear(); |
324 } | 323 } |
325 } | 324 } |
326 | 325 |
// Exposes the longest run of contiguous bytes starting at |position|:
// sets |someData| to point at that run and returns its length in bytes.
// Returns 0 (with someData = 0) when |position| is at or past the end.
// Callers iterate by advancing |position| by the returned length.
// Requires the buffer to be locked.
unsigned SharedBuffer::getSomeData(const char*& someData, unsigned position) const
{
    ASSERT(isLocked());
    unsigned totalSize = size();
    if (position >= totalSize) {
        someData = 0;
        return 0;
    }

    ASSERT_WITH_SECURITY_IMPLICATION(position < m_size);
    unsigned consecutiveSize = m_buffer.size();
    // The first consecutiveSize bytes live in the flat m_buffer.
    if (position < consecutiveSize) {
        someData = m_buffer.data() + position;
        return consecutiveSize - position;
    }

    // Remaining bytes are spread across fixed-size segments; translate
    // |position| into a (segment index, offset-in-segment) pair.
    position -= consecutiveSize;
    unsigned segments = m_segments.size();
    unsigned maxSegmentedSize = segments * kSegmentSize;
    unsigned segment = segmentIndex(position);
    if (segment < segments) {
        unsigned bytesLeft = totalSize - consecutiveSize;
        unsigned segmentedSize = std::min(maxSegmentedSize, bytesLeft);

        unsigned positionInSegment = offsetInSegment(position);
        someData = m_segments[segment] + positionInSegment;
        // The last segment may be only partially filled; every earlier
        // segment is full, so the run extends to the segment boundary.
        return segment == segments - 1 ? segmentedSize - position : kSegmentSize - positionInSegment;
    }
    ASSERT_NOT_REACHED();
    return 0;
}
358 | 357 |
359 bool SharedBuffer::getAsBytes(void* dest, unsigned byteLength) const | 358 bool SharedBuffer::getAsBytes(void* dest, unsigned byteLength) const |
360 { | 359 { |
361 if (!dest || byteLength != size()) | 360 if (!dest || byteLength != size()) |
362 return false; | 361 return false; |
363 | 362 |
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
407 mergeSegmentsIntoBuffer(); | 406 mergeSegmentsIntoBuffer(); |
408 m_buffer.unlock(); | 407 m_buffer.unlock(); |
409 } | 408 } |
410 | 409 |
411 bool SharedBuffer::isLocked() const | 410 bool SharedBuffer::isLocked() const |
412 { | 411 { |
413 return m_buffer.isLocked(); | 412 return m_buffer.isLocked(); |
414 } | 413 } |
415 | 414 |
416 } // namespace blink | 415 } // namespace blink |
OLD | NEW |