Chromium Code Reviews

Side by Side Diff: third_party/WebKit/Source/platform/heap/HeapPage.cpp

Issue 2531973002: Simple BlinkGC heap compaction. (Closed)
Patch Set: Clear unused pages before decommitting Created 4 years ago
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 17 matching lines...)
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */ 29 */
30 30
31 #include "platform/heap/HeapPage.h" 31 #include "platform/heap/HeapPage.h"
32 32
33 #include "base/trace_event/process_memory_dump.h" 33 #include "base/trace_event/process_memory_dump.h"
34 #include "platform/MemoryCoordinator.h" 34 #include "platform/MemoryCoordinator.h"
35 #include "platform/ScriptForbiddenScope.h" 35 #include "platform/ScriptForbiddenScope.h"
36 #include "platform/heap/BlinkGCMemoryDumpProvider.h" 36 #include "platform/heap/BlinkGCMemoryDumpProvider.h"
37 #include "platform/heap/CallbackStack.h" 37 #include "platform/heap/CallbackStack.h"
38 #include "platform/heap/HeapCompact.h"
38 #include "platform/heap/MarkingVisitor.h" 39 #include "platform/heap/MarkingVisitor.h"
39 #include "platform/heap/PageMemory.h" 40 #include "platform/heap/PageMemory.h"
40 #include "platform/heap/PagePool.h" 41 #include "platform/heap/PagePool.h"
41 #include "platform/heap/SafePoint.h" 42 #include "platform/heap/SafePoint.h"
42 #include "platform/heap/ThreadState.h" 43 #include "platform/heap/ThreadState.h"
43 #include "platform/tracing/TraceEvent.h" 44 #include "platform/tracing/TraceEvent.h"
44 #include "platform/tracing/web_memory_allocator_dump.h" 45 #include "platform/tracing/web_memory_allocator_dump.h"
45 #include "platform/tracing/web_process_memory_dump.h" 46 #include "platform/tracing/web_process_memory_dump.h"
46 #include "public/platform/Platform.h" 47 #include "public/platform/Platform.h"
47 #include "wtf/Assertions.h" 48 #include "wtf/Assertions.h"
(...skipping 146 matching lines...)
194 ASSERT(!page->hasBeenSwept()); 195 ASSERT(!page->hasBeenSwept());
195 page->invalidateObjectStartBitmap(); 196 page->invalidateObjectStartBitmap();
196 } 197 }
197 if (previousPage) { 198 if (previousPage) {
198 ASSERT(m_firstUnsweptPage); 199 ASSERT(m_firstUnsweptPage);
199 previousPage->m_next = m_firstPage; 200 previousPage->m_next = m_firstPage;
200 m_firstPage = m_firstUnsweptPage; 201 m_firstPage = m_firstUnsweptPage;
201 m_firstUnsweptPage = nullptr; 202 m_firstUnsweptPage = nullptr;
202 } 203 }
203 ASSERT(!m_firstUnsweptPage); 204 ASSERT(!m_firstUnsweptPage);
205
206 HeapCompact* heapCompactor = getThreadState()->heap().compaction();
207 if (!heapCompactor->isCompactingArena(arenaIndex()))
208 return;
209
210 BasePage* nextPage = m_firstPage;
211 while (nextPage) {
212 if (!nextPage->isLargeObjectPage())
213 heapCompactor->addCompactingPage(nextPage);
214 nextPage = nextPage->next();
215 }
204 } 216 }
205 217
206 void BaseArena::makeConsistentForMutator() { 218 void BaseArena::makeConsistentForMutator() {
207 clearFreeLists(); 219 clearFreeLists();
208 ASSERT(isConsistentForGC()); 220 ASSERT(isConsistentForGC());
209 ASSERT(!m_firstPage); 221 ASSERT(!m_firstPage);
210 222
211 // Drop marks from marked objects and rebuild free lists in preparation for 223 // Drop marks from marked objects and rebuild free lists in preparation for
212 // resuming the executions of mutators. 224 // resuming the executions of mutators.
213 BasePage* previousPage = nullptr; 225 BasePage* previousPage = nullptr;
(...skipping 219 matching lines...)
433 m_promptlyFreedSize(0), 445 m_promptlyFreedSize(0),
434 m_isLazySweeping(false) { 446 m_isLazySweeping(false) {
435 clearFreeLists(); 447 clearFreeLists();
436 } 448 }
437 449
438 void NormalPageArena::clearFreeLists() { 450 void NormalPageArena::clearFreeLists() {
439 setAllocationPoint(nullptr, 0); 451 setAllocationPoint(nullptr, 0);
440 m_freeList.clear(); 452 m_freeList.clear();
441 } 453 }
442 454
455 size_t NormalPageArena::arenaSize() {
456 size_t size = 0;
457 BasePage* page = m_firstPage;
458 while (page) {
459 size += page->size();
460 page = page->next();
461 }
462 LOG_HEAP_FREELIST_VERBOSE("Heap size: %zu (%d)\n", size, arenaIndex());
463 return size;
464 }
465
466 size_t NormalPageArena::freeListSize() {
467 size_t freeSize = m_freeList.freeListSize();
468 LOG_HEAP_FREELIST_VERBOSE("Free size: %zu (%d)\n", freeSize, arenaIndex());
469 return freeSize;
470 }
471
472 void NormalPageArena::sweepAndCompact() {
473 ThreadHeap& heap = getThreadState()->heap();
474 if (!heap.compaction()->isCompactingArena(arenaIndex()))
475 return;
476
477 if (!m_firstUnsweptPage) {
478 heap.compaction()->finishedArenaCompaction(this, 0, 0);
479 return;
480 }
481
482 // Compaction is performed in-place, sliding objects down over unused
483 // holes for a smaller heap page footprint and improved locality.
484 // A "compaction pointer" is consequently kept, pointing to the next
485 // available address to move objects down to. It will belong to one
486 // of the already sweep-compacted pages for this arena, but as compaction
487 // proceeds, it will not belong to the same page as the one being
488 // currently compacted.
489 //
490 // The compaction pointer is represented by the
491 // |(currentPage, allocationPoint)| pair, with |allocationPoint|
492 // being the offset into |currentPage|, making up the next
493 // available location. When the compaction of an arena page causes the
494 // compaction pointer to exhaust the current page it is compacting into,
495 // page compaction will advance the current page of the compaction
496 // pointer, as well as the allocation point.
497 //
498 // By construction, the page compaction can be performed without having
499 // to allocate any new pages. So to arrange for the page compaction's
500 // supply of freed, available pages, we chain them together after each
501 // has been "compacted from". The page compaction will then reuse those
502 // as needed, and once finished, the chained, available pages can be
503 // released back to the OS.
504 //
505 // To ease the passing of the compaction state when iterating over an
506 // arena's pages, package it up into a |CompactionContext|.
507 NormalPage::CompactionContext context;
508 context.m_compactedPages = &m_firstPage;
509
510 while (m_firstUnsweptPage) {
511 BasePage* page = m_firstUnsweptPage;
512 if (page->isEmpty()) {
513 page->unlink(&m_firstUnsweptPage);
514 page->removeFromHeap();
515 continue;
516 }
517 // Large objects do not belong to this arena.
518 DCHECK(!page->isLargeObjectPage());
519 NormalPage* normalPage = static_cast<NormalPage*>(page);
520 normalPage->unlink(&m_firstUnsweptPage);
521 normalPage->markAsSwept();
522 // If not the first page, add |normalPage| onto the available pages chain.
523 if (!context.m_currentPage)
524 context.m_currentPage = normalPage;
525 else
526 normalPage->link(&context.m_availablePages);
527 normalPage->sweepAndCompact(context);
528 }
529
530 size_t freedSize = 0;
531 size_t freedPageCount = 0;
532
533 DCHECK(context.m_currentPage);
534 // If the current page hasn't been allocated into, add it to the available
535 // list, for subsequent release below.
536 size_t allocationPoint = context.m_allocationPoint;
537 if (!allocationPoint) {
538 context.m_currentPage->link(&context.m_availablePages);
539 } else {
540 NormalPage* currentPage = context.m_currentPage;
541 currentPage->link(&m_firstPage);
542 if (allocationPoint != currentPage->payloadSize()) {
543 // Put the remainder of the page onto the free list.
544 freedSize = currentPage->payloadSize() - allocationPoint;
545 Address payload = currentPage->payload();
546 SET_MEMORY_INACCESSIBLE(payload + allocationPoint, freedSize);
547 currentPage->arenaForNormalPage()->addToFreeList(
548 payload + allocationPoint, freedSize);
549 }
550 }
551
552 // Return available pages to the free page pool, decommitting them from
553 // the pagefile.
554 BasePage* availablePages = context.m_availablePages;
555 while (availablePages) {
556 size_t pageSize = availablePages->size();
557 #if DEBUG_HEAP_COMPACTION
558 if (!freedPageCount)
559 LOG_HEAP_COMPACTION("Releasing:");
560 LOG_HEAP_COMPACTION(" [%p, %p]", availablePages, availablePages + pageSize);
561 #endif
562 freedSize += pageSize;
563 freedPageCount++;
564 BasePage* nextPage;
565 availablePages->unlink(&nextPage);
566 // Clear out the page before adding it to the free page pool, which
567 // decommits it. Recommitting the page must find a zeroed page later.
568 // We cannot assume that the OS will hand back a zeroed page across
569 // its "decommit" operation.
570 DCHECK(!availablePages->isLargeObjectPage());
571 NormalPage* unusedPage = reinterpret_cast<NormalPage*>(availablePages);
572 memset(unusedPage->payload(), 0, unusedPage->payloadSize());
haraken 2016/12/13 02:29:41 This should happen only on Release builds, right?
sof 2016/12/13 06:18:30 Done; they're already in that state, so it can be left out.
573 availablePages->removeFromHeap();
574 availablePages = static_cast<NormalPage*>(nextPage);
575 }
576 if (freedPageCount)
577 LOG_HEAP_COMPACTION("\n");
578 heap.compaction()->finishedArenaCompaction(this, freedPageCount, freedSize);
579 }
580
443 #if ENABLE(ASSERT) 581 #if ENABLE(ASSERT)
444 bool NormalPageArena::isConsistentForGC() { 582 bool NormalPageArena::isConsistentForGC() {
445 // A thread heap is consistent for sweeping if none of the pages to be swept 583 // A thread heap is consistent for sweeping if none of the pages to be swept
446 // contain a freelist block or the current allocation point. 584 // contain a freelist block or the current allocation point.
447 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { 585 for (size_t i = 0; i < blinkPageSizeLog2; ++i) {
448 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; 586 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i];
449 freeListEntry; freeListEntry = freeListEntry->next()) { 587 freeListEntry; freeListEntry = freeListEntry->next()) {
450 if (pagesToBeSweptContains(freeListEntry->getAddress())) 588 if (pagesToBeSweptContains(freeListEntry->getAddress()))
451 return false; 589 return false;
452 } 590 }
(...skipping 54 matching lines...)
507 // the limit of the number of mmapped regions OS can support 645 // the limit of the number of mmapped regions OS can support
508 // (e.g., /proc/sys/vm/max_map_count in Linux). 646 // (e.g., /proc/sys/vm/max_map_count in Linux).
509 RELEASE_ASSERT(result); 647 RELEASE_ASSERT(result);
510 pageMemory = memory; 648 pageMemory = memory;
511 } else { 649 } else {
512 getThreadState()->heap().getFreePagePool()->addFreePage(arenaIndex(), 650 getThreadState()->heap().getFreePagePool()->addFreePage(arenaIndex(),
513 memory); 651 memory);
514 } 652 }
515 } 653 }
516 } 654 }
517
518 NormalPage* page = 655 NormalPage* page =
519 new (pageMemory->writableStart()) NormalPage(pageMemory, this); 656 new (pageMemory->writableStart()) NormalPage(pageMemory, this);
520 page->link(&m_firstPage); 657 page->link(&m_firstPage);
521 658
522 getThreadState()->heap().heapStats().increaseAllocatedSpace(page->size()); 659 getThreadState()->heap().heapStats().increaseAllocatedSpace(page->size());
523 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) 660 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
524 // Allow the following addToFreeList() to add the newly allocated memory 661 // Allow the following addToFreeList() to add the newly allocated memory
525 // to the free list. 662 // to the free list.
526 ASAN_UNPOISON_MEMORY_REGION(page->payload(), page->payloadSize()); 663 ASAN_UNPOISON_MEMORY_REGION(page->payload(), page->payloadSize());
527 Address address = page->payload(); 664 Address address = page->payload();
(...skipping 542 matching lines...)
1070 1207
1071 void NEVER_INLINE FreeList::checkFreedMemoryIsZapped(Address address, 1208 void NEVER_INLINE FreeList::checkFreedMemoryIsZapped(Address address,
1072 size_t size) { 1209 size_t size) {
1073 for (size_t i = 0; i < size; i++) { 1210 for (size_t i = 0; i < size; i++) {
1074 ASSERT(address[i] == reuseAllowedZapValue || 1211 ASSERT(address[i] == reuseAllowedZapValue ||
1075 address[i] == reuseForbiddenZapValue); 1212 address[i] == reuseForbiddenZapValue);
1076 } 1213 }
1077 } 1214 }
1078 #endif 1215 #endif
1079 1216
1217 size_t FreeList::freeListSize() const {
1218 size_t freeSize = 0;
1219 for (unsigned i = 0; i < blinkPageSizeLog2; ++i) {
1220 FreeListEntry* entry = m_freeLists[i];
1221 while (entry) {
1222 freeSize += entry->size();
1223 entry = entry->next();
1224 }
1225 }
1226 #if DEBUG_HEAP_FREELIST
1227 if (freeSize) {
1228 LOG_HEAP_FREELIST_VERBOSE("FreeList(%p): %zu\n", this, freeSize);
1229 for (unsigned i = 0; i < blinkPageSizeLog2; ++i) {
1230 FreeListEntry* entry = m_freeLists[i];
1231 size_t bucket = 0;
1232 size_t count = 0;
1233 while (entry) {
1234 bucket += entry->size();
1235 count++;
1236 entry = entry->next();
1237 }
1238 if (bucket) {
1239 LOG_HEAP_FREELIST_VERBOSE("[%d, %d]: %zu (%zu)\n", 0x1 << i,
1240 0x1 << (i + 1), bucket, count);
1241 }
1242 }
1243 }
1244 #endif
1245 return freeSize;
1246 }
1247
1080 void FreeList::clear() { 1248 void FreeList::clear() {
1081 m_biggestFreeListIndex = 0; 1249 m_biggestFreeListIndex = 0;
1082 for (size_t i = 0; i < blinkPageSizeLog2; ++i) 1250 for (size_t i = 0; i < blinkPageSizeLog2; ++i)
1083 m_freeLists[i] = nullptr; 1251 m_freeLists[i] = nullptr;
1084 } 1252 }
1085 1253
1086 int FreeList::bucketIndexForSize(size_t size) { 1254 int FreeList::bucketIndexForSize(size_t size) {
1087 ASSERT(size > 0); 1255 ASSERT(size > 0);
1088 int index = -1; 1256 int index = -1;
1089 while (size) { 1257 while (size) {
(...skipping 149 matching lines...)
1239 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) 1407 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
1240 if (MemoryCoordinator::isLowEndDevice()) 1408 if (MemoryCoordinator::isLowEndDevice())
1241 discardPages(startOfGap + sizeof(FreeListEntry), payloadEnd()); 1409 discardPages(startOfGap + sizeof(FreeListEntry), payloadEnd());
1242 #endif 1410 #endif
1243 } 1411 }
1244 1412
1245 if (markedObjectSize) 1413 if (markedObjectSize)
1246 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize); 1414 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize);
1247 } 1415 }
1248 1416
1417 void NormalPage::sweepAndCompact(CompactionContext& context) {
1418 NormalPage*& currentPage = context.m_currentPage;
1419 size_t& allocationPoint = context.m_allocationPoint;
1420
1421 size_t markedObjectSize = 0;
1422 NormalPageArena* pageArena = arenaForNormalPage();
1423 #if defined(ADDRESS_SANITIZER)
1424 bool isVectorArena = ThreadState::isVectorArenaIndex(pageArena->arenaIndex());
1425 #endif
1426 HeapCompact* compact = pageArena->getThreadState()->heap().compaction();
1427 for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
1428 HeapObjectHeader* header =
1429 reinterpret_cast<HeapObjectHeader*>(headerAddress);
1430 size_t size = header->size();
1431 DCHECK(size > 0 && size < blinkPagePayloadSize());
1432
1433 if (header->isPromptlyFreed())
1434 pageArena->decreasePromptlyFreedSize(size);
1435 if (header->isFree()) {
1436 // Unpoison the freelist entry so that we
1437 // can compact into it as wanted.
1438 ASAN_UNPOISON_MEMORY_REGION(headerAddress, size);
1439 headerAddress += size;
1440 continue;
1441 }
1442 // This is a fast version of header->payloadSize().
1443 size_t payloadSize = size - sizeof(HeapObjectHeader);
1444 Address payload = header->payload();
1445 if (!header->isMarked()) {
1446 // For ASan, unpoison the object before calling the finalizer. The
1447 // finalized object will be zero-filled and poisoned afterwards.
1448 // Given all other unmarked objects are poisoned, ASan will detect
1449 // an error if the finalizer touches any other on-heap objects that
1450 // die in the same GC cycle.
1451 ASAN_UNPOISON_MEMORY_REGION(headerAddress, size);
1452 header->finalize(payload, payloadSize);
1453
1454 // As compaction is under way, leave the freed memory accessible
1455 // while compacting the rest of the page. We just zap the payload
1456 // to catch out other finalizers trying to access it.
1457 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
1458 defined(MEMORY_SANITIZER)
1459 FreeList::zapFreedMemory(payload, payloadSize);
1460 #endif
1461 headerAddress += size;
1462 continue;
1463 }
1464 header->unmark();
1465 // Allocate and copy over the live object.
1466 Address compactFrontier = currentPage->payload() + allocationPoint;
1467 if (compactFrontier + size > currentPage->payloadEnd()) {
1468 // Can't fit on current allocation page; add remaining onto the
1469 // freelist and advance to next available page.
1470 //
1471 // TODO(sof): be more clever & compact later objects into
1472 // |currentPage|'s unused slop.
1473 currentPage->link(context.m_compactedPages);
1474 size_t freeSize = currentPage->payloadSize() - allocationPoint;
1475 if (freeSize) {
1476 SET_MEMORY_INACCESSIBLE(compactFrontier, freeSize);
1477 currentPage->arenaForNormalPage()->addToFreeList(compactFrontier,
1478 freeSize);
1479 }
1480
1481 BasePage* nextAvailablePage;
1482 context.m_availablePages->unlink(&nextAvailablePage);
1483 currentPage = reinterpret_cast<NormalPage*>(context.m_availablePages);
1484 context.m_availablePages = nextAvailablePage;
1485 allocationPoint = 0;
1486 compactFrontier = currentPage->payload();
1487 }
1488 if (LIKELY(compactFrontier != headerAddress)) {
1489 #if defined(ADDRESS_SANITIZER)
1490 // Unpoison the header + if it is a vector backing
1491 // store object, let go of the container annotations.
1492 // Do that by unpoisoning the payload entirely.
1493 ASAN_UNPOISON_MEMORY_REGION(header, sizeof(HeapObjectHeader));
1494 if (isVectorArena)
1495 ASAN_UNPOISON_MEMORY_REGION(payload, payloadSize);
1496 #endif
1497 // Use a non-overlapping copy, if possible.
1498 if (currentPage == this)
1499 memmove(compactFrontier, headerAddress, size);
1500 else
1501 memcpy(compactFrontier, headerAddress, size);
1502 compact->relocate(payload, compactFrontier + sizeof(HeapObjectHeader));
1503 }
1504 headerAddress += size;
1505 markedObjectSize += size;
1506 allocationPoint += size;
1507 DCHECK(allocationPoint <= currentPage->payloadSize());
1508 }
1509 if (markedObjectSize)
1510 pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize);
1511
1512 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
1513 defined(MEMORY_SANITIZER)
1514 // Zap the unused portion, until it is either compacted into or freed.
1515 if (currentPage != this) {
1516 FreeList::zapFreedMemory(payload(), payloadSize());
1517 } else {
1518 FreeList::zapFreedMemory(payload() + allocationPoint,
1519 payloadSize() - allocationPoint);
1520 }
1521 #endif
1522 }
1523
1249 void NormalPage::makeConsistentForGC() { 1524 void NormalPage::makeConsistentForGC() {
1250 size_t markedObjectSize = 0; 1525 size_t markedObjectSize = 0;
1251 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { 1526 for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
1252 HeapObjectHeader* header = 1527 HeapObjectHeader* header =
1253 reinterpret_cast<HeapObjectHeader*>(headerAddress); 1528 reinterpret_cast<HeapObjectHeader*>(headerAddress);
1254 ASSERT(header->size() < blinkPagePayloadSize()); 1529 ASSERT(header->size() < blinkPagePayloadSize());
1255 // Check if a free list entry first since we cannot call 1530 // Check if a free list entry first since we cannot call
1256 // isMarked on a free list entry. 1531 // isMarked on a free list entry.
1257 if (header->isFree()) { 1532 if (header->isFree()) {
1258 headerAddress += header->size(); 1533 headerAddress += header->size();
(...skipping 372 matching lines...)
1631 1906
1632 m_hasEntries = true; 1907 m_hasEntries = true;
1633 size_t index = hash(address); 1908 size_t index = hash(address);
1634 ASSERT(!(index & 1)); 1909 ASSERT(!(index & 1));
1635 Address cachePage = roundToBlinkPageStart(address); 1910 Address cachePage = roundToBlinkPageStart(address);
1636 m_entries[index + 1] = m_entries[index]; 1911 m_entries[index + 1] = m_entries[index];
1637 m_entries[index] = cachePage; 1912 m_entries[index] = cachePage;
1638 } 1913 }
1639 1914
1640 } // namespace blink 1915 } // namespace blink
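
To summarize the sliding-compaction scheme that the long comment in NormalPageArena::sweepAndCompact() describes, here is a minimal, self-contained C++ sketch. Everything in it (the toy Page/Header types, appendObject(), the 64-byte page size) is illustrative only and not part of the Blink GC API; the real implementation additionally manages the free page pool, ASan poisoning, and reference fix-up via HeapCompact::relocate().

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

namespace toy {

constexpr size_t kPageSize = 64;  // tiny pages, for illustration only

struct Header {
  size_t size;   // total object size, header included
  bool marked;   // set by a (not shown) mark phase
};

struct Page {
  uint8_t bytes[kPageSize];
  size_t used = 0;  // high-water mark of allocated bytes
};

// Append one object to |page|; returns false if it does not fit.
// Object sizes are assumed to be multiples of alignof(Header).
bool appendObject(Page& page, size_t size, bool marked) {
  if (page.used + size > kPageSize)
    return false;
  Header header = {size, marked};
  std::memcpy(page.bytes + page.used, &header, sizeof(Header));
  page.used += size;
  return true;
}

// Slide all marked objects towards the front of |pages|, reusing the pages
// already compacted from as the destination. Returns how many pages remain
// in use; the rest could be released (decommitted) afterwards.
size_t sweepAndCompact(std::vector<Page>& pages) {
  if (pages.empty())
    return 0;
  size_t dstPage = 0;    // the "current page" of the compaction pointer
  size_t dstOffset = 0;  // the "allocation point" within that page
  for (Page& src : pages) {
    for (size_t off = 0; off < src.used;) {
      Header* header = reinterpret_cast<Header*>(src.bytes + off);
      size_t size = header->size;
      if (header->marked) {
        if (dstOffset + size > kPageSize) {
          // Object does not fit: close the current destination page and
          // advance the compaction pointer to the next one.
          pages[dstPage].used = dstOffset;
          ++dstPage;
          dstOffset = 0;
        }
        uint8_t* dst = pages[dstPage].bytes + dstOffset;
        // Source and destination may overlap when compacting within the
        // same page, hence memmove rather than memcpy.
        std::memmove(dst, src.bytes + off, size);
        reinterpret_cast<Header*>(dst)->marked = false;  // clear for next GC
        dstOffset += size;
      }
      off += size;  // unmarked (dead) objects are simply skipped
    }
  }
  pages[dstPage].used = dstOffset;
  for (size_t i = dstPage + 1; i < pages.size(); ++i)
    pages[i].used = 0;  // evacuated pages; a real heap would decommit these
  return dstPage + 1;
}

}  // namespace toy

int main() {
  std::vector<toy::Page> pages(2);
  toy::appendObject(pages[0], 24, /*marked=*/true);
  toy::appendObject(pages[0], 24, /*marked=*/false);  // garbage
  toy::appendObject(pages[1], 24, /*marked=*/true);
  toy::appendObject(pages[1], 24, /*marked=*/false);  // garbage
  size_t live = toy::sweepAndCompact(pages);
  std::printf("pages in use after compaction: %zu\n", live);  // prints 1
}

The key property, which the patch above relies on as well, is that the compaction pointer never overtakes the sweep position within the page currently being scanned (destination pages are always pages that have already been compacted from), so live objects can be slid down in a single pass using an overlapping memmove.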
