Chromium Code Reviews

Side by Side Diff: src/spaces.cc

Issue 11085070: Enable --verify-heap in release mode (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: After rebase plus one new issue fix (created 8 years, 2 months ago)
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 479 matching lines...)
490 490
491 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, 491 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
492 Executability executable, 492 Executability executable,
493 Space* owner) { 493 Space* owner) {
494 size_t chunk_size; 494 size_t chunk_size;
495 Heap* heap = isolate_->heap(); 495 Heap* heap = isolate_->heap();
496 Address base = NULL; 496 Address base = NULL;
497 VirtualMemory reservation; 497 VirtualMemory reservation;
498 Address area_start = NULL; 498 Address area_start = NULL;
499 Address area_end = NULL; 499 Address area_end = NULL;
500
501 // Zapping should only occur in debug mode, or in release mode when --verify-heap is on.
502 #ifdef DEBUG
503 bool zap_blocks = true;
504 #else
505 bool zap_blocks = FLAG_verify_heap;
506 #endif
507
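
The new lines above compute the zapping decision once: debug builds always zap freshly allocated chunks, while release builds zap only when --verify-heap is set. A minimal standalone sketch of the same gating pattern, with flag_verify_heap and the 0xDE fill byte as illustrative stand-ins for V8's FLAG_verify_heap and zap constant:

    #include <cstddef>
    #include <cstring>

    static bool flag_verify_heap = false;  // stand-in; set by --verify-heap

    static bool ShouldZapBlocks() {
    #ifdef DEBUG
      return true;                // debug builds always zap
    #else
      return flag_verify_heap;    // release builds zap only when verifying
    #endif
    }

    static void ZapBlock(void* base, std::size_t size) {
      // Fill the block with a recognizable garbage byte so the verifier can
      // catch reads from uninitialized or stale memory.
      std::memset(base, 0xDE, size);
    }
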
500 if (executable == EXECUTABLE) { 508 if (executable == EXECUTABLE) {
501 chunk_size = RoundUp(CodePageAreaStartOffset() + body_size, 509 chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
502 OS::CommitPageSize()) + CodePageGuardSize(); 510 OS::CommitPageSize()) + CodePageGuardSize();
503 511
504 // Check executable memory limit. 512 // Check executable memory limit.
505 if (size_executable_ + chunk_size > capacity_executable_) { 513 if (size_executable_ + chunk_size > capacity_executable_) {
506 LOG(isolate_, 514 LOG(isolate_,
507 StringEvent("MemoryAllocator::AllocateRawMemory", 515 StringEvent("MemoryAllocator::AllocateRawMemory",
508 "V8 Executable Allocation capacity exceeded")); 516 "V8 Executable Allocation capacity exceeded"));
509 return NULL; 517 return NULL;
(...skipping 12 matching lines...)
522 } else { 530 } else {
523 base = AllocateAlignedMemory(chunk_size, 531 base = AllocateAlignedMemory(chunk_size,
524 MemoryChunk::kAlignment, 532 MemoryChunk::kAlignment,
525 executable, 533 executable,
526 &reservation); 534 &reservation);
527 if (base == NULL) return NULL; 535 if (base == NULL) return NULL;
528 // Update executable memory size. 536 // Update executable memory size.
529 size_executable_ += reservation.size(); 537 size_executable_ += reservation.size();
530 } 538 }
531 539
532 #ifdef DEBUG 540 if (zap_blocks) {
533 ZapBlock(base, CodePageGuardStartOffset()); 541 ZapBlock(base, CodePageGuardStartOffset());
534 ZapBlock(base + CodePageAreaStartOffset(), body_size); 542 ZapBlock(base + CodePageAreaStartOffset(), body_size);
535 #endif 543 }
544
536 area_start = base + CodePageAreaStartOffset(); 545 area_start = base + CodePageAreaStartOffset();
537 area_end = area_start + body_size; 546 area_end = area_start + body_size;
538 } else { 547 } else {
539 chunk_size = MemoryChunk::kObjectStartOffset + body_size; 548 chunk_size = MemoryChunk::kObjectStartOffset + body_size;
540 base = AllocateAlignedMemory(chunk_size, 549 base = AllocateAlignedMemory(chunk_size,
541 MemoryChunk::kAlignment, 550 MemoryChunk::kAlignment,
542 executable, 551 executable,
543 &reservation); 552 &reservation);
544 553
545 if (base == NULL) return NULL; 554 if (base == NULL) return NULL;
546 555
547 #ifdef DEBUG 556 if (zap_blocks) {
548 ZapBlock(base, chunk_size); 557 ZapBlock(base, chunk_size);
549 #endif 558 }
550 559
551 area_start = base + Page::kObjectStartOffset; 560 area_start = base + Page::kObjectStartOffset;
552 area_end = base + chunk_size; 561 area_end = base + chunk_size;
553 } 562 }
554 563
555 isolate_->counters()->memory_allocated()-> 564 isolate_->counters()->memory_allocated()->
556 Increment(static_cast<int>(chunk_size)); 565 Increment(static_cast<int>(chunk_size));
557 566
558 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); 567 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
559 if (owner != NULL) { 568 if (owner != NULL) {
(...skipping 55 matching lines...)
615 chunk->size(), 624 chunk->size(),
616 chunk->executable()); 625 chunk->executable());
617 } 626 }
618 } 627 }
619 628
620 629
621 bool MemoryAllocator::CommitBlock(Address start, 630 bool MemoryAllocator::CommitBlock(Address start,
622 size_t size, 631 size_t size,
623 Executability executable) { 632 Executability executable) {
624 if (!VirtualMemory::CommitRegion(start, size, executable)) return false; 633 if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
625 #ifdef DEBUG 634
626 ZapBlock(start, size); 635 // In release mode, only zap the block if --verify-heap is on.
636 #ifndef DEBUG
637 if (FLAG_verify_heap) {
627 #endif 638 #endif
639 ZapBlock(start, size);
640 #ifndef DEBUG
641 }
642 #endif
643
628 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size)); 644 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
629 return true; 645 return true;
630 } 646 }
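
The #ifndef DEBUG bracketing in CommitBlock above preserves unconditional zapping in debug builds while adding the runtime flag check in release builds. An equivalent formulation that confines the preprocessor to one helper, assuming the hypothetical ShouldZapBlocks() from the earlier sketch, would be:

    bool MemoryAllocator::CommitBlock(Address start,
                                      size_t size,
                                      Executability executable) {
      if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
      // Always zap in debug builds; zap under --verify-heap in release builds.
      if (ShouldZapBlocks()) ZapBlock(start, size);
      isolate_->counters()->memory_allocated()->Increment(
          static_cast<int>(size));
      return true;
    }
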
631 647
632 648
633 bool MemoryAllocator::UncommitBlock(Address start, size_t size) { 649 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
634 if (!VirtualMemory::UncommitRegion(start, size)) return false; 650 if (!VirtualMemory::UncommitRegion(start, size)) return false;
635 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); 651 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
636 return true; 652 return true;
637 } 653 }
(...skipping 341 matching lines...)
979 } 995 }
980 heap()->FreeQueuedChunks(); 996 heap()->FreeQueuedChunks();
981 } 997 }
982 998
983 999
984 #ifdef DEBUG 1000 #ifdef DEBUG
985 void PagedSpace::Print() { } 1001 void PagedSpace::Print() { }
986 #endif 1002 #endif
987 1003
988 1004
989 #ifdef DEBUG
990 void PagedSpace::Verify(ObjectVisitor* visitor) { 1005 void PagedSpace::Verify(ObjectVisitor* visitor) {
991 // We can only iterate over the pages if they were swept precisely. 1006 // We can only iterate over the pages if they were swept precisely.
992 if (was_swept_conservatively_) return; 1007 if (was_swept_conservatively_) return;
993 1008
994 bool allocation_pointer_found_in_space = 1009 bool allocation_pointer_found_in_space =
995 (allocation_info_.top == allocation_info_.limit); 1010 (allocation_info_.top == allocation_info_.limit);
996 PageIterator page_iterator(this); 1011 PageIterator page_iterator(this);
997 while (page_iterator.has_next()) { 1012 while (page_iterator.has_next()) {
998 Page* page = page_iterator.next(); 1013 Page* page = page_iterator.next();
999 ASSERT(page->owner() == this); 1014 CHECK(page->owner() == this);
1000 if (page == Page::FromAllocationTop(allocation_info_.top)) { 1015 if (page == Page::FromAllocationTop(allocation_info_.top)) {
1001 allocation_pointer_found_in_space = true; 1016 allocation_pointer_found_in_space = true;
1002 } 1017 }
1003 ASSERT(page->WasSweptPrecisely()); 1018 CHECK(page->WasSweptPrecisely());
1004 HeapObjectIterator it(page, NULL); 1019 HeapObjectIterator it(page, NULL);
1005 Address end_of_previous_object = page->area_start(); 1020 Address end_of_previous_object = page->area_start();
1006 Address top = page->area_end(); 1021 Address top = page->area_end();
1007 int black_size = 0; 1022 int black_size = 0;
1008 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { 1023 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
1009 ASSERT(end_of_previous_object <= object->address()); 1024 CHECK(end_of_previous_object <= object->address());
1010 1025
1011 // The first word should be a map, and we expect all map pointers to 1026 // The first word should be a map, and we expect all map pointers to
1012 // be in map space. 1027 // be in map space.
1013 Map* map = object->map(); 1028 Map* map = object->map();
1014 ASSERT(map->IsMap()); 1029 CHECK(map->IsMap());
1015 ASSERT(heap()->map_space()->Contains(map)); 1030 CHECK(heap()->map_space()->Contains(map));
1016 1031
1017 // Perform space-specific object verification. 1032 // Perform space-specific object verification.
1018 VerifyObject(object); 1033 VerifyObject(object);
1019 1034
1035 #ifdef DEBUG
1020 // The object itself should look OK. 1036 // The object itself should look OK.
1021 object->Verify(); 1037 object->Verify();
1038 #endif
1022 1039
1023 // All the interior pointers should be contained in the heap. 1040 // All the interior pointers should be contained in the heap.
1024 int size = object->Size(); 1041 int size = object->Size();
1025 object->IterateBody(map->instance_type(), size, visitor); 1042 object->IterateBody(map->instance_type(), size, visitor);
1026 if (Marking::IsBlack(Marking::MarkBitFrom(object))) { 1043 if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
1027 black_size += size; 1044 black_size += size;
1028 } 1045 }
1029 1046
1030 ASSERT(object->address() + size <= top); 1047 CHECK(object->address() + size <= top);
1031 end_of_previous_object = object->address() + size; 1048 end_of_previous_object = object->address() + size;
1032 } 1049 }
1033 ASSERT_LE(black_size, page->LiveBytes()); 1050 CHECK_LE(black_size, page->LiveBytes());
1034 } 1051 }
1035 ASSERT(allocation_pointer_found_in_space); 1052 CHECK(allocation_pointer_found_in_space);
1036 } 1053 }
1037 #endif
1038
1039 1054
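
PagedSpace::Verify above loses its #ifdef DEBUG guard and switches ASSERT to CHECK, because ASSERT compiles to nothing in release builds and would make release-mode verification vacuous. A simplified sketch of the distinction (the macro bodies are illustrative, not V8's exact definitions):

    #include <cstdio>
    #include <cstdlib>

    // CHECK fires in every build mode.
    #define CHECK(condition)                                      \
      do {                                                        \
        if (!(condition)) {                                       \
          std::fprintf(stderr, "Check failed: %s\n", #condition); \
          std::abort();                                           \
        }                                                         \
      } while (false)

    // ASSERT exists only in debug builds, so checks written with it
    // vanish from release binaries.
    #ifdef DEBUG
    #define ASSERT(condition) CHECK(condition)
    #else
    #define ASSERT(condition) ((void)0)
    #endif
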
1040 // ----------------------------------------------------------------------------- 1055 // -----------------------------------------------------------------------------
1041 // NewSpace implementation 1056 // NewSpace implementation
1042 1057
1043 1058
1044 bool NewSpace::SetUp(int reserved_semispace_capacity, 1059 bool NewSpace::SetUp(int reserved_semispace_capacity,
1045 int maximum_semispace_capacity) { 1060 int maximum_semispace_capacity) {
1046 // Set up new space based on the preallocated memory block defined by 1061 // Set up new space based on the preallocated memory block defined by
1047 // start and size. The provided space is divided into two semi-spaces. 1062 // start and size. The provided space is divided into two semi-spaces.
1048 // To support fast containment testing in the new space, the size of 1063 // To support fast containment testing in the new space, the size of
(...skipping 203 matching lines...)
1252 heap()->incremental_marking()->Step( 1267 heap()->incremental_marking()->Step(
1253 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD); 1268 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
1254 top_on_previous_step_ = to_space_.page_low(); 1269 top_on_previous_step_ = to_space_.page_low();
1255 return AllocateRaw(size_in_bytes); 1270 return AllocateRaw(size_in_bytes);
1256 } else { 1271 } else {
1257 return Failure::RetryAfterGC(); 1272 return Failure::RetryAfterGC();
1258 } 1273 }
1259 } 1274 }
1260 1275
1261 1276
1262 #ifdef DEBUG 1277
1263 // We do not use the SemiSpaceIterator because verification doesn't assume 1278 // We do not use the SemiSpaceIterator because verification doesn't assume
1264 // that it works (it depends on the invariants we are checking). 1279 // that it works (it depends on the invariants we are checking).
1265 void NewSpace::Verify() { 1280 void NewSpace::Verify() {
1266 // The allocation pointer should be in the space or at the very end. 1281 // The allocation pointer should be in the space or at the very end.
1267 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); 1282 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1268 1283
1269 // There should be objects packed in from the low address up to the 1284 // There should be objects packed in from the low address up to the
1270 // allocation pointer. 1285 // allocation pointer.
1271 Address current = to_space_.first_page()->area_start(); 1286 Address current = to_space_.first_page()->area_start();
1272 CHECK_EQ(current, to_space_.space_start()); 1287 CHECK_EQ(current, to_space_.space_start());
1273 1288
1274 while (current != top()) { 1289 while (current != top()) {
1275 if (!NewSpacePage::IsAtEnd(current)) { 1290 if (!NewSpacePage::IsAtEnd(current)) {
1276 // The allocation pointer should not be in the middle of an object. 1291 // The allocation pointer should not be in the middle of an object.
1277 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) || 1292 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1278 current < top()); 1293 current < top());
1279 1294
1280 HeapObject* object = HeapObject::FromAddress(current); 1295 HeapObject* object = HeapObject::FromAddress(current);
1281 1296
1282 // The first word should be a map, and we expect all map pointers to 1297 // The first word should be a map, and we expect all map pointers to
1283 // be in map space. 1298 // be in map space.
1284 Map* map = object->map(); 1299 Map* map = object->map();
1285 CHECK(map->IsMap()); 1300 CHECK(map->IsMap());
1286 CHECK(heap()->map_space()->Contains(map)); 1301 CHECK(heap()->map_space()->Contains(map));
1287 1302
1288 // The object should not be code or a map. 1303 // The object should not be code or a map.
1289 CHECK(!object->IsMap()); 1304 CHECK(!object->IsMap());
1290 CHECK(!object->IsCode()); 1305 CHECK(!object->IsCode());
1291 1306
1307 #ifdef DEBUG
1292 // The object itself should look OK. 1308 // The object itself should look OK.
1293 object->Verify(); 1309 object->Verify();
1310 #endif
1294 1311
1295 // All the interior pointers should be contained in the heap. 1312 // All the interior pointers should be contained in the heap.
1296 VerifyPointersVisitor visitor; 1313 VerifyPointersVisitor visitor;
1297 int size = object->Size(); 1314 int size = object->Size();
1298 object->IterateBody(map->instance_type(), size, &visitor); 1315 object->IterateBody(map->instance_type(), size, &visitor);
1299 1316
1300 current += size; 1317 current += size;
1301 } else { 1318 } else {
1302 // At end of page, switch to next page. 1319 // At end of page, switch to next page.
1303 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page(); 1320 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1304 // Next page should be valid. 1321 // Next page should be valid.
1305 CHECK(!page->is_anchor()); 1322 CHECK(!page->is_anchor());
1306 current = page->area_start(); 1323 current = page->area_start();
1307 } 1324 }
1308 } 1325 }
1309 1326
1310 // Check semi-spaces. 1327 // Check semi-spaces.
1311 ASSERT_EQ(from_space_.id(), kFromSpace); 1328 CHECK_EQ(from_space_.id(), kFromSpace);
1312 ASSERT_EQ(to_space_.id(), kToSpace); 1329 CHECK_EQ(to_space_.id(), kToSpace);
1313 from_space_.Verify(); 1330 from_space_.Verify();
1314 to_space_.Verify(); 1331 to_space_.Verify();
1315 } 1332 }
1316 #endif 1333
1317 1334
1318 // ----------------------------------------------------------------------------- 1335 // -----------------------------------------------------------------------------
1319 // SemiSpace implementation 1336 // SemiSpace implementation
1320 1337
1321 void SemiSpace::SetUp(Address start, 1338 void SemiSpace::SetUp(Address start,
1322 int initial_capacity, 1339 int initial_capacity,
1323 int maximum_capacity) { 1340 int maximum_capacity) {
1324 // Creates a space in the young generation. The constructor does not 1341 // Creates a space in the young generation. The constructor does not
1325 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of 1342 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
1326 // memory of size 'capacity' when set up, and does not grow or shrink 1343 // memory of size 'capacity' when set up, and does not grow or shrink
(...skipping 191 matching lines...)
1518 // Mark all pages up to the one containing mark. 1535 // Mark all pages up to the one containing mark.
1519 NewSpacePageIterator it(space_start(), mark); 1536 NewSpacePageIterator it(space_start(), mark);
1520 while (it.has_next()) { 1537 while (it.has_next()) {
1521 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); 1538 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1522 } 1539 }
1523 } 1540 }
1524 1541
1525 1542
1526 #ifdef DEBUG 1543 #ifdef DEBUG
1527 void SemiSpace::Print() { } 1544 void SemiSpace::Print() { }
1528 1545 #endif
1529 1546
1530 void SemiSpace::Verify() { 1547 void SemiSpace::Verify() {
1531 bool is_from_space = (id_ == kFromSpace); 1548 bool is_from_space = (id_ == kFromSpace);
1532 NewSpacePage* page = anchor_.next_page(); 1549 NewSpacePage* page = anchor_.next_page();
1533 CHECK(anchor_.semi_space() == this); 1550 CHECK(anchor_.semi_space() == this);
1534 while (page != &anchor_) { 1551 while (page != &anchor_) {
1535 CHECK(page->semi_space() == this); 1552 CHECK(page->semi_space() == this);
1536 CHECK(page->InNewSpace()); 1553 CHECK(page->InNewSpace());
1537 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE 1554 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1538 : MemoryChunk::IN_TO_SPACE)); 1555 : MemoryChunk::IN_TO_SPACE));
(...skipping 11 matching lines...)
1550 } 1567 }
1551 // TODO(gc): Check that the live_bytes_count_ field matches the 1568 // TODO(gc): Check that the live_bytes_count_ field matches the
1552 // black marking on the page (if we make it match in new-space). 1569 // black marking on the page (if we make it match in new-space).
1553 } 1570 }
1554 CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)); 1571 CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1555 CHECK(page->prev_page()->next_page() == page); 1572 CHECK(page->prev_page()->next_page() == page);
1556 page = page->next_page(); 1573 page = page->next_page();
1557 } 1574 }
1558 } 1575 }
1559 1576
1560 1577 #ifdef DEBUG
1561 void SemiSpace::AssertValidRange(Address start, Address end) { 1578 void SemiSpace::AssertValidRange(Address start, Address end) {
1562 // Addresses belong to same semi-space 1579 // Addresses belong to same semi-space
1563 NewSpacePage* page = NewSpacePage::FromLimit(start); 1580 NewSpacePage* page = NewSpacePage::FromLimit(start);
1564 NewSpacePage* end_page = NewSpacePage::FromLimit(end); 1581 NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1565 SemiSpace* space = page->semi_space(); 1582 SemiSpace* space = page->semi_space();
1566 CHECK_EQ(space, end_page->semi_space()); 1583 CHECK_EQ(space, end_page->semi_space());
1567 // Start address is before end address, either on same page, 1584 // Start address is before end address, either on same page,
1568 // or end address is on a later page in the linked list of 1585 // or end address is on a later page in the linked list of
1569 // semi-space pages. 1586 // semi-space pages.
1570 if (page == end_page) { 1587 if (page == end_page) {
(...skipping 974 matching lines...)
2545 accounting_stats_.AllocateBytes(free_list_.available()); 2562 accounting_stats_.AllocateBytes(free_list_.available());
2546 2563
2547 // Clear the free list before a full GC---it will be rebuilt afterward. 2564 // Clear the free list before a full GC---it will be rebuilt afterward.
2548 free_list_.Reset(); 2565 free_list_.Reset();
2549 } 2566 }
2550 2567
2551 2568
2552 // ----------------------------------------------------------------------------- 2569 // -----------------------------------------------------------------------------
2553 // MapSpace implementation 2570 // MapSpace implementation
2554 2571
2555 #ifdef DEBUG
2556 void MapSpace::VerifyObject(HeapObject* object) { 2572 void MapSpace::VerifyObject(HeapObject* object) {
2557 // The object should be a map or a free-list node. 2573 // The object should be a map or a free-list node.
2558 ASSERT(object->IsMap() || object->IsFreeSpace()); 2574 CHECK(object->IsMap() || object->IsFreeSpace());
2559 } 2575 }
2560 #endif
2561 2576
2562 2577
2563 // ----------------------------------------------------------------------------- 2578 // -----------------------------------------------------------------------------
2564 // GlobalPropertyCellSpace implementation 2579 // GlobalPropertyCellSpace implementation
2565 2580
2566 #ifdef DEBUG
2567 void CellSpace::VerifyObject(HeapObject* object) { 2581 void CellSpace::VerifyObject(HeapObject* object) {
2568 // The object should be a global object property cell or a free-list node. 2582 // The object should be a global object property cell or a free-list node.
2569 ASSERT(object->IsJSGlobalPropertyCell() || 2583 CHECK(object->IsJSGlobalPropertyCell() ||
2570 object->map() == heap()->two_pointer_filler_map()); 2584 object->map() == heap()->two_pointer_filler_map());
2571 } 2585 }
2572 #endif
2573 2586
2574 2587
2575 // ----------------------------------------------------------------------------- 2588 // -----------------------------------------------------------------------------
2576 // LargeObjectIterator 2589 // LargeObjectIterator
2577 2590
2578 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) { 2591 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2579 current_ = space->first_page_; 2592 current_ = space->first_page_;
2580 size_func_ = NULL; 2593 size_func_ = NULL;
2581 } 2594 }
2582 2595
(...skipping 89 matching lines...)
2672 for (uintptr_t key = base; key <= limit; key++) { 2685 for (uintptr_t key = base; key <= limit; key++) {
2673 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key), 2686 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
2674 static_cast<uint32_t>(key), 2687 static_cast<uint32_t>(key),
2675 true); 2688 true);
2676 ASSERT(entry != NULL); 2689 ASSERT(entry != NULL);
2677 entry->value = page; 2690 entry->value = page;
2678 } 2691 }
2679 2692
2680 HeapObject* object = page->GetObject(); 2693 HeapObject* object = page->GetObject();
2681 2694
2682 #ifdef DEBUG 2695 #ifndef DEBUG
2683 // Make the object consistent so the heap can be verified in OldSpaceStep. 2696 if (FLAG_verify_heap) {
2684 reinterpret_cast<Object**>(object->address())[0] = 2697 #endif
2685 heap()->fixed_array_map(); 2698 // Make the object consistent so the heap can be verified in OldSpaceStep.
2686 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0); 2699 // We only need to do this in debug builds or if verify_heap is on.
2700 reinterpret_cast<Object**>(object->address())[0] =
2701 heap()->fixed_array_map();
2702 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
2703 #ifndef DEBUG
2704 }
2687 #endif 2705 #endif
2688 2706
2689 heap()->incremental_marking()->OldSpaceStep(object_size); 2707 heap()->incremental_marking()->OldSpaceStep(object_size);
2690 return object; 2708 return object;
2691 } 2709 }
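
The guard above seeds the freshly allocated large object with a fixed-array map and zero length so the verifier invoked from OldSpaceStep can walk the chunk before the caller initializes it. Restated without the split #ifndef DEBUG blocks, assuming a hypothetical ShouldVerifyHeap() helper (true in debug builds, FLAG_verify_heap otherwise):

    if (ShouldVerifyHeap()) {
      // A valid map word and length make the uninitialized object safe for
      // the heap verifier to iterate over.
      reinterpret_cast<Object**>(object->address())[0] =
          heap()->fixed_array_map();
      reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
    }
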
2692 2710
2693 2711
2694 // GC support 2712 // GC support
2695 MaybeObject* LargeObjectSpace::FindObject(Address a) { 2713 MaybeObject* LargeObjectSpace::FindObject(Address a) {
2696 LargePage* page = FindPage(a); 2714 LargePage* page = FindPage(a);
(...skipping 79 matching lines...)
2776 MemoryChunk* chunk = MemoryChunk::FromAddress(address); 2794 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
2777 2795
2778 bool owned = (chunk->owner() == this); 2796 bool owned = (chunk->owner() == this);
2779 2797
2780 SLOW_ASSERT(!owned || !FindObject(address)->IsFailure()); 2798 SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
2781 2799
2782 return owned; 2800 return owned;
2783 } 2801 }
2784 2802
2785 2803
2786 #ifdef DEBUG
2787 // We do not assume that the large object iterator works, because it depends 2804 // We do not assume that the large object iterator works, because it depends
2788 // on the invariants we are checking during verification. 2805 // on the invariants we are checking during verification.
2789 void LargeObjectSpace::Verify() { 2806 void LargeObjectSpace::Verify() {
2790 for (LargePage* chunk = first_page_; 2807 for (LargePage* chunk = first_page_;
2791 chunk != NULL; 2808 chunk != NULL;
2792 chunk = chunk->next_page()) { 2809 chunk = chunk->next_page()) {
2793 // Each chunk contains an object that starts at the large object page's 2810 // Each chunk contains an object that starts at the large object page's
2794 // object area start. 2811 // object area start.
2795 HeapObject* object = chunk->GetObject(); 2812 HeapObject* object = chunk->GetObject();
2796 Page* page = Page::FromAddress(object->address()); 2813 Page* page = Page::FromAddress(object->address());
2797 ASSERT(object->address() == page->area_start()); 2814 CHECK(object->address() == page->area_start());
2798 2815
2799 // The first word should be a map, and we expect all map pointers to be 2816 // The first word should be a map, and we expect all map pointers to be
2800 // in map space. 2817 // in map space.
2801 Map* map = object->map(); 2818 Map* map = object->map();
2802 ASSERT(map->IsMap()); 2819 CHECK(map->IsMap());
2803 ASSERT(heap()->map_space()->Contains(map)); 2820 CHECK(heap()->map_space()->Contains(map));
2804 2821
2805 // We have only code, sequential strings, external strings 2822 // We have only code, sequential strings, external strings
2806 // (sequential strings that have been morphed into external 2823 // (sequential strings that have been morphed into external
2807 // strings), fixed arrays, and byte arrays in large object space. 2824 // strings), fixed arrays, and byte arrays in large object space.
2808 ASSERT(object->IsCode() || object->IsSeqString() || 2825 CHECK(object->IsCode() || object->IsSeqString() ||
2809 object->IsExternalString() || object->IsFixedArray() || 2826 object->IsExternalString() || object->IsFixedArray() ||
2810 object->IsFixedDoubleArray() || object->IsByteArray()); 2827 object->IsFixedDoubleArray() || object->IsByteArray());
2811 2828
2829 #ifdef DEBUG
2812 // The object itself should look OK. 2830 // The object itself should look OK.
2813 object->Verify(); 2831 object->Verify();
2832 #endif
2814 2833
2815 // Byte arrays and strings don't have interior pointers. 2834 // Byte arrays and strings don't have interior pointers.
2816 if (object->IsCode()) { 2835 if (object->IsCode()) {
2817 VerifyPointersVisitor code_visitor; 2836 VerifyPointersVisitor code_visitor;
2818 object->IterateBody(map->instance_type(), 2837 object->IterateBody(map->instance_type(),
2819 object->Size(), 2838 object->Size(),
2820 &code_visitor); 2839 &code_visitor);
2821 } else if (object->IsFixedArray()) { 2840 } else if (object->IsFixedArray()) {
2822 FixedArray* array = FixedArray::cast(object); 2841 FixedArray* array = FixedArray::cast(object);
2823 for (int j = 0; j < array->length(); j++) { 2842 for (int j = 0; j < array->length(); j++) {
2824 Object* element = array->get(j); 2843 Object* element = array->get(j);
2825 if (element->IsHeapObject()) { 2844 if (element->IsHeapObject()) {
2826 HeapObject* element_object = HeapObject::cast(element); 2845 HeapObject* element_object = HeapObject::cast(element);
2827 ASSERT(heap()->Contains(element_object)); 2846 CHECK(heap()->Contains(element_object));
2828 ASSERT(element_object->map()->IsMap()); 2847 CHECK(element_object->map()->IsMap());
2829 } 2848 }
2830 } 2849 }
2831 } 2850 }
2832 } 2851 }
2833 } 2852 }
2834 2853
2835 2854 #ifdef DEBUG
2836 void LargeObjectSpace::Print() { 2855 void LargeObjectSpace::Print() {
2837 LargeObjectIterator it(this); 2856 LargeObjectIterator it(this);
2838 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { 2857 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
2839 obj->Print(); 2858 obj->Print();
2840 } 2859 }
2841 } 2860 }
2842 2861
2843 2862
2844 void LargeObjectSpace::ReportStatistics() { 2863 void LargeObjectSpace::ReportStatistics() {
2845 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_); 2864 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
(...skipping 42 matching lines...)
2888 object->ShortPrint(); 2907 object->ShortPrint();
2889 PrintF("\n"); 2908 PrintF("\n");
2890 } 2909 }
2891 printf(" --------------------------------------\n"); 2910 printf(" --------------------------------------\n");
2892 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); 2911 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
2893 } 2912 }
2894 2913
2895 #endif // DEBUG 2914 #endif // DEBUG
2896 2915
2897 } } // namespace v8::internal 2916 } } // namespace v8::internal