Chromium Code Reviews

Unified Diff: src/heap/heap.cc

Issue 1406133003: [heap] fix crash during the scavenge of ArrayBuffer (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: store-buffer: move IteratePointersToFromSpace (created 5 years, 2 months ago)
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/heap.h"

 #include "src/accessors.h"
 #include "src/api.h"
 #include "src/base/bits.h"
 #include "src/base/once.h"
(...skipping 1871 matching lines...)
       while (!promotion_queue()->is_empty()) {
         HeapObject* target;
         int size;
         promotion_queue()->remove(&target, &size);

         // Promoted object might be already partially visited
         // during old space pointer iteration. Thus we search specifically
         // for pointers to from semispace instead of looking for pointers
         // to new space.
         DCHECK(!target->IsMap());
-        Address obj_address = target->address();

-        // We are not collecting slots on new space objects during mutation
-        // thus we have to scan for pointers to evacuation candidates when we
-        // promote objects. But we should not record any slots in non-black
-        // objects. Grey object's slots would be rescanned.
-        // White object might not survive until the end of collection
-        // it would be a violation of the invariant to record it's slots.
-        bool record_slots = false;
-        if (incremental_marking()->IsCompacting()) {
-          MarkBit mark_bit = Marking::MarkBitFrom(target);
-          record_slots = Marking::IsBlack(mark_bit);
-        }
-#if V8_DOUBLE_FIELDS_UNBOXING
-        LayoutDescriptorHelper helper(target->map());
-        bool has_only_tagged_fields = helper.all_fields_tagged();
-
-        if (!has_only_tagged_fields) {
-          for (int offset = 0; offset < size;) {
-            int end_of_region_offset;
-            if (helper.IsTagged(offset, size, &end_of_region_offset)) {
-              IterateAndMarkPointersToFromSpace(
-                  target, obj_address + offset,
-                  obj_address + end_of_region_offset, record_slots,
-                  &Scavenger::ScavengeObject);
-            }
-            offset = end_of_region_offset;
-          }
-        } else {
-#endif
-          IterateAndMarkPointersToFromSpace(target, obj_address,
-                                            obj_address + size, record_slots,
-                                            &Scavenger::ScavengeObject);
-#if V8_DOUBLE_FIELDS_UNBOXING
-        }
-#endif
+        store_buffer()->IteratePointersToFromSpace(target, size,
+                                                   &Scavenger::ScavengeObject);
       }
     }

     // Take another spin if there are now unswept objects in new space
     // (there are currently no more unswept promoted objects).
   } while (new_space_front != new_space_.top());

   return new_space_front;
 }

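Note for readers of this patch: the deleted block above does not vanish. Per the patch set title ("store-buffer: move IteratePointersToFromSpace"), the logic moves into StoreBuffer, which now receives the promoted object and its size and walks the tagged regions itself. Below is a minimal sketch of what the moved entry point could look like, reconstructed purely from the deleted lines; the heap_ back pointer and the IteratePointersInRegion helper are illustrative assumptions, not contents of the actual patch.

// Sketch reconstructed from the deleted Heap code above; heap_ and
// IteratePointersInRegion are assumed names, not the actual patch.
void StoreBuffer::IteratePointersToFromSpace(HeapObject* target, int size,
                                             ObjectSlotCallback slot_callback) {
  Address obj_address = target->address();

  // Record slots only for black objects: grey objects will be rescanned
  // anyway, and a white object might not survive the collection, so
  // recording its slots would violate the marking invariant.
  bool record_slots = false;
  if (heap_->incremental_marking()->IsCompacting()) {
    MarkBit mark_bit = Marking::MarkBitFrom(target);
    record_slots = Marking::IsBlack(mark_bit);
  }

#if V8_DOUBLE_FIELDS_UNBOXING
  // With unboxed double fields, only the tagged regions of the object may
  // be scanned for from-space pointers.
  LayoutDescriptorHelper helper(target->map());
  if (!helper.all_fields_tagged()) {
    for (int offset = 0; offset < size;) {
      int end_of_region_offset;
      if (helper.IsTagged(offset, size, &end_of_region_offset)) {
        IteratePointersInRegion(target, obj_address + offset,
                                obj_address + end_of_region_offset,
                                record_slots, slot_callback);
      }
      offset = end_of_region_offset;
    }
    return;
  }
#endif
  IteratePointersInRegion(target, obj_address, obj_address + size,
                          record_slots, slot_callback);
}

Moving the walk behind the store buffer keeps the black-object check next to the region iteration, preserving the invariant spelled out in the deleted comment.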
(...skipping 2495 matching lines...)
   while (it.has_next()) {
     NewSpacePage* page = it.next();
     for (Address cursor = page->area_start(), limit = page->area_end();
          cursor < limit; cursor += kPointerSize) {
       Memory::Address_at(cursor) = kFromSpaceZapValue;
     }
   }
 }


-void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
-                                             Address end, bool record_slots,
-                                             ObjectSlotCallback callback) {
-  Address slot_address = start;
-
-  while (slot_address < end) {
-    Object** slot = reinterpret_cast<Object**>(slot_address);
-    Object* target = *slot;
-    // If the store buffer becomes overfull we mark pages as being exempt from
-    // the store buffer. These pages are scanned to find pointers that point
-    // to the new space. In that case we may hit newly promoted objects and
-    // fix the pointers before the promotion queue gets to them. Thus the 'if'.
-    if (target->IsHeapObject()) {
-      if (Heap::InFromSpace(target)) {
-        callback(reinterpret_cast<HeapObject**>(slot),
-                 HeapObject::cast(target));
-        Object* new_target = *slot;
-        if (InNewSpace(new_target)) {
-          SLOW_DCHECK(Heap::InToSpace(new_target));
-          SLOW_DCHECK(new_target->IsHeapObject());
-          store_buffer_.EnterDirectlyIntoStoreBuffer(
-              reinterpret_cast<Address>(slot));
-        }
-        SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_target));
-      } else if (record_slots &&
-                 MarkCompactCollector::IsOnEvacuationCandidate(target)) {
-        mark_compact_collector()->RecordSlot(object, slot, target);
-      }
-    }
-    slot_address += kPointerSize;
-  }
-}
-
-
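The function removed here spells out the slot-update contract that the relocated store-buffer code has to preserve: the callback rewrites the slot in place (to the to-space copy, or to the promoted old-space object), and the iterator re-reads the slot afterwards to decide whether it still needs to be remembered. Below is a self-contained toy model of that contract, compilable on its own; every name in it is hypothetical and none of it is V8 API.

#include <cassert>
#include <vector>

// Toy model only: none of these names exist in V8.
enum class Space { kFromSpace, kToSpace, kOldSpace };

struct ToyObject {
  Space space;           // where the object lives after the "scavenge"
  ToyObject* forwarded;  // survivor that scavenging redirects slots to
};

// Stands in for the Scavenger::ScavengeObject callback: redirect the slot
// to the survivor.
void ToyScavenge(ToyObject** slot) { *slot = (*slot)->forwarded; }

bool ToyInNewSpace(const ToyObject* o) {
  return o->space == Space::kFromSpace || o->space == Space::kToSpace;
}

int main() {
  ToyObject copied{Space::kToSpace, nullptr};     // survivor kept in new space
  ToyObject promoted{Space::kOldSpace, nullptr};  // survivor left new space
  ToyObject a{Space::kFromSpace, &copied};
  ToyObject b{Space::kFromSpace, &promoted};

  ToyObject* slots[] = {&a, &b};
  std::vector<ToyObject**> store_buffer;  // remembered old-to-new slots

  for (ToyObject*& slot : slots) {
    // Mirrors the Heap::InFromSpace(target) check: already-fixed slots
    // are skipped.
    if (slot->space != Space::kFromSpace) continue;
    ToyScavenge(&slot);
    // Re-read the slot: only a survivor still in new space keeps the slot
    // remembered (EnterDirectlyIntoStoreBuffer in the real code).
    if (ToyInNewSpace(slot)) store_buffer.push_back(&slot);
  }

  assert(slots[0] == &copied && store_buffer.size() == 1);
  assert(slots[1] == &promoted);  // now an ordinary old-to-old pointer
  return 0;
}

The second assert is the point of the re-check: once the survivor has been promoted out of new space, the slot is an ordinary old-to-old pointer and keeping it in the store buffer would only waste rescanning time.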
 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
   IterateStrongRoots(v, mode);
   IterateWeakRoots(v, mode);
 }


 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
   v->Synchronize(VisitorSynchronization::kStringTable);
   if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
(...skipping 1633 matching lines...)
 }


 // static
 int Heap::GetStaticVisitorIdForMap(Map* map) {
   return StaticVisitorBase::GetVisitorId(map);
 }

 }  // namespace internal
 }  // namespace v8
