OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/accessors.h" | 7 #include "src/accessors.h" |
8 #include "src/api.h" | 8 #include "src/api.h" |
9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
10 #include "src/base/once.h" | 10 #include "src/base/once.h" |
(...skipping 1885 matching lines...)
1896 HeapObject* target; | 1896 HeapObject* target; |
1897 int size; | 1897 int size; |
1898 promotion_queue()->remove(&target, &size); | 1898 promotion_queue()->remove(&target, &size); |
1899 | 1899 |
1900 // A promoted object might already have been partially visited | 1900 // A promoted object might already have been partially visited |
1901 // during old space pointer iteration, so we search specifically | 1901 // during old space pointer iteration, so we search specifically |
1902 // for pointers into the from semispace rather than for pointers | 1902 // for pointers into the from semispace rather than for pointers |
1903 // into new space. | 1903 // into new space. |
1904 DCHECK(!target->IsMap()); | 1904 DCHECK(!target->IsMap()); |
1905 Address obj_address = target->address(); | 1905 Address obj_address = target->address(); |
| 1906 |
| 1907 // We do not collect slots on new space objects during mutation, |
| 1908 // so we have to scan for pointers to evacuation candidates when we |
| 1909 // promote objects. But we should not record any slots in non-black |
| 1910 // objects: a grey object's slots will be rescanned anyway, and a |
| 1911 // white object might not survive until the end of the collection, |
| 1912 // so recording its slots would violate the invariant. |
| 1913 bool record_slots = false; |
| 1914 if (incremental_marking()->IsCompacting()) { |
| 1915 MarkBit mark_bit = Marking::MarkBitFrom(target); |
| 1916 record_slots = Marking::IsBlack(mark_bit); |
| 1917 } |
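
The hunk above encodes the tri-color rule for slot recording. As a minimal standalone sketch of that decision (the Color enum and ShouldRecordSlots helper below are illustrative names, not V8 API), it reduces to:

// Illustrative sketch only; Color and ShouldRecordSlots are hypothetical
// stand-ins for the Marking::MarkBitFrom / Marking::IsBlack check above.
enum class Color { White, Grey, Black };

bool ShouldRecordSlots(bool compacting, Color color) {
  // Slots are only worth recording while the incremental marker is
  // compacting, i.e. there are evacuation candidates to point at.
  if (!compacting) return false;
  // Black: already scanned and will not be revisited, so its slots must
  // be recorded now. Grey: still queued and will be rescanned anyway.
  // White: may die before the collection ends, so recording its slots
  // would violate the invariant that recorded slots lie in live objects.
  return color == Color::Black;
}
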
1906 #if V8_DOUBLE_FIELDS_UNBOXING | 1918 #if V8_DOUBLE_FIELDS_UNBOXING |
1907 LayoutDescriptorHelper helper(target->map()); | 1919 LayoutDescriptorHelper helper(target->map()); |
1908 bool has_only_tagged_fields = helper.all_fields_tagged(); | 1920 bool has_only_tagged_fields = helper.all_fields_tagged(); |
1909 | 1921 |
1910 if (!has_only_tagged_fields) { | 1922 if (!has_only_tagged_fields) { |
1911 for (int offset = 0; offset < size;) { | 1923 for (int offset = 0; offset < size;) { |
1912 int end_of_region_offset; | 1924 int end_of_region_offset; |
1913 if (helper.IsTagged(offset, size, &end_of_region_offset)) { | 1925 if (helper.IsTagged(offset, size, &end_of_region_offset)) { |
1914 IterateAndMarkPointersToFromSpace( | 1926 IterateAndMarkPointersToFromSpace( |
1915 obj_address + offset, obj_address + end_of_region_offset, | 1927 record_slots, obj_address + offset, |
1916 &ScavengeObject); | 1928 obj_address + end_of_region_offset, &ScavengeObject); |
1917 } | 1929 } |
1918 offset = end_of_region_offset; | 1930 offset = end_of_region_offset; |
1919 } | 1931 } |
1920 } else { | 1932 } else { |
1921 #endif | 1933 #endif |
1922 IterateAndMarkPointersToFromSpace(obj_address, obj_address + size, | 1934 IterateAndMarkPointersToFromSpace( |
1923 &ScavengeObject); | 1935 record_slots, obj_address, obj_address + size, &ScavengeObject); |
1924 #if V8_DOUBLE_FIELDS_UNBOXING | 1936 #if V8_DOUBLE_FIELDS_UNBOXING |
1925 } | 1937 } |
1926 #endif | 1938 #endif |
1927 } | 1939 } |
1928 } | 1940 } |
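
The V8_DOUBLE_FIELDS_UNBOXING path above visits the promoted object as alternating tagged and untagged regions. A minimal sketch of that control flow, assuming hypothetical is_tagged_region and visit_slots callbacks in place of LayoutDescriptorHelper::IsTagged and IterateAndMarkPointersToFromSpace:

// Sketch of the mixed-layout walk; is_tagged_region and visit_slots are
// hypothetical stand-ins, not V8 API.
void WalkMixedLayout(char* base, int size,
                     bool (*is_tagged_region)(int offset, int size,
                                              int* end_of_region),
                     void (*visit_slots)(char* start, char* end)) {
  for (int offset = 0; offset < size;) {
    int end_of_region;
    // Mirroring the code above, the predicate reports where the current
    // region ends whether or not it is tagged, so the loop always advances.
    if (is_tagged_region(offset, size, &end_of_region)) {
      // Only tagged regions may hold heap pointers; unboxed doubles are
      // raw bits and must not be misread as slots.
      visit_slots(base + offset, base + end_of_region);
    }
    offset = end_of_region;
  }
}
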
1929 | 1941 |
1930 // Take another spin if there are now unswept objects in new space | 1942 // Take another spin if there are now unswept objects in new space |
1931 // (there are currently no more unswept promoted objects). | 1943 // (there are currently no more unswept promoted objects). |
1932 } while (new_space_front != new_space_.top()); | 1944 } while (new_space_front != new_space_.top()); |
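
For orientation, the enclosing do-while is a fixpoint loop: scanning new space can promote objects, and draining the promotion queue can copy more objects into new space, so the loop spins until the scan front catches the allocation top. A condensed sketch, with hypothetical helpers standing in for the surrounding V8 machinery:

// Hypothetical stand-ins for the V8 internals involved; illustrative only.
char* ScavengeRegion(char* front, char* top);  // scans [front, top), returns new front
char* NewSpaceTop();                           // new_space_.top()
bool PromotionQueueEmpty();
void DrainOnePromotedObject();                 // the loop body shown above

void DrainNewSpace(char* front) {
  do {
    // Scanning may copy or promote objects, moving NewSpaceTop() forward.
    front = ScavengeRegion(front, NewSpaceTop());
    // Draining promoted objects rescans them for from-space pointers and
    // may itself allocate into new space...
    while (!PromotionQueueEmpty()) DrainOnePromotedObject();
    // ...so take another spin until the front catches up with the top.
  } while (front != NewSpaceTop());
}
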
1933 | 1945 |
(...skipping 2951 matching lines...)
4885 while (it.has_next()) { | 4897 while (it.has_next()) { |
4886 NewSpacePage* page = it.next(); | 4898 NewSpacePage* page = it.next(); |
4887 for (Address cursor = page->area_start(), limit = page->area_end(); | 4899 for (Address cursor = page->area_start(), limit = page->area_end(); |
4888 cursor < limit; cursor += kPointerSize) { | 4900 cursor < limit; cursor += kPointerSize) { |
4889 Memory::Address_at(cursor) = kFromSpaceZapValue; | 4901 Memory::Address_at(cursor) = kFromSpaceZapValue; |
4890 } | 4902 } |
4891 } | 4903 } |
4892 } | 4904 } |
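
The loop above overwrites every pointer-sized word of each from-space page with kFromSpaceZapValue, so a stale read of from-space memory is easy to spot in a debugger. A generic sketch of such zapping (ZapRegion and zap_value are illustrative, not V8 API):

// Illustrative zap helper; zap_value stands in for a debug pattern such
// as V8's kFromSpaceZapValue.
#include <cstddef>
#include <cstdint>

void ZapRegion(void* start, std::size_t size_in_bytes, uintptr_t zap_value) {
  uintptr_t* cursor = static_cast<uintptr_t*>(start);
  uintptr_t* limit = cursor + size_in_bytes / sizeof(uintptr_t);
  // Fill every word so that any later dereference of a stale pointer into
  // this region yields a recognizable pattern instead of silent garbage.
  for (; cursor < limit; ++cursor) *cursor = zap_value;
}
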
4893 | 4905 |
4894 | 4906 |
4895 void Heap::IterateAndMarkPointersToFromSpace(Address start, Address end, | 4907 void Heap::IterateAndMarkPointersToFromSpace(bool record_slots, Address start, |
| 4908 Address end, |
4896 ObjectSlotCallback callback) { | 4909 ObjectSlotCallback callback) { |
4897 Address slot_address = start; | 4910 Address slot_address = start; |
4898 | 4911 |
4899 // We are not collecting slots on new space objects during mutation | |
4900 // thus we have to scan for pointers to evacuation candidates when we | |
4901 // promote objects. But we should not record any slots in non-black | |
4902 // objects. Grey object's slots would be rescanned. | |
4903 // White object might not survive until the end of collection | |
4904 // it would be a violation of the invariant to record it's slots. | |
4905 bool record_slots = false; | |
4906 if (incremental_marking()->IsCompacting()) { | |
4907 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start)); | |
4908 record_slots = Marking::IsBlack(mark_bit); | |
4909 } | |
4910 | |
4911 while (slot_address < end) { | 4912 while (slot_address < end) { |
4912 Object** slot = reinterpret_cast<Object**>(slot_address); | 4913 Object** slot = reinterpret_cast<Object**>(slot_address); |
4913 Object* object = *slot; | 4914 Object* object = *slot; |
4914 // If the store buffer becomes overfull we mark pages as being exempt from | 4915 // If the store buffer becomes overfull we mark pages as being exempt from |
4915 // the store buffer. These pages are scanned to find pointers that point | 4916 // the store buffer. These pages are scanned to find pointers that point |
4916 // to the new space. In that case we may hit newly promoted objects and | 4917 // to the new space. In that case we may hit newly promoted objects and |
4917 // fix the pointers before the promotion queue gets to them. Thus the 'if'. | 4918 // fix the pointers before the promotion queue gets to them. Thus the 'if'. |
4918 if (object->IsHeapObject()) { | 4919 if (object->IsHeapObject()) { |
4919 if (Heap::InFromSpace(object)) { | 4920 if (Heap::InFromSpace(object)) { |
4920 callback(reinterpret_cast<HeapObject**>(slot), | 4921 callback(reinterpret_cast<HeapObject**>(slot), |
(...skipping 1473 matching lines...)
6394 static_cast<int>(object_sizes_last_time_[index])); | 6395 static_cast<int>(object_sizes_last_time_[index])); |
6395 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) | 6396 CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT) |
6396 #undef ADJUST_LAST_TIME_OBJECT_COUNT | 6397 #undef ADJUST_LAST_TIME_OBJECT_COUNT |
6397 | 6398 |
6398 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); | 6399 MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_)); |
6399 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); | 6400 MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_)); |
6400 ClearObjectStats(); | 6401 ClearObjectStats(); |
6401 } | 6402 } |
6402 } | 6403 } |
6403 } // namespace v8::internal | 6404 } // namespace v8::internal |