Chromium Code Reviews

Unified Diff: src/heap.cc

Issue 8700: As discussed on the phone, I'd like your thoughts on the... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 12 years, 1 month ago
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 78 matching lines...)
int Heap::young_generation_size_ = 0;  // Will be 2 * semispace_size_.

// Double the new space after this many scavenge collections.
int Heap::new_space_growth_limit_ = 8;
int Heap::scavenge_count_ = 0;
Heap::HeapState Heap::gc_state_ = NOT_IN_GC;

int Heap::mc_count_ = 0;
int Heap::gc_count_ = 0;

+int Heap::always_allocate_scope_depth_ = 0;
+
#ifdef DEBUG
bool Heap::allocation_allowed_ = true;

int Heap::allocation_timeout_ = 0;
bool Heap::disallow_allocation_failure_ = false;
#endif  // DEBUG


int Heap::Capacity() {
  if (!HasBeenSetup()) return 0;
(...skipping 909 matching lines...)
  ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
  return true;
}


Object* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate heap numbers in paged
  // spaces.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
-  Object* result = AllocateRaw(HeapNumber::kSize, space);
+  Object* result = AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
  if (result->IsFailure()) return result;

  HeapObject::cast(result)->set_map(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}
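
For context on the new third argument: AllocateRaw is defined in heap-inl.h and is not part of this diff. The following standalone sketch only models the presumed contract (all names here are illustrative stand-ins): try the requested space first, and inside an always-allocate scope retry a failed new-space allocation in the caller-supplied old space instead of returning a failure.

// Standalone model of the assumed AllocateRaw(size, space, retry_space)
// contract; the real definition lives in heap-inl.h.
enum AllocationSpace { NEW_SPACE, OLD_POINTER_SPACE, OLD_DATA_SPACE, LO_SPACE };

extern int always_allocate_scope_depth;             // models the new counter
inline bool always_allocate() { return always_allocate_scope_depth > 0; }

void* TryAllocate(int size, AllocationSpace space);  // stand-in, may return null

void* AllocateRawModel(int size, AllocationSpace space,
                       AllocationSpace retry_space) {
  void* result = TryAllocate(size, space);
  // Inside an always-allocate scope, a new-space failure is retried in
  // the caller-supplied old space instead of being reported to the caller.
  if (result == nullptr && space == NEW_SPACE && always_allocate()) {
    result = TryAllocate(size, retry_space);
  }
  return result;
}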


Object* Heap::AllocateHeapNumber(double value) {
+  // Use the general version if we're forced to always allocate.
+  if (always_allocate()) return AllocateHeapNumber(value, NOT_TENURED);

Erik Corry 2008/10/30 09:08:43: I wonder whether this will be a performance hit.

  // This version of AllocateHeapNumber is optimized for
  // allocation in new space.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  Object* result = new_space_.AllocateRaw(HeapNumber::kSize);
  if (result->IsFailure()) return result;
  HeapObject::cast(result)->set_map(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}
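
The counter behind always_allocate() is the always_allocate_scope_depth_ field added near the top of this file; the scope class that manipulates it is not in this diff. A sketch of the presumed RAII pattern, not V8's actual definition:

// Presumed RAII pattern behind always_allocate().
int always_allocate_scope_depth = 0;  // models Heap::always_allocate_scope_depth_

class AlwaysAllocateScope {
 public:
  AlwaysAllocateScope()  { ++always_allocate_scope_depth; }
  ~AlwaysAllocateScope() { --always_allocate_scope_depth; }
};

// While such a scope is live, allocation paths like the one above divert to
// the general allocators, which may place the result in old space rather
// than report failure.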
(...skipping 476 matching lines...)
  String::cast(result)->Set(0, code);
  return result;
}


Object* Heap::AllocateByteArray(int length) {
  int size = ByteArray::SizeFor(length);
  AllocationSpace space =
      size > MaxHeapObjectSize() ? LO_SPACE : NEW_SPACE;

-  Object* result = AllocateRaw(size, space);
+  Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);

  if (result->IsFailure()) return result;

  reinterpret_cast<Array*>(result)->set_map(byte_array_map());
  reinterpret_cast<Array*>(result)->set_length(length);
  return result;
}


Object* Heap::CreateCode(const CodeDesc& desc,
(...skipping 52 matching lines...)
  // Relocate the copy.
  Code* new_code = Code::cast(result);
  new_code->Relocate(new_addr - old_addr);
  return new_code;
}


Object* Heap::Allocate(Map* map, AllocationSpace space) {
  ASSERT(gc_state_ == NOT_IN_GC);
  ASSERT(map->instance_type() != MAP_TYPE);
-  Object* result = AllocateRaw(map->instance_size(), space);
+  Object* result = AllocateRaw(map->instance_size(),
+                               space,
+                               TargetSpaceId(map->instance_type()));
  if (result->IsFailure()) return result;
  HeapObject::cast(result)->set_map(map);
  return result;
}
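
TargetSpaceId is introduced elsewhere in this patch series and is not shown here; presumably it picks the old space an object of a given instance type belongs in, so the retry space matches what promotion would choose. A hedged sketch under that assumption (the real function certainly covers more instance types):

// Guess at what TargetSpaceId(instance_type) decides; illustrative only.
enum AllocationSpace { NEW_SPACE, OLD_POINTER_SPACE, OLD_DATA_SPACE, LO_SPACE };
enum InstanceType { HEAP_NUMBER_TYPE, BYTE_ARRAY_TYPE, SEQ_ASCII_STRING_TYPE,
                    FIXED_ARRAY_TYPE, JS_OBJECT_TYPE };

AllocationSpace TargetSpaceIdModel(InstanceType type) {
  switch (type) {
    case HEAP_NUMBER_TYPE:       // pointer-free payloads can live in the
    case BYTE_ARRAY_TYPE:        // data space, which the write barrier
    case SEQ_ASCII_STRING_TYPE:  // never has to scan
      return OLD_DATA_SPACE;
    default:                     // anything that may hold tagged pointers
      return OLD_POINTER_SPACE;
  }
}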


Object* Heap::InitializeFunction(JSFunction* function,
                                 SharedFunctionInfo* shared,
                                 Object* prototype) {
  ASSERT(!prototype->IsMap());
(...skipping 39 matching lines...)
  // This calls Copy directly rather than using Heap::AllocateRaw, so we
  // duplicate the check here.
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);

  JSObject* boilerplate =
      Top::context()->global_context()->arguments_boilerplate();

  // Make the clone.
  Map* map = boilerplate->map();
  int object_size = map->instance_size();
-  Object* result = new_space_.AllocateRaw(object_size);
+  Object* result = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
  if (result->IsFailure()) return result;
-  ASSERT(Heap::InNewSpace(result));

-  // Copy the content.
+  // Copy the content. The arguments boilerplate doesn't have any
+  // fields that point to new space, so it's safe to skip the write
+  // barrier here.
  CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()),
            reinterpret_cast<Object**>(boilerplate->address()),
            object_size);

  // Set the two properties.
-  JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
-                                                callee,
-                                                SKIP_WRITE_BARRIER);
+  JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
+                                                callee);
  JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index,
                                                Smi::FromInt(length),
                                                SKIP_WRITE_BARRIER);

  // Check the state of the object.
  ASSERT(JSObject::cast(result)->HasFastProperties());
  ASSERT(JSObject::cast(result)->HasFastElements());

  return result;
}
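
A note on the barrier changes above: the callee store loses SKIP_WRITE_BARRIER because the clone may now land in old space, while the length store keeps it because a Smi can never be a heap reference. A minimal model of that distinction; the tag layout below is the classic V8 scheme and is stated as an assumption, not quoted from this patch:

// Why Smi stores may always skip the write barrier: a Smi is an immediate
// integer, never a pointer into new space.
#include <cstdint>

inline bool IsSmiModel(intptr_t tagged) {
  return (tagged & 1) == 0;        // smi tag 0; heap object pointers carry 1
}

inline bool StoreNeedsWriteBarrierModel(intptr_t new_value) {
  return !IsSmiModel(new_value);   // only heap pointers need recording
}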
(...skipping 87 matching lines...)


Object* Heap::CopyJSObject(JSObject* source) {
  // Never used to copy functions. If functions need to be copied we
  // have to be careful to clear the literals array.
  ASSERT(!source->IsJSFunction());

  // Make the clone.
  Map* map = source->map();
  int object_size = map->instance_size();
-  Object* clone = new_space_.AllocateRaw(object_size);
-  if (clone->IsFailure()) return clone;
-  ASSERT(Heap::InNewSpace(clone));
+  Object* clone;

-  // Copy the content.
-  CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()),
-            reinterpret_cast<Object**>(source->address()),
-            object_size);
+  // If we're forced to always allocate, we use the general allocation
+  // functions, which may leave us with an object in old space.
+  if (always_allocate()) {
+    clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
+    if (clone->IsFailure()) return clone;
+    Address clone_address = HeapObject::cast(clone)->address();
+    CopyBlock(reinterpret_cast<Object**>(clone_address),
+              reinterpret_cast<Object**>(source->address()),
+              object_size);
+    // Update the write barrier for all fields that lie beyond the header.
+    for (int offset = JSObject::kHeaderSize;
+         offset < object_size;
+         offset += kPointerSize) {
+      RecordWrite(clone_address, offset);
+    }
+  } else {
+    clone = new_space_.AllocateRaw(object_size);
+    if (clone->IsFailure()) return clone;
+    ASSERT(Heap::InNewSpace(clone));
+    // Since we know the clone is allocated in new space, we can copy
+    // the contents without worrying about updating the write barrier.
+    CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()),
+              reinterpret_cast<Object**>(source->address()),
+              object_size);
+  }

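The RecordWrite loop is the cost of the old-space path: every field of the copy must be re-registered so a later scavenge finds any new-space pointers copied in from the source. A toy model of the assumed semantics; the real mechanism of this era is a page-based dirty-bit scheme, not a container:

// Sketch of the assumed RecordWrite(address, offset) effect: remember the
// slot so a later scavenge re-examines it for pointers into new space.
#include <cstdint>
#include <set>

typedef uint8_t* Address;
static std::set<Address> remembered_slots;   // stand-in remembered set

void RecordWriteModel(Address object_address, int offset) {
  remembered_slots.insert(object_address + offset);
}
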
  FixedArray* elements = FixedArray::cast(source->elements());
  FixedArray* properties = FixedArray::cast(source->properties());
  // Update elements if necessary.
  if (elements->length() > 0) {
    Object* elem = CopyFixedArray(elements);
    if (elem->IsFailure()) return elem;
    JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
  }
  // Update properties if necessary.
(...skipping 201 matching lines...)
      map = medium_symbol_map();
    } else {
      map = long_symbol_map();
    }
    size = SeqTwoByteString::SizeFor(chars);
  }

  // Allocate string.
  AllocationSpace space =
      (size > MaxHeapObjectSize()) ? LO_SPACE : OLD_DATA_SPACE;
-  Object* result = AllocateRaw(size, space);
+  Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
  if (result->IsFailure()) return result;

  reinterpret_cast<HeapObject*>(result)->set_map(map);
  // The hash value contains the length of the string.
  String::cast(result)->set_length_field(length_field);

  ASSERT_EQ(size, String::cast(result)->Size());

  // Fill in the characters.
  for (int i = 0; i < chars; i++) {
    String::cast(result)->Set(i, buffer->GetNext());
  }
  return result;
}


Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  int size = SeqAsciiString::SizeFor(length);
  if (size > MaxHeapObjectSize()) {
    space = LO_SPACE;
  }

  // Use AllocateRaw rather than Allocate because the object's size cannot be
  // determined from the map.
-  Object* result = AllocateRaw(size, space);
+  Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
  if (result->IsFailure()) return result;

  // Determine the map based on the string's length.
  Map* map;
  if (length <= String::kMaxShortStringSize) {
    map = short_ascii_string_map();
  } else if (length <= String::kMaxMediumStringSize) {
    map = medium_ascii_string_map();
  } else {
    map = long_ascii_string_map();
  }

  // Partially initialize the object.
  HeapObject::cast(result)->set_map(map);
  String::cast(result)->set_length(length);
  ASSERT_EQ(size, HeapObject::cast(result)->Size());
  return result;
}


Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  int size = SeqTwoByteString::SizeFor(length);
  if (size > MaxHeapObjectSize()) {
    space = LO_SPACE;
  }

  // Use AllocateRaw rather than Allocate because the object's size cannot be
  // determined from the map.
-  Object* result = AllocateRaw(size, space);
+  Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
  if (result->IsFailure()) return result;

  // Determine the map based on the string's length.
  Map* map;
  if (length <= String::kMaxShortStringSize) {
    map = short_string_map();
  } else if (length <= String::kMaxMediumStringSize) {
    map = medium_string_map();
  } else {
    map = long_string_map();
  }

  // Partially initialize the object.
  HeapObject::cast(result)->set_map(map);
  String::cast(result)->set_length(length);
  ASSERT_EQ(size, HeapObject::cast(result)->Size());
  return result;
}


Object* Heap::AllocateEmptyFixedArray() {
  int size = FixedArray::SizeFor(0);
-  Object* result = AllocateRaw(size, OLD_DATA_SPACE);
+  Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
  if (result->IsFailure()) return result;
  // Initialize the object.
  reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
  reinterpret_cast<Array*>(result)->set_length(0);
  return result;
}


Object* Heap::AllocateRawFixedArray(int length) {
+  // Use the general function if we're forced to always allocate.
+  if (always_allocate()) return AllocateFixedArray(length, NOT_TENURED);
  // Allocate the raw data for a fixed array.
  int size = FixedArray::SizeFor(length);
-  return (size > MaxHeapObjectSize())
-      ? lo_space_->AllocateRawFixedArray(size)
-      : new_space_.AllocateRaw(size);
+  if (size > MaxHeapObjectSize()) {
+    return lo_space_->AllocateRawFixedArray(size);
+  } else {
+    return new_space_.AllocateRaw(size);
+  }
}
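
Callers of AllocateRawFixedArray have so far assumed a new-space result and skipped write barriers accordingly; with the always-allocate fallback the general function is used, so callers that re-check InNewSpace (as CopyFixedArray does below) stay correct. A condensed model of that caller-side contract, with stand-in names only:

// A raw fixed array may now come back in old space, so copying code must
// branch on the actual space instead of assuming new space.
struct ArrayModel { int length; void* slots[1]; };

bool InNewSpaceModel(void* obj);                  // stand-in predicate
void RecordSlotWrite(void* obj, int slot_index);  // stand-in write barrier

void CopySlotsModel(ArrayModel* dst, ArrayModel* src) {
  for (int i = 0; i < src->length; i++) {
    dst->slots[i] = src->slots[i];
    // Old-space destinations must record each pointer store; new-space
    // destinations may skip the barrier, as CopyFixedArray below does.
    if (!InNewSpaceModel(dst)) RecordSlotWrite(dst, i);
  }
}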


Object* Heap::CopyFixedArray(FixedArray* src) {
  int len = src->length();
  Object* obj = AllocateRawFixedArray(len);
  if (obj->IsFailure()) return obj;
  if (Heap::InNewSpace(obj)) {
    HeapObject* dst = HeapObject::cast(obj);
    CopyBlock(reinterpret_cast<Object**>(dst->address()),
(...skipping 32 matching lines...)
  ASSERT(empty_fixed_array()->IsFixedArray());
  if (length == 0) return empty_fixed_array();

  int size = FixedArray::SizeFor(length);
  Object* result;
  if (size > MaxHeapObjectSize()) {
    result = lo_space_->AllocateRawFixedArray(size);
  } else {
    AllocationSpace space =
        (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
-    result = AllocateRaw(size, space);
+    result = AllocateRaw(size, space, OLD_POINTER_SPACE);
  }
  if (result->IsFailure()) return result;

  // Initialize the object.
  reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
  FixedArray* array = FixedArray::cast(result);
  array->set_length(length);
  Object* value = undefined_value();
  for (int index = 0; index < length; index++) {
    array->set(index, value, SKIP_WRITE_BARRIER);
(...skipping 1020 matching lines...)
#ifdef DEBUG
bool Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (Bootstrapper::IsActive()) return true;
  if (disallow_allocation_failure()) return true;
  return CollectGarbage(0, NEW_SPACE);
}
#endif

} }  // namespace v8::internal
