Chromium Code Reviews

Diff: src/spaces.cc

Issue 335009: New snapshot framework. Doesn't work on ARM yet (code targets... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 11 years, 1 month ago
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 1509 matching lines...)

  // We write a map and possibly size information to the block. If the block
  // is big enough to be a ByteArray with at least one extra word (the next
  // pointer), we set its map to be the byte array map and its size to an
  // appropriate array length for the desired size from HeapObject::Size().
  // If the block is too small (e.g., one or two words) to hold both a size
  // field and a next pointer, we give it a filler map that gives it the
  // correct size.
  if (size_in_bytes > ByteArray::kAlignedSize) {
    set_map(Heap::raw_unchecked_byte_array_map());
-   ByteArray::cast(this)->set_length(ByteArray::LengthFor(size_in_bytes));
+   // Can't use ByteArray::cast because it fails during deserialization.
+   ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
+   this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes));
  } else if (size_in_bytes == kPointerSize) {
    set_map(Heap::raw_unchecked_one_pointer_filler_map());
  } else if (size_in_bytes == 2 * kPointerSize) {
    set_map(Heap::raw_unchecked_two_pointer_filler_map());
  } else {
    UNREACHABLE();
  }
- ASSERT(Size() == size_in_bytes);
+ // We would like to ASSERT(Size() == size_in_bytes) but this would fail
+ // during deserialization because the byte array map is not done yet.
}
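Why the checked cast trips, as a self-contained toy (every name below is a
hypothetical stand-in, not V8's actual definition): it assumes ByteArray::cast
is a CAST_ACCESSOR-style checked cast that asserts on the object's map word,
which is exactly the word that cannot be trusted mid-deserialization.

#include <cassert>
#include <cstdio>

struct Map {};                         // stand-in for v8::internal::Map
static Map* byte_array_map = nullptr;  // not yet deserialized/initialized

struct Object { Map* map; };
struct ByteArray : Object {};

// Checked cast in the spirit of V8's CAST_ACCESSOR macros: it validates the
// object's map before casting, so it cannot run while byte_array_map (or the
// object's own map word) is still being set up.
ByteArray* CheckedCast(Object* obj) {
  assert(byte_array_map != nullptr && obj->map == byte_array_map);
  return static_cast<ByteArray*>(obj);
}

int main() {
  Object block{nullptr};  // a free block whose map word is still unwritten
  // The patch sidesteps the check with a raw reinterpret_cast: set_size has
  // just written the byte array map into the block, so the shape is known.
  ByteArray* as_byte_array = reinterpret_cast<ByteArray*>(&block);
  (void)as_byte_array;
  std::printf("unchecked cast is safe by construction here\n");
}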


Address FreeListNode::next() {
  ASSERT(IsFreeListNode(this));
  if (map() == Heap::raw_unchecked_byte_array_map()) {
    ASSERT(Size() >= kNextOffset + kPointerSize);
    return Memory::Address_at(address() + kNextOffset);
  } else {
    return Memory::Address_at(address() + kPointerSize);
(...skipping 272 matching lines...)
HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
  // Linear allocation in this space has failed. If there is another page
  // in the space, move to that page and allocate there. This allocation
  // should succeed (size_in_bytes should not be greater than a page's
  // object area size).
  Page* current_page = TopPageOf(allocation_info_);
  if (current_page->next_page()->is_valid()) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // There is no next page in this space. Try free list allocation.
[Mads Ager (chromium) 2009/10/26 11:14:05] Update comment.
- int wasted_bytes;
- Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
- accounting_stats_.WasteBytes(wasted_bytes);
- if (!result->IsFailure()) {
-   accounting_stats_.AllocateBytes(size_in_bytes);
-   return HeapObject::cast(result);
+ if (!Heap::linear_allocation()) {
+   int wasted_bytes;
+   Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
+   accounting_stats_.WasteBytes(wasted_bytes);
+   if (!result->IsFailure()) {
+     accounting_stats_.AllocateBytes(size_in_bytes);
+     return HeapObject::cast(result);
+   }
  }

  // Free list allocation failed and there is no next page. Fail if we have
  // hit the old generation size limit that should cause a garbage
  // collection.
  if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) {
    return NULL;
  }

  // Try to expand the space and allocate in the new next page.
(...skipping 375 matching lines...)
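The new Heap::linear_allocation() guard changes the slow-path fallback order.
A self-contained toy model of that order (field and function names below are
illustrative, not V8's): first bump-allocate in the next page, then consult
the free list only when not in linear-allocation mode, otherwise fail so the
caller can expand the space or trigger a collection.

#include <cstdio>

// Hypothetical stand-in for the state SlowAllocateRaw consults.
struct ToySpace {
  bool has_next_page;           // another page available for bump allocation
  bool free_list_can_satisfy;   // free list holds a large-enough block
  bool linear_allocation_mode;  // forced while a snapshot is built or read
};

const char* SlowAllocate(const ToySpace& space) {
  if (space.has_next_page) return "bump-allocate in the next page";
  if (!space.linear_allocation_mode && space.free_list_can_satisfy)
    return "reuse a block from the free list";
  return "fail: caller expands the space or triggers a GC";
}

int main() {
  // While serializing, the free list is skipped even though it could satisfy
  // the request, keeping allocation strictly linear and addresses predictable.
  ToySpace serializing{false, true, true};
  std::printf("%s\n", SlowAllocate(serializing));
}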
HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
  ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
  // Linear allocation in this space has failed. If there is another page
  // in the space, move to that page and allocate there. This allocation
  // should succeed.
  Page* current_page = TopPageOf(allocation_info_);
  if (current_page->next_page()->is_valid()) {
    return AllocateInNextPage(current_page, size_in_bytes);
  }

  // There is no next page in this space. Try free list allocation.
[Mads Ager (chromium) 2009/10/26 11:14:05] Update comment.
  // The fixed space free list implicitly assumes that all free blocks
  // are of the fixed size.
- if (size_in_bytes == object_size_in_bytes_) {
+ if (!Heap::linear_allocation()) {
    Object* result = free_list_.Allocate();
    if (!result->IsFailure()) {
      accounting_stats_.AllocateBytes(size_in_bytes);
      return HeapObject::cast(result);
    }
  }

  // Free list allocation failed and there is no next page. Fail if we have
  // hit the old generation size limit that should cause a garbage
  // collection.
(...skipping 531 matching lines...)
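The FixedSpace change mirrors the OldSpace one: free-list reuse is now gated
on Heap::linear_allocation() rather than on a size check that was always true
here (the ASSERT_EQ at the top already guarantees size_in_bytes ==
object_size_in_bytes_). A plausible reading, given the issue title, is that
the snapshot framework needs allocation to stay strictly linear so object
addresses are reproducible across serialization and deserialization; the
reviewer's "Update comment" notes flag that the stale comments above both
blocks no longer describe the new condition.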
                    reinterpret_cast<Object**>(object->address()
                                               + Page::kObjectAreaSize),
                    allocation_top);
      PrintF("\n");
    }
  }
}
#endif  // DEBUG

} }  // namespace v8::internal