Chromium Code Reviews

Side by Side Diff: src/spaces.cc

Issue 8404030: Version 3.7.1 (Closed) Base URL: http://v8.googlecode.com/svn/trunk/
Patch Set: Created 9 years, 1 month ago

 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 77 matching lines...)
                                     HeapObjectIterator::PageMode mode,
                                     HeapObjectCallback size_f) {
   // Check that we actually can iterate this space.
   ASSERT(!space->was_swept_conservatively());

   space_ = space;
   cur_addr_ = cur;
   cur_end_ = end;
   page_mode_ = mode;
   size_func_ = size_f;
-
-#ifdef DEBUG
-  Verify();
-#endif
 }


 // We have hit the end of the page and should advance to the next block of
 // objects. This happens at the end of the page.
 bool HeapObjectIterator::AdvanceToNextPage() {
   ASSERT(cur_addr_ == cur_end_);
   if (page_mode_ == kOnePageOnly) return false;
   Page* cur_page;
   if (cur_addr_ == NULL) {
     cur_page = space_->anchor();
   } else {
     cur_page = Page::FromAddress(cur_addr_ - 1);
     ASSERT(cur_addr_ == cur_page->ObjectAreaEnd());
   }
   cur_page = cur_page->next_page();
   if (cur_page == space_->anchor()) return false;
   cur_addr_ = cur_page->ObjectAreaStart();
   cur_end_ = cur_page->ObjectAreaEnd();
   ASSERT(cur_page->WasSweptPrecisely());
   return true;
 }


-#ifdef DEBUG
-void HeapObjectIterator::Verify() {
-  // TODO(gc): We should do something here.
-}
-#endif
-
-
 // -----------------------------------------------------------------------------
 // CodeRange


 CodeRange::CodeRange(Isolate* isolate)
     : isolate_(isolate),
       code_range_(NULL),
       free_list_(0),
       allocation_list_(0),
       current_allocation_block_index_(0) {
(...skipping 1759 matching lines...)
   return sum;
 }
 #endif


 // -----------------------------------------------------------------------------
 // OldSpace implementation

 bool NewSpace::ReserveSpace(int bytes) {
   // We can't reliably unpack a partial snapshot that needs more new space
-  // space than the minimum NewSpace size.
+  // space than the minimum NewSpace size. The limit can be set lower than
+  // the end of new space either because there is more space on the next page
+  // or because we have lowered the limit in order to get periodic incremental
+  // marking. The most reliable way to ensure that there is linear space is
+  // to do the allocation, then rewind the limit.
   ASSERT(bytes <= InitialCapacity());
-  Address limit = allocation_info_.limit;
+  MaybeObject* maybe = AllocateRawInternal(bytes);
+  Object* object = NULL;
+  if (!maybe->ToObject(&object)) return false;
+  HeapObject* allocation = HeapObject::cast(object);
   Address top = allocation_info_.top;
-  return limit - top >= bytes;
+  if ((top - bytes) == allocation->address()) {
+    allocation_info_.top = allocation->address();
+    return true;
+  }
+  // There may be a borderline case here where the allocation succeeded, but
+  // the limit and top have moved on to a new page. In that case we try again.
+  return ReserveSpace(bytes);
 }

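The rewritten NewSpace::ReserveSpace above no longer just checks whether limit minus top leaves enough room: it performs a real allocation and, if the block landed at the old top, rewinds top so the reserved bytes remain both free and linear; if the allocation spilled onto a new page, it simply retries. Below is a minimal, self-contained sketch of that allocate-then-rewind pattern over a toy bump allocator; ToyBumpSpace, TryAllocate and Reserve are invented names, not V8 APIs.

// Toy illustration (not V8 code) of the allocate-then-rewind reservation
// pattern used in NewSpace::ReserveSpace above.
#include <cstddef>
#include <cstdio>

class ToyBumpSpace {
 public:
  // Bump-allocates `bytes` out of a fixed buffer; returns nullptr on overflow.
  char* TryAllocate(size_t bytes) {
    if (top_ + bytes > sizeof(buffer_)) return nullptr;
    char* result = buffer_ + top_;
    top_ += bytes;
    return result;
  }

  // Guarantee that `bytes` of linear space exist without consuming them:
  // do the allocation, then rewind top if the block landed at the old top.
  bool Reserve(size_t bytes) {
    size_t old_top = top_;
    char* block = TryAllocate(bytes);
    if (block == nullptr) return false;   // No room at all.
    if (block == buffer_ + old_top) {
      top_ = old_top;                      // Rewind: space stays free and linear.
      return true;
    }
    // In V8 the allocation can land on a fresh page, in which case the
    // reservation is retried; this toy allocator has a single "page", so the
    // branch is unreachable here.
    return Reserve(bytes);
  }

 private:
  char buffer_[1024];
  size_t top_ = 0;
};

int main() {
  ToyBumpSpace space;
  std::printf("reserve 128: %d\n", space.Reserve(128));    // prints 1, top unchanged
  std::printf("reserve 2048: %d\n", space.Reserve(2048));  // prints 0, cannot fit
  return 0;
}
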
 void PagedSpace::PrepareForMarkCompact() {
   // We don't have a linear allocation area while sweeping. It will be restored
   // on the first allocation after the sweep.
   // Mark the old linear allocation area with a free space map so it can be
   // skipped when scanning the heap.
   int old_linear_size = static_cast<int>(limit() - top());
   Free(top(), old_linear_size);
(...skipping 344 matching lines...)

   HeapObject* object = current_->GetObject();
   current_ = current_->next_page();
   return object;
 }


 // -----------------------------------------------------------------------------
 // LargeObjectSpace

-LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
+LargeObjectSpace::LargeObjectSpace(Heap* heap,
+                                   intptr_t max_capacity,
+                                   AllocationSpace id)
     : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
+      max_capacity_(max_capacity),
       first_page_(NULL),
       size_(0),
       page_count_(0),
       objects_size_(0) {}


 bool LargeObjectSpace::Setup() {
   first_page_ = NULL;
   size_ = 0;
   page_count_ = 0;
(...skipping 19 matching lines...)

 MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
                                            Executability executable) {
   // Check if we want to force a GC before growing the old space further.
   // If so, fail the allocation.
   if (!heap()->always_allocate() &&
       heap()->OldGenerationAllocationLimitReached()) {
     return Failure::RetryAfterGC(identity());
   }

+  if (Size() + object_size > max_capacity_) {
+    return Failure::RetryAfterGC(identity());
+  }
+
   LargePage* page = heap()->isolate()->memory_allocator()->
       AllocateLargePage(object_size, executable, this);
   if (page == NULL) return Failure::RetryAfterGC(identity());
   ASSERT(page->body_size() >= object_size);

   size_ += static_cast<int>(page->size());
   objects_size_ += object_size;
   page_count_++;
   page->set_next_page(first_page_);
   first_page_ = page;
(...skipping 191 matching lines...)
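The constructor and AllocateRaw changes above give LargeObjectSpace an explicit max_capacity_ and refuse to map another large page once Size() plus the requested object size would exceed it, returning a retry-after-GC failure so the caller can collect first. A rough standalone sketch of that capacity gate follows; ToyLargeObjectSpace and AllocResult are invented names, not V8 types.

// Toy illustration (not V8 code) of the capacity check added to
// LargeObjectSpace::AllocateRaw above: refuse to grow past max_capacity_
// and report a retryable failure so the caller can trigger a GC first.
#include <cstddef>
#include <cstdio>

enum class AllocResult { kOk, kRetryAfterGC };

class ToyLargeObjectSpace {
 public:
  explicit ToyLargeObjectSpace(size_t max_capacity)
      : max_capacity_(max_capacity) {}

  AllocResult AllocateRaw(size_t object_size) {
    // Mirrors the patch: would this allocation push the space past its
    // configured maximum? If so, ask the caller to GC and retry.
    if (size_ + object_size > max_capacity_) return AllocResult::kRetryAfterGC;
    size_ += object_size;  // Stand-in for actually mapping a large page.
    return AllocResult::kOk;
  }

  size_t Size() const { return size_; }

 private:
  size_t max_capacity_;
  size_t size_ = 0;
};

int main() {
  ToyLargeObjectSpace space(4096);
  std::printf("%d\n", space.AllocateRaw(3000) == AllocResult::kOk);            // 1
  std::printf("%d\n", space.AllocateRaw(3000) == AllocResult::kRetryAfterGC);  // 1
  return 0;
}
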
     object->ShortPrint();
     PrintF("\n");
   }
   printf(" --------------------------------------\n");
   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }

 #endif  // DEBUG

 } }  // namespace v8::internal