Chromium Code Reviews

Unified Diff: src/spaces-inl.h

Issue 6639024: Get rid of distinction between below- and above-watermark in page allocation.... (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/gc/
Patch Set: '' Created 9 years, 9 months ago
-// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
 //       with the distribution.
(19 matching lines skipped)
 #include "memory.h"
 #include "spaces.h"

 namespace v8 {
 namespace internal {


 // -----------------------------------------------------------------------------
 // PageIterator

+
+PageIterator::PageIterator(PagedSpace* space)
+    : space_(space),
+      prev_page_(&space->anchor_),
+      next_page_(prev_page_->next_page()) { }
+
+
 bool PageIterator::has_next() {
-  return prev_page_ != stop_page_;
+  return next_page_ != &space_->anchor_;
 }


 Page* PageIterator::next() {
   ASSERT(has_next());
-  prev_page_ = (prev_page_ == NULL)
-      ? space_->first_page_
-      : prev_page_->next_page();
+  prev_page_ = next_page_;
+  next_page_ = next_page_->next_page();
   return prev_page_;
 }

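The new iterator drops the old NULL checks against first_page_ and stop_page_: pages now form a circular, doubly-linked list threaded through a sentinel anchor page owned by the space, and iteration simply stops when the cursor comes back around to the anchor. A minimal self-contained sketch of the same sentinel pattern (the names and the int payload are illustrative, not V8's):

#include <cassert>
#include <cstdio>

struct Node {
  Node* next;
  Node* prev;
  int payload;
};

// A sentinel-based circular list: the anchor holds no payload, and an
// empty list is just the anchor linked to itself.
struct List {
  Node anchor;
  List() { anchor.next = &anchor; anchor.prev = &anchor; }
  void Append(Node* n) {
    n->prev = anchor.prev;
    n->next = &anchor;
    anchor.prev->next = n;
    anchor.prev = n;
  }
};

struct Iterator {
  explicit Iterator(List* list)
      : anchor_(&list->anchor), next_(list->anchor.next) {}
  bool has_next() const { return next_ != anchor_; }
  Node* next() {
    assert(has_next());
    Node* current = next_;
    next_ = next_->next;  // advance before handing out the current node
    return current;
  }
  Node* anchor_;
  Node* next_;
};

int main() {
  List list;
  Node a{nullptr, nullptr, 1}, b{nullptr, nullptr, 2};
  list.Append(&a);
  list.Append(&b);
  for (Iterator it(&list); it.has_next();) printf("%d\n", it.next()->payload);
}

The payoff is that empty lists, first pages, and last pages all take the same two pointer updates; has_next() no longer needs any special case.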
 // -----------------------------------------------------------------------------
-// Page
-
-
-Address Page::AllocationTop() {
-  return static_cast<PagedSpace*>(owner())->PageAllocationTop(this);
-}
-
-
-Address Page::AllocationWatermark() {
-  if (this == static_cast<PagedSpace*>(owner())->AllocationTopPage()) {
-    return static_cast<PagedSpace*>(owner())->top();
-  }
-  return address() + AllocationWatermarkOffset();
-}
-
-
-uint32_t Page::AllocationWatermarkOffset() {
-  return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >>
-                               kAllocationWatermarkOffsetShift);
-}
-
-
-void Page::SetAllocationWatermark(Address allocation_watermark) {
-  if ((Heap::gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
-    // When iterating intergenerational references during scavenge
-    // we might decide to promote an encountered young object.
-    // We will allocate a space for such an object and put it
-    // into the promotion queue to process it later.
-    // If space for object was allocated somewhere beyond allocation
-    // watermark this might cause garbage pointers to appear under allocation
-    // watermark. To avoid visiting them during pointer-to-newspace iteration
-    // which might be still in progress we store a valid allocation watermark
-    // value and mark this page as having an invalid watermark.
-    SetCachedAllocationWatermark(AllocationWatermark());
-    InvalidateWatermark(true);
-  }
-
-  flags_ = (flags_ & kFlagsMask) |
-           Offset(allocation_watermark) << kAllocationWatermarkOffsetShift;
-  ASSERT(AllocationWatermarkOffset()
-         == static_cast<uint32_t>(Offset(allocation_watermark)));
-}
-
-
-void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
-  allocation_watermark_ = allocation_watermark;
-}
-
-
-Address Page::CachedAllocationWatermark() {
-  return allocation_watermark_;
-}
-
-
-void Page::FlipMeaningOfInvalidatedWatermarkFlag() {
-  watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
-}
-
-
-bool Page::IsWatermarkValid() {
-  return (flags_ & (1 << WATERMARK_INVALIDATED)) != watermark_invalidated_mark_;
-}
-
-
-void Page::InvalidateWatermark(bool value) {
-  if (value) {
-    flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
-             watermark_invalidated_mark_;
-  } else {
-    flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
-             (watermark_invalidated_mark_ ^ (1 << WATERMARK_INVALIDATED));
-  }
-
-  ASSERT(IsWatermarkValid() == !value);
-}
-
-
-void Page::ClearGCFields() {
-  InvalidateWatermark(true);
-  SetAllocationWatermark(ObjectAreaStart());
-  if (Heap::gc_state() == Heap::SCAVENGE) {
-    SetCachedAllocationWatermark(ObjectAreaStart());
-  }
-}
+// HeapObjectIterator
+HeapObject* HeapObjectIterator::FromCurrentPage() {
+  while (cur_addr_ != cur_end_) {
+    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
+      cur_addr_ = space_->limit();
+      continue;
+    }
+    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
+    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
+    cur_addr_ += obj_size;
+    ASSERT(cur_addr_ <= cur_end_);
+    if (!obj->IsFiller()) {
+      ASSERT_OBJECT_SIZE(obj_size);
+      return obj;
+    }
+  }
+  return NULL;
+}
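FromCurrentPage replaces the watermark machinery with a plain linear scan: every heap object carries its own size, so the cursor hops from object to object, skips over the current linear allocation gap [top, limit) where no parsable objects exist yet, and filters out fillers. A self-contained sketch of that scan over a toy record layout (the size-prefixed Record and its zero-payload filler convention are assumptions of this sketch, not V8's object layout):

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <new>

// Each record starts with its total size in bytes; a payload of 0 marks
// a filler in this toy layout.
struct Record {
  uint32_t size;     // total record size, including this header
  uint32_t payload;  // 0 means "filler" here
};

// Scan [*cur, end), hopping over the unparsable gap [gap_start, gap_end)
// and skipping fillers; returns the next live record, or nullptr.
Record* NextLive(uint8_t** cur, uint8_t* end,
                 uint8_t* gap_start, uint8_t* gap_end) {
  while (*cur != end) {
    if (*cur == gap_start && gap_start != gap_end) {
      *cur = gap_end;  // hop over the allocation gap
      continue;
    }
    Record* r = reinterpret_cast<Record*>(*cur);
    *cur += r->size;       // advance by the record's self-reported size
    assert(*cur <= end);   // a corrupt size would walk off the region
    if (r->payload != 0) return r;  // skip fillers
  }
  return nullptr;
}

int main() {
  alignas(Record) uint8_t heap[32];
  Record* r0 = new (heap) Record{16, 42};      // live record
  Record* r1 = new (heap + 16) Record{16, 0};  // filler
  (void)r0; (void)r1;
  uint8_t* cur = heap;
  while (Record* r = NextLive(&cur, heap + 32, nullptr, nullptr))
    printf("live payload: %u\n", r->payload);
}

The ASSERT(cur_addr_ <= cur_end_) in the real code plays the same role as the assert here: a bad size field would otherwise carry the cursor past the end of the page.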


 // -----------------------------------------------------------------------------
 // MemoryAllocator

 #ifdef ENABLE_HEAP_PROTECTION

 void MemoryAllocator::Protect(Address start, size_t size) {
   OS::Protect(start, size);
(25 matching lines skipped)
 // --------------------------------------------------------------------------
 // PagedSpace

 bool PagedSpace::Contains(Address addr) {
   Page* p = Page::FromAddress(addr);
   if (!p->is_valid()) return false;
   return p->owner() == this;
 }

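Contains leans on Page::FromAddress, which recovers the page header from any interior address by masking off the low bits; this works because pages are allocated on power-of-two boundaries. A sketch of that mapping, assuming an illustrative 64 KB page size rather than V8's actual constant:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Illustrative constants, not V8's: 64 KB pages, power-of-two aligned.
const uintptr_t kPageSize = 64 * 1024;
const uintptr_t kPageAlignmentMask = kPageSize - 1;

struct Page {
  void* owner;  // the space this page belongs to
};

// Map an interior address to its page header by clearing the low bits.
// This only works because every page starts on a kPageSize boundary.
Page* PageFromAddress(uintptr_t addr) {
  return reinterpret_cast<Page*>(addr & ~kPageAlignmentMask);
}

int main() {
  uintptr_t page_start = 10 * kPageSize;   // some aligned page address
  uintptr_t interior = page_start + 1234;  // address of an object on it
  assert(PageFromAddress(interior) ==
         reinterpret_cast<Page*>(page_start));
  printf("ok\n");
}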
+Page* Page::Initialize(MemoryChunk* chunk,
+                       Executability executable,
+                       PagedSpace* owner) {
+  Page* page = reinterpret_cast<Page*>(chunk);
+  MemoryChunk::Initialize(reinterpret_cast<Address>(chunk),
+                          kPageSize,
+                          executable,
+                          owner);
+  owner->IncreaseCapacity(Page::kObjectAreaSize);
+  owner->Free(page->ObjectAreaStart(),
+              page->ObjectAreaEnd() - page->ObjectAreaStart());
+  return page;
+}
+
+
+Page* Page::next_page() {
+  ASSERT(next_chunk()->owner() == owner());
+  return static_cast<Page*>(next_chunk());
+}
+
+
+Page* Page::prev_page() {
+  ASSERT(prev_chunk()->owner() == owner());
+  return static_cast<Page*>(prev_chunk());
+}
+
+
+void Page::set_next_page(Page* page) {
+  ASSERT(page->owner() == owner());
+  set_next_chunk(page);
+}
+
+
+void Page::set_prev_page(Page* page) {
+  ASSERT(page->owner() == owner());
+  set_prev_chunk(page);
+}
+
+
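Page::Initialize is where the watermark-free design shows up most clearly: a fresh page's whole object area is counted as capacity and then donated to the owning space's free list, and memory only becomes live later when allocation carves it back out. A toy first-fit free list illustrating that donate-then-carve flow (the extent representation and the sizes are inventions of this sketch):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// A toy first-fit free list over (start, size) extents.
struct FreeList {
  struct Extent { size_t start, size; };
  std::vector<Extent> extents;

  // Donate a region, e.g. a new page's whole object area.
  void Free(size_t start, size_t size) { extents.push_back({start, size}); }

  // Carve size bytes out of the first extent that fits; returns the
  // start offset, or SIZE_MAX when nothing fits.
  size_t Allocate(size_t size) {
    for (Extent& e : extents) {
      if (e.size >= size) {
        size_t result = e.start;
        e.start += size;
        e.size -= size;
        return result;
      }
    }
    return SIZE_MAX;
  }
};

int main() {
  const size_t kObjectAreaSize = 8 * 1024;  // illustrative page payload size
  FreeList free_list;
  free_list.Free(0, kObjectAreaSize);   // Page::Initialize's donation
  size_t obj = free_list.Allocate(64);  // later allocation carves it up
  printf("allocated at offset %zu\n", obj);
}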
 // Try linear allocation in the page of alloc_info's allocation top. Does
 // not contain slow case logic (e.g., move to the next page or try free list
 // allocation) so it can be used by all the allocation functions and for all
 // the paged spaces.
 HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
                                          int size_in_bytes) {
   Address current_top = alloc_info->top;
   Address new_top = current_top + size_in_bytes;
   if (new_top > alloc_info->limit) return NULL;

   alloc_info->top = new_top;
   ASSERT(alloc_info->VerifyPagedAllocation());
-  accounting_stats_.AllocateBytes(size_in_bytes);
+  ASSERT(current_top != NULL);
   return HeapObject::FromAddress(current_top);
 }

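AllocateLinearly is a pure bump allocation: one addition and one comparison against the limit, which is why it can be shared by every allocation function and every paged space. The arithmetic, concretely, with made-up addresses:

#include <cstdint>
#include <cstdio>

int main() {
  // Worked example of the bump check with made-up addresses.
  uintptr_t top = 0x1000;    // alloc_info->top
  uintptr_t limit = 0x1100;  // alloc_info->limit: 256 bytes of linear space
  uintptr_t size = 0x30;     // requested object size (48 bytes)

  uintptr_t new_top = top + size;  // 0x1030
  if (new_top > limit) {
    printf("fast path fails; fall through to the free list\n");
  } else {
    printf("object at %#lx, top bumped to %#lx\n",
           (unsigned long)top, (unsigned long)new_top);
  }
}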
 // Raw allocation.
 MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
   ASSERT(HasBeenSetup());
   ASSERT_OBJECT_SIZE(size_in_bytes);
+  MaybeObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
+  if (object != NULL) {
+    return object;
+  }

-  HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
+  object = free_list_.Allocate(size_in_bytes);
   if (object != NULL) {
-    IncrementalMarking::Step(size_in_bytes);

  Vyacheslav Egorov (Chromium) 2011/03/17 16:11:00:
  It seems that we are doing step less frequently. W…

  Erik Corry 2011/03/17 17:24:26:
  The idea is that we set the linear allocation limi…

     return object;
   }

   object = SlowAllocateRaw(size_in_bytes);
   if (object != NULL) {
-    IncrementalMarking::Step(size_in_bytes);
     return object;
   }

   return Failure::RetryAfterGC(identity());
 }

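AllocateRaw is now a three-tier fallback: inline bump allocation, then the free list, then SlowAllocateRaw, and only after all three fail does it report a retry-after-GC failure. Note that the IncrementalMarking::Step calls disappear from both success paths; per the (truncated) review reply above, the linear allocation limit is apparently chosen so that the slow path, and with it the marking step, still runs at a bounded interval. A schematic of the tiering, with trivial stand-ins for all three tiers (every name here is illustrative):

#include <cstdio>
#include <cstdlib>

// Stand-ins for the three allocation tiers; each may fail (return nullptr).
void* AllocateLinearly(int size) { (void)size; return nullptr; }  // bump full
void* FreeListAllocate(int size) { (void)size; return nullptr; }  // list empty
void* SlowAllocate(int size) { return malloc(size); }             // grow space

// The tiered fallback AllocateRaw now implements: fast inline bump
// allocation first, free list second, slow path last.
void* AllocateRaw(int size) {
  if (void* obj = AllocateLinearly(size)) return obj;
  if (void* obj = FreeListAllocate(size)) return obj;
  if (void* obj = SlowAllocate(size)) return obj;
  return nullptr;  // caller must retry after GC
}

int main() {
  void* p = AllocateRaw(48);
  printf("%s\n", p ? "allocated via slow path" : "retry after GC");
  free(p);
}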
 // -----------------------------------------------------------------------------
 // NewSpace

(24 matching lines skipped)
   ASSERT(string->IsSeqString());
   ASSERT(string->address() + StringType::SizeFor(string->length()) ==
          allocation_info_.top);
   allocation_info_.top =
       string->address() + StringType::SizeFor(length);
   string->set_length(length);
 }


 bool FreeListNode::IsFreeListNode(HeapObject* object) {
-  return object->map() == Heap::raw_unchecked_byte_array_map()
+  return object->map() == Heap::raw_unchecked_free_space_map()
       || object->map() == Heap::raw_unchecked_one_pointer_filler_map()
       || object->map() == Heap::raw_unchecked_two_pointer_filler_map();
 }

 } }  // namespace v8::internal

 #endif  // V8_SPACES_INL_H_
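The IsFreeListNode change retags large free regions with a dedicated free-space map instead of reusing the byte-array map, keeping the one- and two-pointer filler maps for holes too small to store an explicit length. A sketch of that size-based classification (the word size and the selection rule are assumptions of this sketch, not taken from this patch):

#include <cstdio>

const int kPointerSize = 8;  // assumed 64-bit heap for illustration

enum FillerKind {
  ONE_POINTER_FILLER,  // hole of exactly one word: map pointer only
  TWO_POINTER_FILLER,  // hole of two words: no room for a size field
  FREE_SPACE           // larger hole: header can record its own size
};

// Pick how to tag a hole of size_in_bytes, mirroring why three distinct
// maps exist: the two small fillers cannot store an explicit length.
FillerKind ChooseFiller(int size_in_bytes) {
  if (size_in_bytes == kPointerSize) return ONE_POINTER_FILLER;
  if (size_in_bytes == 2 * kPointerSize) return TWO_POINTER_FILLER;
  return FREE_SPACE;
}

int main() {
  printf("%d %d %d\n", ChooseFiller(8), ChooseFiller(16), ChooseFiller(64));
}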
