Chromium Code Reviews

Unified Diff: src/store-buffer.cc

Issue 16690006: Clean up StoreBuffer::EnsureSpace. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 6 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 124 matching lines...)
       if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
         *write++ = current;
       }
     }
     previous = current;
   }
   old_top_ = write;
 }
 
 
+bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
+  return old_limit_ - old_top_ >= space_needed;
+}
+
+
 void StoreBuffer::EnsureSpace(intptr_t space_needed) {
   while (old_limit_ - old_top_ < space_needed &&
          old_limit_ < old_reserved_limit_) {
     size_t grow = old_limit_ - old_start_;  // Double size.
     CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                       grow * kPointerSize,
                                       false));
     old_limit_ += grow;
   }
 
-  if (old_limit_ - old_top_ >= space_needed) return;
+  if (SpaceAvailable(space_needed)) return;
 
   if (old_buffer_is_filtered_) return;
   ASSERT(may_move_store_buffer_entries_);
   Compact();
 
   old_buffer_is_filtered_ = true;
   bool page_has_scan_on_scavenge_flag = false;
 
   PointerChunkIterator it(heap_);
   MemoryChunk* chunk;
   while ((chunk = it.next()) != NULL) {
     if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
   }
 
   if (page_has_scan_on_scavenge_flag) {
     Filter(MemoryChunk::SCAN_ON_SCAVENGE);
   }
 
-  // If filtering out the entries from scan_on_scavenge pages got us down to
-  // less than half full, then we are satisfied with that.
-  if (old_limit_ - old_top_ > old_top_ - old_start_) return;
+  if (SpaceAvailable(space_needed)) return;
 
   // Sample 1 entry in 97 and filter out the pages where we estimate that more
   // than 1 in 8 pointers are to new space.
   static const int kSampleFinenesses = 5;
   static const struct Samples {
     int prime_sample_step;
     int threshold;
   } samples[kSampleFinenesses] = {
     { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
     { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
     { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
     { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
     { 1, 0}
   };
   for (int i = 0; i < kSampleFinenesses; i++) {
     ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
     // As a last resort we mark all pages as being exempt from the store
     // buffer.
     ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
-    if (old_limit_ - old_top_ > old_top_ - old_start_) return;
+    if (SpaceAvailable(space_needed)) return;
   }
   UNREACHABLE();
 }
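
The commit loop above grows the committed portion of the buffer geometrically: each pass doubles the current size until either the request fits or old_reserved_limit_ is reached. A minimal standalone sketch of that policy; EnsureCapacity, kInitialSlots and kReservedSlots are names invented for illustration, not names from this CL:

#include <cstdio>

static const size_t kInitialSlots = 1024;
static const size_t kReservedSlots = 16384;

// Doubles `committed` until the request fits or the reservation is
// exhausted, mirroring the while-loop in StoreBuffer::EnsureSpace.
size_t EnsureCapacity(size_t committed, size_t used, size_t needed) {
  while (committed - used < needed && committed < kReservedSlots) {
    committed += committed;  // "Double size", as in the CL.
    if (committed > kReservedSlots) committed = kReservedSlots;
  }
  return committed;
}

int main() {
  // 1024 slots committed, 1000 used, 5000 more needed: doubles three
  // times, to 8192 slots.
  printf("committed: %zu slots\n", EnsureCapacity(kInitialSlots, 1000, 5000));
  return 0;
}

The sampling table can likewise be sanity-checked numerically. Assuming 1 MB pages and 8-byte pointers (assumed constants, not stated in this CL), the coarsest row samples every 97th entry and exempts a page once more than (1048576 / 8 / 97) / 8 = 168 sampled entries land on it, which extrapolates to more than one in eight of the page's slots holding new-space pointers:

#include <cstdio>

int main() {
  // Assumed constants, for illustration only.
  const int kPageSize = 1 << 20;  // Page::kPageSize on a 1 MB-page build.
  const int kPointerSize = 8;     // 64-bit pointers.
  const int kSlotsPerPage = kPageSize / kPointerSize;  // 131072 slots.

  static const struct { int step; int divisor; } kRows[] = {
    { 97, 8 }, { 23, 16 }, { 7, 32 }, { 3, 256 }
  };
  for (int i = 0; i < 4; i++) {
    int samples_per_page = kSlotsPerPage / kRows[i].step;
    printf("step %2d: %5d samples/page, threshold %d\n",
           kRows[i].step, samples_per_page,
           samples_per_page / kRows[i].divisor);
  }
  // Prints thresholds 168, 356, 585, 170. The final { 1, 0 } row samples
  // every entry with a threshold of zero; per the ASSERT in EnsureSpace,
  // that pass exempts every page that still has entries.
  return 0;
}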
 
 
 // Sample the store buffer to see if some pages are taking up a lot of space
 // in the store buffer.
 void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
   PointerChunkIterator it(heap_);
   MemoryChunk* chunk;
(...skipping 81 matching lines...)
   ClearFilteringHashSets();
 
   return page_has_scan_on_scavenge_flag;
 }
 
 
 #ifdef DEBUG
 void StoreBuffer::Clean() {
   ClearFilteringHashSets();
   Uniq();  // Also removes things that no longer point to new space.
-  CheckForFullBuffer();
+  EnsureSpace(kStoreBufferSize / 2);
 }
 
 
 static Address* in_store_buffer_1_element_cache = NULL;
 
 
 bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
   if (!FLAG_enable_slow_asserts) return true;
   if (in_store_buffer_1_element_cache != NULL &&
       *in_store_buffer_1_element_cache == cell_address) {
(...skipping 372 matching lines...)
       // cause some duplicates to remain undetected.
       hash_set_1_[hash1] = int_addr;
       hash_set_2_[hash2] = 0;
     }
     old_buffer_is_sorted_ = false;
     old_buffer_is_filtered_ = false;
     *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
     ASSERT(old_top_ <= old_limit_);
   }
   heap_->isolate()->counters()->store_buffer_compactions()->Increment();
-  CheckForFullBuffer();
-}
-
-
-void StoreBuffer::CheckForFullBuffer() {
-  EnsureSpace(kStoreBufferSize * 2);
 }
 
 } }  // namespace v8::internal
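
For readers unfamiliar with the tail of Compact() shown above: duplicate detection uses two small direct-mapped hash sets, and on a double collision one old entry is simply evicted, so some duplicates survive (a deliberate speed/accuracy trade-off). A self-contained sketch of the same idea; the table sizes and hash functions here are illustrative, not V8's:

#include <cstdint>
#include <cstdio>

static const int kTableBits = 8;
static const int kTableSize = 1 << kTableBits;
static uintptr_t set1[kTableSize];  // Zero means "empty slot".
static uintptr_t set2[kTableSize];

// Lossy membership test: may report "new" for a duplicate after an
// eviction, but never reports "seen" for a genuinely new address.
bool ProbablySeen(uintptr_t addr) {
  int h1 = static_cast<int>((addr >> 2) & (kTableSize - 1));
  int h2 = static_cast<int>((addr >> (2 + kTableBits)) & (kTableSize - 1));
  if (set1[h1] == addr || set2[h2] == addr) return true;
  if (set1[h1] == 0) {
    set1[h1] = addr;
  } else if (set2[h2] == 0) {
    set2[h2] = addr;
  } else {
    // Double collision: throw away an entry, as Compact() does. The
    // evicted address may now be reported as new a second time.
    set1[h1] = addr;
    set2[h2] = 0;
  }
  return false;
}

int main() {
  printf("%d\n", ProbablySeen(0x1000));  // 0: first sighting, inserted.
  printf("%d\n", ProbablySeen(0x1000));  // 1: found in set1.
  return 0;
}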