Chromium Code Reviews

Unified Diff: src/store-buffer.cc

Issue 15896037: Fix store buffer ensure space heuristics. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 6 months ago
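
The change below replaces the inline space checks in StoreBuffer::EnsureSpace() with a SpaceAvailable(space_needed) helper, re-tests for the amount of space actually requested (rather than the old "less than half full" heuristic) after filtering scan-on-scavenge pages, and reverses the page-sampling loop so that the mildest filter ({ 97, ... }) runs first and the exempt-everything configuration ({ 1, 0 }) is genuinely a last resort.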
--- src/store-buffer.cc (old)
+++ src/store-buffer.cc (new)
@@ -1,10 +1,10 @@
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
@@ -135,71 +135,73 @@
       if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
         *write++ = current;
       }
     }
     previous = current;
   }
   old_top_ = write;
 }
 
 
+bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
+  return old_limit_ - old_top_ >= space_needed;
+}
+
+
 void StoreBuffer::EnsureSpace(intptr_t space_needed) {
   while (old_limit_ - old_top_ < space_needed &&
          old_limit_ < old_reserved_limit_) {
     size_t grow = old_limit_ - old_start_;  // Double size.
     CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                       grow * kPointerSize,
                                       false));
     old_limit_ += grow;
   }
 
-  if (old_limit_ - old_top_ >= space_needed) return;
+  if (SpaceAvailable(space_needed)) return;
 
   if (old_buffer_is_filtered_) return;
   ASSERT(may_move_store_buffer_entries_);
   Compact();
 
   old_buffer_is_filtered_ = true;
   bool page_has_scan_on_scavenge_flag = false;
 
   PointerChunkIterator it(heap_);
   MemoryChunk* chunk;
   while ((chunk = it.next()) != NULL) {
     if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
   }
 
   if (page_has_scan_on_scavenge_flag) {
     Filter(MemoryChunk::SCAN_ON_SCAVENGE);
   }
 
-  // If filtering out the entries from scan_on_scavenge pages got us down to
-  // less than half full, then we are satisfied with that.
-  if (old_limit_ - old_top_ > old_top_ - old_start_) return;
-
+  if (SpaceAvailable(space_needed)) return;
   // Sample 1 entry in 97 and filter out the pages where we estimate that more
   // than 1 in 8 pointers are to new space.
   static const int kSampleFinenesses = 5;
   static const struct Samples {
     int prime_sample_step;
     int threshold;
   } samples[kSampleFinenesses] = {
     { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
     { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
     { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
     { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
     { 1, 0}
   };
-  for (int i = kSampleFinenesses - 1; i >= 0; i--) {
+  for (int i = 0; i < kSampleFinenesses; i++) {
     ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
     // As a last resort we mark all pages as being exempt from the store buffer.
     ASSERT(i != 0 || old_top_ == old_start_);
-    if (old_limit_ - old_top_ > old_top_ - old_start_) return;
+    if (SpaceAvailable(space_needed)) return;
   }
   UNREACHABLE();
 }
 
 
 // Sample the store buffer to see if some pages are taking up a lot of space
 // in the store buffer.
 void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
   PointerChunkIterator it(heap_);
   MemoryChunk* chunk;
@@ -689,10 +691,10 @@
   heap_->isolate()->counters()->store_buffer_compactions()->Increment();
   CheckForFullBuffer();
 }
 
 
 void StoreBuffer::CheckForFullBuffer() {
   EnsureSpace(kStoreBufferSize * 2);
 }
 
 } }  // namespace v8::internal
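
The behavioral core of the fix is the loop direction. The old code walked the samples table backwards, so its first pass used { 1, 0 }: sample every entry and exempt any page with even a single hit, which is the documented last-resort configuration and could exempt every page before the milder filters ever ran. Below is a standalone sketch (not V8 source) contrasting the two orderings; the threshold values are illustrative stand-ins for the Page::kPageSize-derived expressions in the patch.

// Standalone sketch (not V8 source): contrasts the old and new pass
// orderings over the patch's samples table. Threshold values here are
// illustrative stand-ins for the Page::kPageSize-derived ones.
#include <cstdio>

struct Sample { int prime_sample_step; int threshold; };

static const int kSampleFinenesses = 5;
static const Sample samples[kSampleFinenesses] = {
  { 97, 8 }, { 23, 16 }, { 7, 32 }, { 3, 256 }, { 1, 0 }
};

int main() {
  // Old (buggy) order: i counts down, so the first pass uses { 1, 0 },
  // i.e. sample every entry and exempt any page with a single hit. The
  // last-resort filter runs first and empties the buffer immediately.
  std::printf("old order:\n");
  for (int i = kSampleFinenesses - 1; i >= 0; i--) {
    std::printf("  step %d, threshold %d\n",
                samples[i].prime_sample_step, samples[i].threshold);
  }
  // Fixed order: coarsest sampling and mildest filtering first; { 1, 0 }
  // runs only if no earlier pass frees enough space.
  std::printf("new order:\n");
  for (int i = 0; i < kSampleFinenesses; i++) {
    std::printf("  step %d, threshold %d\n",
                samples[i].prime_sample_step, samples[i].threshold);
  }
  return 0;
}

Running the sketch prints the old order starting at step 1, threshold 0 and the new order starting at step 97, threshold 8, which is the whole point of the patch: EnsureSpace() now escalates through progressively more aggressive filters and only empties the buffer as a last resort.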
