Chromium Code Reviews

Side by Side Diff: src/store-buffer.cc

Issue 6309012: * Complete new store buffer on ia32. The store buffer now covers... (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/gc/
Patch Set: '' Created 9 years, 11 months ago
1 // Copyright 2010 the V8 project authors. All rights reserved. 1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 16 matching lines...)
27 27
28 #include "v8-counters.h" 28 #include "v8-counters.h"
29 #include "store-buffer.h" 29 #include "store-buffer.h"
30 #include "store-buffer-inl.h" 30 #include "store-buffer-inl.h"
31 31
32 namespace v8 { 32 namespace v8 {
33 namespace internal { 33 namespace internal {
34 34
35 Address* StoreBuffer::start_ = NULL; 35 Address* StoreBuffer::start_ = NULL;
36 Address* StoreBuffer::limit_ = NULL; 36 Address* StoreBuffer::limit_ = NULL;
37 Address* StoreBuffer::old_start_ = NULL;
38 Address* StoreBuffer::old_limit_ = NULL;
39 Address* StoreBuffer::old_top_ = NULL;
37 uintptr_t* StoreBuffer::hash_map_1_ = NULL; 40 uintptr_t* StoreBuffer::hash_map_1_ = NULL;
38 uintptr_t* StoreBuffer::hash_map_2_ = NULL; 41 uintptr_t* StoreBuffer::hash_map_2_ = NULL;
39 VirtualMemory* StoreBuffer::virtual_memory_ = NULL; 42 VirtualMemory* StoreBuffer::virtual_memory_ = NULL;
43 bool StoreBuffer::must_scan_entire_memory_ = false;
44 bool StoreBuffer::old_buffer_is_sorted_ = false;
45 bool StoreBuffer::during_gc_ = false;
40 46
41 void StoreBuffer::Setup() { 47 void StoreBuffer::Setup() {
42 virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3); 48 virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
43 uintptr_t start_as_int = 49 uintptr_t start_as_int =
44 reinterpret_cast<uintptr_t>(virtual_memory_->address()); 50 reinterpret_cast<uintptr_t>(virtual_memory_->address());
45 start_ = 51 start_ =
46 reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2)); 52 reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
47 limit_ = start_ + (kStoreBufferSize / sizeof(*start_)); 53 limit_ = start_ + (kStoreBufferSize / sizeof(*start_));
48 54
55 old_top_ = old_start_ = new Address[kOldStoreBufferLength];
56 old_limit_ = old_start_ + kOldStoreBufferLength;
57
49 ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address()); 58 ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
50 ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address()); 59 ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
51 Address* vm_limit = reinterpret_cast<Address*>( 60 Address* vm_limit = reinterpret_cast<Address*>(
52 reinterpret_cast<char*>(virtual_memory_->address()) + 61 reinterpret_cast<char*>(virtual_memory_->address()) +
53 virtual_memory_->size()); 62 virtual_memory_->size());
54 ASSERT(start_ <= vm_limit); 63 ASSERT(start_ <= vm_limit);
55 ASSERT(limit_ <= vm_limit); 64 ASSERT(limit_ <= vm_limit);
56 USE(vm_limit); 65 USE(vm_limit);
57 ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0); 66 ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
58 ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) == 67 ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
59 0); 68 0);
60 69
61 virtual_memory_->Commit(reinterpret_cast<Address>(start_), 70 virtual_memory_->Commit(reinterpret_cast<Address>(start_),
62 kStoreBufferSize, 71 kStoreBufferSize,
63 false); // Not executable. 72 false); // Not executable.
64 Heap::public_set_store_buffer_top(start_); 73 Heap::public_set_store_buffer_top(start_);
65 74
66 hash_map_1_ = new uintptr_t[kHashMapLength]; 75 hash_map_1_ = new uintptr_t[kHashMapLength];
67 hash_map_2_ = new uintptr_t[kHashMapLength]; 76 hash_map_2_ = new uintptr_t[kHashMapLength];
77
78 Heap::AddGCPrologueCallback(&GCPrologue, kGCTypeAll);
79 Heap::AddGCEpilogueCallback(&GCEpilogue, kGCTypeAll);
80
81 GCPrologue(kGCTypeMarkSweepCompact, kNoGCCallbackFlags);
68 } 82 }
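
A note on the arithmetic in Setup(), as a standalone sketch rather than part of the patch: reserving kStoreBufferSize * 3 bytes and rounding the start up to a kStoreBufferSize * 2 boundary guarantees both that the buffer fits inside the reservation and that the kStoreBufferSize bit is clear at start_ but set at limit_, which is what the two kStoreBufferOverflowBit ASSERTs above verify. The buffer size and base address below are made-up stand-ins, and RoundUp is inlined as byte arithmetic:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Stand-in values; the real constants live in store-buffer.h.
      const uintptr_t kStoreBufferSize = 1 << 16;
      const uintptr_t kStoreBufferOverflowBit = kStoreBufferSize;
      uintptr_t reservation = 0x12345678;  // any base the OS might return
      // RoundUp(reservation, kStoreBufferSize * 2) in byte arithmetic.
      uintptr_t start = (reservation + kStoreBufferSize * 2 - 1) &
                        ~(kStoreBufferSize * 2 - 1);
      uintptr_t limit = start + kStoreBufferSize;
      assert(limit <= reservation + kStoreBufferSize * 3);   // fits reservation
      assert((start & kStoreBufferOverflowBit) == 0);        // clear at start...
      assert(((limit - 1) & kStoreBufferOverflowBit) == 0);  // ...and below limit
      assert((limit & kStoreBufferOverflowBit) != 0);        // set exactly at limit
      return 0;
    }
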
69 83
70 84
71 void StoreBuffer::TearDown() { 85 void StoreBuffer::TearDown() {
72 delete virtual_memory_; 86 delete virtual_memory_;
73 delete[] hash_map_1_; 87 delete[] hash_map_1_;
74 delete[] hash_map_2_; 88 delete[] hash_map_2_;
89 delete[] old_start_;
90 old_start_ = old_top_ = old_limit_ = NULL;
75 start_ = limit_ = NULL; 91 start_ = limit_ = NULL;
76 Heap::public_set_store_buffer_top(start_); 92 Heap::public_set_store_buffer_top(start_);
77 } 93 }
78 94
79 95
80 void StoreBuffer::Compact() { 96
97 #if V8_TARGET_ARCH_X64
98 static int CompareAddresses(const void* void_a, const void* void_b) {
99 intptr_t a = reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
100 intptr_t b = reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
101 // Unfortunately if int is smaller than intptr_t there is no branch-free
102 // way to return a number with the same sign as the difference between the
103 // pointers.
104 if (a == b) return 0;
105 if (a < b) return -1;
106 return 1;  // Only a > b remains; ensures every path returns.
107 }
108 #else
109 static int CompareAddresses(const void* void_a, const void* void_b) {
110 intptr_t a =
111 reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
112 intptr_t b =
113 reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
114 ASSERT(sizeof(1) == sizeof(a));
Vyacheslav Egorov (Chromium) 2011/01/21 18:18:18 this look really strange. should not you check for…
Erik Corry 2011/01/24 13:56:00 The linter objects to sizeof on types.
115 // Shift down to avoid wraparound.
116 return (a >> kPointerSizeLog2) - (b >> kPointerSizeLog2);
117 }
118 #endif
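
A side note on the two comparators, for illustration only: returning the raw difference from a qsort callback is unsafe once it can exceed int range, because truncation can flip the sign. Shifting both operands down by kPointerSizeLog2 keeps ia32 address differences inside int range, while on x64 even shifted differences can overflow, hence the branching variant. A small demonstration with made-up ia32-style addresses (assumes a 64-bit host, so intptr_t is wider than int):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Two ia32-style heap addresses about 2.75GB apart (made-up values).
      intptr_t a = static_cast<intptr_t>(0xC0000000u);
      intptr_t b = static_cast<intptr_t>(0x10000000u);
      // Naive comparator: the true difference, 0xB0000000, does not fit in
      // a 32-bit int, so the truncated result comes out negative.
      int naive = static_cast<int32_t>(a - b);
      // Shifted comparator (kPointerSizeLog2 == 2 on ia32): the shifted
      // difference fits in an int and keeps the correct sign.
      int shifted = static_cast<int>((a >> 2) - (b >> 2));
      printf("naive: %d  shifted: %d\n", naive, shifted);  // naive < 0, shifted > 0
      return 0;
    }
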
119
120
121 void StoreBuffer::Uniq() {
122 ASSERT(HashTablesAreZapped());
123 // Remove duplicates and cells that do not point at new space.
Vyacheslav Egorov (Chromium) 2011/01/21 18:18:18 This comment is a bit confusing. It removes all d…
Erik Corry 2011/01/24 13:56:00 Comment fixed.
124 Address previous = NULL;
125 Address* write = old_start_;
126 for (Address* read = old_start_; read < old_top_; read++) {
127 Address current = *read;
128 if (current != previous) {
129 if (Heap::new_space()->Contains(*reinterpret_cast<Address*>(current))) {
Vyacheslav Egorov (Chromium) 2011/01/21 18:18:18 There is (now) Heap::InNewSpace(Address addr)
Erik Corry 2011/01/24 13:56:00 Done.
130 *write++ = current;
131 }
132 }
133 previous = current;
134 }
135 old_top_ = write;
136 }
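
Uniq() above is the classic in-place compaction idiom: a read pointer scans the sorted range, a trailing write pointer receives survivors, and an entry survives only if it differs from its predecessor and still passes a filter. The same pattern in a self-contained sketch; DedupEvens is a hypothetical helper, and evenness stands in for "still points at new space":

    #include <cstdio>

    // Drop adjacent duplicates in a sorted array, keep only entries passing
    // the filter, and return the new length.
    static int DedupEvens(int* data, int length) {
      int previous = -1;  // sentinel that matches no entry
      int* write = data;
      for (int* read = data; read < data + length; read++) {
        int current = *read;
        if (current != previous && (current & 1) == 0) {
          *write++ = current;
        }
        previous = current;
      }
      return static_cast<int>(write - data);
    }

    int main() {
      int data[] = {2, 2, 3, 4, 4, 4, 5, 8};
      int n = DedupEvens(data, 8);
      for (int i = 0; i < n; i++) printf("%d ", data[i]);  // prints: 2 4 8
      printf("\n");
      return 0;
    }
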
137
138
139 void StoreBuffer::SortUniq() {
140 Compact();
141 if (old_buffer_is_sorted_) return;
142 ZapHashTables();
Vyacheslav Egorov (Chromium) 2011/01/21 18:18:18 Instead of zaping hashtables completely we can upd…
Erik Corry 2011/01/24 13:56:00 I think it isn't worth it.
143 qsort(reinterpret_cast<void*>(old_start_),
144 old_top_ - old_start_,
145 sizeof(*old_top_),
146 &CompareAddresses);
147 Uniq();
148
149 old_buffer_is_sorted_ = true;
150 }
151
152
153 #ifdef DEBUG
154 void StoreBuffer::Clean() {
155 if (must_scan_entire_memory_) {
156 // We don't currently have a way to recover from this condition.
Vyacheslav Egorov (Chromium) 2011/01/21 18:18:18 If we do not have a way to recover from this condition…
Erik Corry 2011/01/24 13:56:00 I will update the comment. We can recover in term…
157 // We should rebuild the store buffer during GC.
158 old_top_ = old_start_; // Just clear the cache.
159 return;
160 }
161 ASSERT(old_buffer_is_sorted_);
162 ZapHashTables();
163 Uniq(); // Also removes things that no longer point to new space.
164 CheckForFullBuffer();
165 }
166
167
168 static bool Zapped(char* start, int size) {
169 for (int i = 0; i < size; i++) {
170 if (start[i] != 0) return false;
171 }
172 return true;
173 }
174
175
176 bool StoreBuffer::HashTablesAreZapped() {
177 return Zapped(reinterpret_cast<char*>(hash_map_1_),
178 sizeof(uintptr_t) * kHashMapLength) &&
179 Zapped(reinterpret_cast<char*>(hash_map_2_),
180 sizeof(uintptr_t) * kHashMapLength);
181 }
182
183
184 #endif
185
186
187 void StoreBuffer::ZapHashTables() {
81 memset(reinterpret_cast<void*>(hash_map_1_), 188 memset(reinterpret_cast<void*>(hash_map_1_),
82 0, 189 0,
83 sizeof(uintptr_t) * kHashMapLength); 190 sizeof(uintptr_t) * kHashMapLength);
84 memset(reinterpret_cast<void*>(hash_map_2_), 191 memset(reinterpret_cast<void*>(hash_map_2_),
85 0, 192 0,
86 sizeof(uintptr_t) * kHashMapLength); 193 sizeof(uintptr_t) * kHashMapLength);
194 }
195
196
197 void StoreBuffer::GCPrologue(GCType type, GCCallbackFlags flags) {
198 ZapHashTables();
199 during_gc_ = true;
200 if (type != kGCTypeScavenge) {
201 old_top_ = old_start_;
202 Heap::public_set_store_buffer_top(start_);
203 }
204 }
205
206
207 void StoreBuffer::Verify() {
208 #ifdef DEBUG
209 if (FLAG_verify_heap && !StoreBuffer::must_scan_entire_memory()) {
210 Heap::OldPointerSpaceCheckStoreBuffer(Heap::WATERMARK_SHOULD_BE_VALID);
Vyacheslav Egorov (Chromium) 2011/01/21 18:18:18 I don't see a reason why this methods should be in…
Erik Corry 2011/01/24 13:56:00 They use the constants like Heap::WATERMARK_SHOULD_BE_VALID…
211 Heap::MapSpaceCheckStoreBuffer(Heap::WATERMARK_SHOULD_BE_VALID);
212 Heap::LargeObjectSpaceCheckStoreBuffer();
213 }
214 #endif
215 }
216
217
218 void StoreBuffer::GCEpilogue(GCType type, GCCallbackFlags flags) {
219 during_gc_ = false;
220 Verify();
221 }
222
223
224 void StoreBuffer::Compact() {
87 Address* top = reinterpret_cast<Address*>(Heap::store_buffer_top()); 225 Address* top = reinterpret_cast<Address*>(Heap::store_buffer_top());
88 Address* stop = top; 226 if (top == start_) return;
89 ASSERT(top <= limit_); 227 ASSERT(top <= limit_);
90 top = start_; 228 Heap::public_set_store_buffer_top(start_);
229 if (must_scan_entire_memory_) return;
91 // Goes through the addresses in the store buffer attempting to remove 230 // Goes through the addresses in the store buffer attempting to remove
92 // duplicates. In the interest of speed this is a lossy operation. Some 231 // duplicates. In the interest of speed this is a lossy operation. Some
93 // duplicates will remain. We have two hash tables with different hash 232 // duplicates will remain. We have two hash tables with different hash
94 // functions to reduce the number of unnecessary clashes. 233 // functions to reduce the number of unnecessary clashes.
95 for (Address* current = start_; current < stop; current++) { 234 for (Address* current = start_; current < top; current++) {
96 uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current); 235 uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
97 // Shift out the last bits including any tags. 236 // Shift out the last bits including any tags.
98 int_addr >>= kPointerSizeLog2; 237 int_addr >>= kPointerSizeLog2;
99 int hash1 = 238 int hash1 =
100 ((int_addr ^ (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1)); 239 ((int_addr ^ (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1));
101 if (hash_map_1_[hash1] == int_addr) continue; 240 if (hash_map_1_[hash1] == int_addr) continue;
102 int hash2 = 241 int hash2 =
103 ((int_addr - (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1)); 242 ((int_addr - (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1));
104 hash2 ^= hash2 >> (kHashMapLengthLog2 * 2); 243 hash2 ^= hash2 >> (kHashMapLengthLog2 * 2);
105 if (hash_map_2_[hash2] == int_addr) continue; 244 if (hash_map_2_[hash2] == int_addr) continue;
106 if (hash_map_1_[hash1] == 0) { 245 if (hash_map_1_[hash1] == 0) {
107 hash_map_1_[hash1] = int_addr; 246 hash_map_1_[hash1] = int_addr;
108 } else if (hash_map_2_[hash2] == 0) { 247 } else if (hash_map_2_[hash2] == 0) {
109 hash_map_2_[hash2] = int_addr; 248 hash_map_2_[hash2] = int_addr;
110 } else { 249 } else {
111 // Rather than slowing down we just throw away some entries. This will 250 // Rather than slowing down we just throw away some entries. This will
112 // cause some duplicates to remain undetected. 251 // cause some duplicates to remain undetected.
113 hash_map_1_[hash1] = int_addr; 252 hash_map_1_[hash1] = int_addr;
114 hash_map_2_[hash2] = 0; 253 hash_map_2_[hash2] = 0;
115 } 254 }
116 ASSERT(top <= current); 255 old_buffer_is_sorted_ = false;
117 ASSERT(top <= limit_); 256 *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
118 *top++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2); 257 ASSERT(old_top_ <= old_limit_);
119 } 258 }
120 Counters::store_buffer_compactions.Increment(); 259 Counters::store_buffer_compactions.Increment();
121 if (limit_ - top < top - start_) { 260 CheckForFullBuffer();
122 // Compression did not free up at least half. 261 }
262
263
264 void StoreBuffer::CheckForFullBuffer() {
265 if (old_limit_ - old_top_ < kStoreBufferSize * 2) {
266 // After compression we don't have enough space to be sure that the
267 // next two compressions will have enough space in the buffer. We
268 // start by trying a more aggressive compression. If this frees up at least
269 // half the space then we can keep going, otherwise it is time to brake.
270 SortUniq();
271 if (old_limit_ - old_top_ > old_top_ - old_start_) {
272 return;
273 }
123 // TODO(gc): Set an interrupt to do a GC on the next back edge. 274 // TODO(gc): Set an interrupt to do a GC on the next back edge.
124 // TODO(gc): Allocate the rest of new space to force a GC on the next 275 // TODO(gc): Allocate the rest of new space to force a GC on the next
125 // allocation. 276 // allocation.
126 if (limit_ - top < (top - start_) >> 1) { 277 if (old_limit_ - old_top_ < kStoreBufferSize) {
127 // Compression did not free up at least one quarter. 278 // After compression we don't even have enough space for the next
279 // compression to be guaranteed to succeed.
128 // TODO(gc): Set a flag to scan all of memory. 280 // TODO(gc): Set a flag to scan all of memory.
129 top = start_;
130 Counters::store_buffer_overflows.Increment(); 281 Counters::store_buffer_overflows.Increment();
282 printf("store buffer overfull\n");
Vyacheslav Egorov (Chromium) 2011/01/21 18:18:18 kill printf
Erik Corry 2011/01/24 13:56:00 Done.
283 must_scan_entire_memory_ = true;
131 } 284 }
132 } 285 }
133 Heap::public_set_store_buffer_top(top);
134 } 286 }
135 287
136 } } // namespace v8::internal 288 } } // namespace v8::internal
