Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 117 matching lines...) | |
| 128 incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit); | 128 incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit); |
| 129 } | 129 } |
| 130 } | 130 } |
| 131 } | 131 } |
| 132 | 132 |
| 133 Heap* heap_; | 133 Heap* heap_; |
| 134 IncrementalMarking* incremental_marking_; | 134 IncrementalMarking* incremental_marking_; |
| 135 }; | 135 }; |
| 136 | 136 |
| 137 | 137 |
| 138 void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk, | |
| 139 bool is_marking) { | |
| 140 if (is_marking) { | |
| 141 chunk->SetFlag(MemoryChunk::CONTAINS_INTERESTING_VALUES); | |
| 142 chunk->SetFlag(MemoryChunk::CONTAINS_INTERESTING_DESTINATIONS); | |
| 143 } else if (chunk->owner()->identity() == CELL_SPACE || | |
| 144 chunk->scan_on_scavenge()) { | |
| 145 chunk->ClearFlag(MemoryChunk::CONTAINS_INTERESTING_VALUES); | |
| 146 chunk->ClearFlag(MemoryChunk::CONTAINS_INTERESTING_DESTINATIONS); | |
| 147 } else { | |
| 148 chunk->ClearFlag(MemoryChunk::CONTAINS_INTERESTING_VALUES); | |
| 149 chunk->SetFlag(MemoryChunk::CONTAINS_INTERESTING_DESTINATIONS); | |
| 150 } | |
| 151 } | |
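One way to read the three branches in `SetOldSpacePageFlags` is as a per-page truth table for the write barrier. The sketch below restates that logic with a standalone `Page` type; the struct, the boolean stand-ins for `owner()->identity()` and `scan_on_scavenge()`, and the interpretive comments are assumptions for illustration, not V8 internals.

```cpp
#include <cstdint>

// Stand-ins for the two MemoryChunk flag bits used in the patch.
enum PageFlag : uint32_t {
  CONTAINS_INTERESTING_VALUES       = 1u << 0,
  CONTAINS_INTERESTING_DESTINATIONS = 1u << 1,
};

// Hypothetical minimal page; V8's MemoryChunk is far richer.
struct Page {
  uint32_t flags = 0;
  bool is_cell_space = false;    // stands in for owner()->identity() == CELL_SPACE
  bool scan_on_scavenge = false;
  void Set(PageFlag f)   { flags |= f; }
  void Clear(PageFlag f) { flags &= ~f; }
};

// Same branch structure as SetOldSpacePageFlags above.
void SetOldSpacePageFlagsSketch(Page* p, bool is_marking) {
  if (is_marking) {
    // While marking, stores both of and into this page matter.
    p->Set(CONTAINS_INTERESTING_VALUES);
    p->Set(CONTAINS_INTERESTING_DESTINATIONS);
  } else if (p->is_cell_space || p->scan_on_scavenge) {
    // Presumably such pages are rescanned wholesale on scavenge,
    // so no individual stores need recording.
    p->Clear(CONTAINS_INTERESTING_VALUES);
    p->Clear(CONTAINS_INTERESTING_DESTINATIONS);
  } else {
    // Outside marking, only stores *into* old space (old-to-new
    // pointers) still appear to need recording for the scavenger.
    p->Clear(CONTAINS_INTERESTING_VALUES);
    p->Set(CONTAINS_INTERESTING_DESTINATIONS);
  }
}
```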
| 152 | |
| 153 | |
| 154 void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk, | |
| 155 bool is_marking) { | |
| 156 chunk->SetFlag(MemoryChunk::CONTAINS_INTERESTING_VALUES); | |
| 157 if (is_marking) { | |
| 158 chunk->SetFlag(MemoryChunk::CONTAINS_INTERESTING_DESTINATIONS); | |
| 159 } else { | |
| 160 chunk->ClearFlag(MemoryChunk::CONTAINS_INTERESTING_DESTINATIONS); | |
| 161 } | |
| 162 } | |
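Note the asymmetry with old space: new-space pages keep `CONTAINS_INTERESTING_VALUES` set even when marking is off. That is consistent with a barrier fast path that records a store only when both the value's page and the destination's page are flagged, so old-to-new stores stay visible to the scavenger while old-to-old stores are filtered out. The predicate below is an assumption about how a record-write stub could consume these bits, not code from this CL.

```cpp
#include <cstdint>

constexpr uint32_t kInterestingValues       = 1u << 0;  // assumed bit layout
constexpr uint32_t kInterestingDestinations = 1u << 1;

// Hypothetical record-write fast path: with new-space pages always flagged
// as interesting values, an old-to-new store is recorded even when
// incremental marking is off, while old-to-old stores take the cheap exit.
inline bool ShouldRecordWrite(uint32_t value_page_flags,
                              uint32_t dest_page_flags) {
  return (value_page_flags & kInterestingValues) != 0 &&
         (dest_page_flags & kInterestingDestinations) != 0;
}
```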
| 163 | |
| 164 | |
| 165 void IncrementalMarking::RevertWriteBarrierFlags(PagedSpace* space) { | |
| Erik Corry (2011/05/11 18:53:57): Revert doesn't convey a lot of meaning to me. Rev | |
| Vyacheslav Egorov (Chromium) (2011/05/13 11:06:52): Done. | |
| 166 PageIterator it(space); | |
| 167 while (it.has_next()) { | |
| 168 Page* p = it.next(); | |
| 169 SetOldSpacePageFlags(p, false); | |
| 170 } | |
| 171 } | |
| 172 | |
| 173 | |
| 174 void IncrementalMarking::RevertWriteBarrierFlags() { | |
| 175 RevertWriteBarrierFlags(heap_->old_pointer_space()); | |
| 176 RevertWriteBarrierFlags(heap_->old_data_space()); | |
| 177 RevertWriteBarrierFlags(heap_->cell_space()); | |
| 178 RevertWriteBarrierFlags(heap_->map_space()); | |
| 179 RevertWriteBarrierFlags(heap_->code_space()); | |
| 180 | |
| 181 SetNewSpacePageFlags(heap_->new_space()->ActivePage(), false); | |
| 182 | |
| 183 LargePage* lop = heap_->lo_space()->first_page(); | |
| 184 while (lop->is_valid()) { | |
| 185 SetOldSpacePageFlags(lop, false); | |
| 186 lop = lop->next_page(); | |
| 187 } | |
| 188 } | |
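The large-object space is walked as an intrusive singly linked list terminated by an invalid sentinel page (the `is_valid()`/`next_page()` calls above). A plain-pointer rendering of the same walk, with hypothetical types, is shown below; V8 itself uses a sentinel rather than `nullptr` termination.

```cpp
// Hypothetical equivalent of the large-object walk above.
struct LargePageSketch {
  LargePageSketch* next = nullptr;
  bool marking_flags_on = false;
};

void RevertLargeObjectFlags(LargePageSketch* first_page) {
  for (LargePageSketch* p = first_page; p != nullptr; p = p->next) {
    p->marking_flags_on = false;  // cf. SetOldSpacePageFlags(lop, false)
  }
}
```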
| 189 | |
| 190 | |
| 138 void IncrementalMarking::ClearMarkbits(PagedSpace* space) { | 191 void IncrementalMarking::ClearMarkbits(PagedSpace* space) { |
| 139 PageIterator it(space); | 192 PageIterator it(space); |
| 140 | |
| 141 while (it.has_next()) { | 193 while (it.has_next()) { |
| 142 Page* p = it.next(); | 194 Page* p = it.next(); |
| 143 p->markbits()->Clear(); | 195 p->markbits()->Clear(); |
| 196 SetOldSpacePageFlags(p, true); | |
| 144 } | 197 } |
| 145 } | 198 } |
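The per-page mark bitmap only needs `Clear()` here and `IsClean()` in the DEBUG verifier further down. A minimal bitmap satisfying that interface might look like the following; the cell width and count are invented for the sketch, not V8's real layout.

```cpp
#include <array>
#include <cstdint>

// Minimal mark bitmap exposing just the interface this file relies on
// (markbits()->Clear() here, markbits()->IsClean() in the DEBUG verifier).
class MarkBitmapSketch {
 public:
  void Clear() { cells_.fill(0); }
  bool IsClean() const {
    for (uint32_t cell : cells_) {
      if (cell != 0) return false;  // any set bit means a marked object
    }
    return true;
  }

 private:
  std::array<uint32_t, 1024> cells_{};  // one mark bit per markable word (assumed)
};
```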
| 146 | 199 |
| 147 | 200 |
| 148 void IncrementalMarking::ClearMarkbits() { | 201 void IncrementalMarking::ClearMarkbits() { |
| 149 // TODO(gc): Clear the mark bits in the sweeper. | 202 // TODO(gc): Clear the mark bits in the sweeper. |
| 150 ClearMarkbits(heap_->old_pointer_space()); | 203 ClearMarkbits(heap_->old_pointer_space()); |
| 151 ClearMarkbits(heap_->old_data_space()); | 204 ClearMarkbits(heap_->old_data_space()); |
| 152 ClearMarkbits(heap_->cell_space()); | 205 ClearMarkbits(heap_->cell_space()); |
| 153 ClearMarkbits(heap_->map_space()); | 206 ClearMarkbits(heap_->map_space()); |
| 154 ClearMarkbits(heap_->code_space()); | 207 ClearMarkbits(heap_->code_space()); |
| 208 | |
| 209 SetNewSpacePageFlags(heap_->new_space()->ActivePage(), true); | |
| 210 | |
| 211 LargePage* lop = heap_->lo_space()->first_page(); | |
| 212 while (lop->is_valid()) { | |
| 213 SetOldSpacePageFlags(lop, true); | |
| 214 lop = lop->next_page(); | |
| 215 } | |
| 155 } | 216 } |
| 156 | 217 |
| 157 | 218 |
| 158 #ifdef DEBUG | 219 #ifdef DEBUG |
| 159 void IncrementalMarking::VerifyMarkbitsAreClean(PagedSpace* space) { | 220 void IncrementalMarking::VerifyMarkbitsAreClean(PagedSpace* space) { |
| 160 PageIterator it(space); | 221 PageIterator it(space); |
| 161 | 222 |
| 162 while (it.has_next()) { | 223 while (it.has_next()) { |
| 163 Page* p = it.next(); | 224 Page* p = it.next(); |
| 164 ASSERT(p->markbits()->IsClean()); | 225 ASSERT(p->markbits()->IsClean()); |
| (...skipping 196 matching lines...) | |
| 361 | 422 |
| 362 | 423 |
| 363 void IncrementalMarking::Abort() { | 424 void IncrementalMarking::Abort() { |
| 364 if (IsStopped()) return; | 425 if (IsStopped()) return; |
| 365 if (FLAG_trace_incremental_marking) { | 426 if (FLAG_trace_incremental_marking) { |
| 366 PrintF("[IncrementalMarking] Aborting.\n"); | 427 PrintF("[IncrementalMarking] Aborting.\n"); |
| 367 } | 428 } |
| 368 heap_->new_space()->LowerInlineAllocationLimit(0); | 429 heap_->new_space()->LowerInlineAllocationLimit(0); |
| 369 IncrementalMarking::set_should_hurry(false); | 430 IncrementalMarking::set_should_hurry(false); |
| 370 ResetStepCounters(); | 431 ResetStepCounters(); |
| 371 if (IsMarking()) PatchIncrementalMarkingRecordWriteStubs(false); | 432 if (IsMarking()) { |
| 433 PatchIncrementalMarkingRecordWriteStubs(false); | |
| 434 RevertWriteBarrierFlags(); | |
| 435 } | |
| 372 heap_->isolate()->stack_guard()->Continue(GC_REQUEST); | 436 heap_->isolate()->stack_guard()->Continue(GC_REQUEST); |
| 373 state_ = STOPPED; | 437 state_ = STOPPED; |
| 374 } | 438 } |
| 375 | 439 |
| 376 | 440 |
| 377 void IncrementalMarking::Finalize() { | 441 void IncrementalMarking::Finalize() { |
| 378 Hurry(); | 442 Hurry(); |
| 379 state_ = STOPPED; | 443 state_ = STOPPED; |
| 380 heap_->new_space()->LowerInlineAllocationLimit(0); | 444 heap_->new_space()->LowerInlineAllocationLimit(0); |
| 381 IncrementalMarking::set_should_hurry(false); | 445 IncrementalMarking::set_should_hurry(false); |
| 382 ResetStepCounters(); | 446 ResetStepCounters(); |
| 383 PatchIncrementalMarkingRecordWriteStubs(false); | 447 PatchIncrementalMarkingRecordWriteStubs(false); |
| 448 RevertWriteBarrierFlags(); | |
| 384 ASSERT(marking_deque_.IsEmpty()); | 449 ASSERT(marking_deque_.IsEmpty()); |
| 385 heap_->isolate()->stack_guard()->Continue(GC_REQUEST); | 450 heap_->isolate()->stack_guard()->Continue(GC_REQUEST); |
| 386 } | 451 } |
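Both stop paths now end by restoring the non-marking page flags, but they differ in what happens to the marking work: `Abort()` discards it (and only patches stubs and reverts flags if marking actually started), while `Finalize()` first pushes marking to completion via `Hurry()`. A compressed state sketch, with the heap work elided and everything beyond the state names assumed:

```cpp
// Compressed sketch of the two stop paths; the states mirror this file's
// STOPPED/MARKING/COMPLETE, everything else is elided or illustrative.
enum class State { STOPPED, MARKING, COMPLETE };

struct MarkerSketch {
  State state = State::STOPPED;
  bool barrier_on = false;

  void Abort() {                       // throw away partial marking work
    if (state == State::STOPPED) return;
    if (barrier_on) barrier_on = false;  // cf. RevertWriteBarrierFlags()
    state = State::STOPPED;
  }

  void Finalize() {                    // finish marking, then tear down
    // Hurry();                        // elided: drain the marking deque
    barrier_on = false;                // cf. RevertWriteBarrierFlags()
    state = State::STOPPED;
  }
};
```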
| 387 | 452 |
| 388 | 453 |
| 389 void IncrementalMarking::MarkingComplete() { | 454 void IncrementalMarking::MarkingComplete() { |
| 390 state_ = COMPLETE; | 455 state_ = COMPLETE; |
| 391 // We will set the stack guard to request a GC now. This will mean the rest | 456 // We will set the stack guard to request a GC now. This will mean the rest |
| 392 // of the GC gets performed as soon as possible (we can't do a GC here in a | 457 // of the GC gets performed as soon as possible (we can't do a GC here in a |
| 393 // record-write context). If a few things get allocated between now and then | 458 // record-write context). If a few things get allocated between now and then |
| (...skipping 67 matching lines...) | |
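The comment on `MarkingComplete()` (continued in the elided lines) explains why the GC is requested indirectly: a record-write stub is no place to run a collection, so completion merely raises a stack-guard interrupt that gets serviced at the next safe point. Schematically, and with all names invented for the sketch:

```cpp
#include <atomic>

// Hedged sketch of the request-then-poll pattern described above; the real
// StackGuard patches the stack limit so generated code traps at its next
// stack check rather than polling a separate flag.
std::atomic<bool> gc_requested{false};

void OnMarkingComplete() {             // runs inside the write barrier
  gc_requested.store(true, std::memory_order_relaxed);
}

void AtInterruptCheck() {              // runs at a safe point shortly after
  if (gc_requested.exchange(false, std::memory_order_relaxed)) {
    // CollectGarbage();               // elided: now safe to perform the GC
  }
}
```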
| 461 } | 526 } |
| 462 | 527 |
| 463 if (FLAG_trace_incremental_marking || FLAG_trace_gc) { | 528 if (FLAG_trace_incremental_marking || FLAG_trace_gc) { |
| 464 double end = OS::TimeCurrentMillis(); | 529 double end = OS::TimeCurrentMillis(); |
| 465 steps_took_ += (end - start); | 530 steps_took_ += (end - start); |
| 466 } | 531 } |
| 467 } | 532 } |
| 468 | 533 |
| 469 | 534 |
| 470 } } // namespace v8::internal | 535 } } // namespace v8::internal |