Index: src/heap.cc
diff --git a/src/heap.cc b/src/heap.cc
index dfb24783d016d6d2c5cde5106e93df6ce399cf5d..246cffd2c096fab687189833f493d61e41c5c0e5 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -465,7 +465,7 @@ void Heap::GarbageCollectionPrologue() {
 
   store_buffer()->GCPrologue();
 
-  if (FLAG_concurrent_osr) {
+  if (isolate()->concurrent_osr_enabled()) {
     isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
   }
 }
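The first three hunks swap global flag checks (FLAG_concurrent_osr, FLAG_concurrent_recompilation) for per-isolate queries. The accessors themselves are defined outside this diff; a minimal sketch of what Isolate plausibly exposes, assuming concurrent OSR is only meaningful when concurrent recompilation is on (the body below is an assumption, not part of the patch):

// Sketch only: called by the hunk above but defined elsewhere
// (presumably isolate.h); the exact body is an assumption.
bool Isolate::concurrent_osr_enabled() const {
  // Concurrent OSR rides on the optimizing compiler thread, so it can
  // only be on when concurrent recompilation itself is enabled.
  return concurrent_recompilation_enabled() && FLAG_concurrent_osr;
}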
@@ -662,7 +662,7 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
   // Note: as weak callbacks can execute arbitrary code, we cannot
   // hope that eventually there will be no weak callbacks invocations.
   // Therefore stop recollecting after several attempts.
-  if (FLAG_concurrent_recompilation) {
+  if (isolate()->concurrent_recompilation_enabled()) {
     // The optimizing compiler may be unnecessarily holding on to memory.
     DisallowHeapAllocation no_recursive_gc;
     isolate()->optimizing_compiler_thread()->Flush();
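DisallowHeapAllocation in the hunk above is V8's RAII guard against nested allocation: while the scope object lives, debug builds assert on any heap allocation (and hence on any recursive GC), and release builds compile it away. A minimal usage sketch of the same pattern:

{
  // No heap allocation may happen until this scope ends; Flush() is
  // therefore required not to allocate, which debug builds verify.
  DisallowHeapAllocation no_recursive_gc;
  isolate()->optimizing_compiler_thread()->Flush();
}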
@@ -763,7 +763,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
 
 
 int Heap::NotifyContextDisposed() {
-  if (FLAG_concurrent_recompilation) {
+  if (isolate()->concurrent_recompilation_enabled()) {
     // Flush the queued recompilation tasks.
     isolate()->optimizing_compiler_thread()->Flush();
   }
@@ -6581,6 +6581,14 @@ intptr_t Heap::PromotedSpaceSizeOfObjects() {
 }
 
 
+bool Heap::AdvanceSweepers(int step_size) {
+  ASSERT(isolate()->num_sweeper_threads() == 0);
+  bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
+  sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
+  return sweeping_complete;
+}
+
+
 intptr_t Heap::PromotedExternalMemorySize() {
   if (amount_of_external_allocated_memory_
       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
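The new Heap::AdvanceSweepers is the single-threaded lazy-sweeping step: the ASSERT documents that it must only run when no concurrent sweeper threads exist, and it reports completion only once both old spaces are fully swept. Note the deliberate use of &= rather than && so the second space is advanced even when the first is not yet done. A hypothetical caller (the surrounding function and step budget are illustrative, not part of this patch):

// Hypothetical caller: drive lazy sweeping in small increments, e.g.
// from an idle-time hook. Only AdvanceSweepers() is real here.
void Heap::StepLazySweeping() {
  const int kStepSize = 16;  // illustrative per-slice budget
  if (AdvanceSweepers(kStepSize)) {
    // Both old_data_space() and old_pointer_space() are fully swept.
  }
}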
@@ -6727,9 +6735,6 @@ bool Heap::SetUp() {
   store_buffer()->SetUp();
 
   if (FLAG_concurrent_recompilation) relocation_mutex_ = new Mutex;
-#ifdef DEBUG
-  relocation_mutex_locked_by_optimizer_thread_ = false;
-#endif  // DEBUG
 
   return true;
 }
@@ -6874,6 +6879,7 @@ void Heap::TearDown() {
   isolate_->memory_allocator()->TearDown();
 
   delete relocation_mutex_;
+  relocation_mutex_ = NULL;
 }
 
 
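SetUp() allocates relocation_mutex_ only when concurrent recompilation is enabled, so TearDown() now deletes the pointer and then clears it. Since delete of a null pointer is a no-op in C++, the pair is safe on every path, whether the mutex was never created or the teardown logic is reached again. A minimal standalone illustration of the invariant (not V8 code):

// delete of a null pointer is well-defined and does nothing, which is
// what makes the delete/NULL pair above harmless on every path.
Mutex* relocation_mutex_ = NULL;  // never allocated, or already freed
delete relocation_mutex_;         // no-op when NULL
relocation_mutex_ = NULL;         // keeps any later delete harmless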
@@ -7953,15 +7959,4 @@ void Heap::CheckpointObjectStats() {
   ClearObjectStats();
 }
 
-
-Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
-  if (FLAG_concurrent_recompilation) {
-    heap_->relocation_mutex_->Lock();
-#ifdef DEBUG
-    heap_->relocation_mutex_locked_by_optimizer_thread_ =
-        heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
-#endif  // DEBUG
-  }
-}
-
 } }  // namespace v8::internal
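The final hunk removes the out-of-line RelocationLock constructor together with its DEBUG-only bookkeeping (the matching relocation_mutex_locked_by_optimizer_thread_ reset also vanished from SetUp() above). The diff does not show where the lock went; one plausible inline replacement in heap.h, consistent with the per-isolate query introduced by this patch, might look like:

// Sketch under assumptions: the real destination of RelocationLock is
// not in this diff; this is one plausible inline form.
class Heap::RelocationLock {
 public:
  explicit RelocationLock(Heap* heap) : heap_(heap) {
    if (heap_->isolate()->concurrent_recompilation_enabled()) {
      heap_->relocation_mutex_->Lock();
    }
  }
  ~RelocationLock() {
    if (heap_->isolate()->concurrent_recompilation_enabled()) {
      heap_->relocation_mutex_->Unlock();
    }
  }
 private:
  Heap* heap_;
};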