OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/accessors.h" | 7 #include "src/accessors.h" |
8 #include "src/codegen.h" | 8 #include "src/codegen.h" |
9 #include "src/deoptimizer.h" | 9 #include "src/deoptimizer.h" |
10 #include "src/disasm.h" | 10 #include "src/disasm.h" |
(...skipping 334 matching lines...) |
345 JSFunction* function = | 345 JSFunction* function = |
346 static_cast<OptimizedFrame*>(it.frame())->function(); | 346 static_cast<OptimizedFrame*>(it.frame())->function(); |
347 CodeTracer::Scope scope(isolate->GetCodeTracer()); | 347 CodeTracer::Scope scope(isolate->GetCodeTracer()); |
348 PrintF(scope.file(), "[deoptimizer found activation of function: "); | 348 PrintF(scope.file(), "[deoptimizer found activation of function: "); |
349 function->PrintName(scope.file()); | 349 function->PrintName(scope.file()); |
350 PrintF(scope.file(), | 350 PrintF(scope.file(), |
351 " / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function)); | 351 " / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function)); |
352 } | 352 } |
353 SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc()); | 353 SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc()); |
354 int deopt_index = safepoint.deoptimization_index(); | 354 int deopt_index = safepoint.deoptimization_index(); |
355 bool safe_to_deopt = deopt_index != Safepoint::kNoDeoptimizationIndex; | 355 // Turbofan deopt is checked when we are patching return addresses on the stack. |
356 CHECK(topmost_optimized_code == NULL || safe_to_deopt); | 356 bool turbofanned = code->is_turbofanned(); |
| 357 bool safe_to_deopt = |
| 358 deopt_index != Safepoint::kNoDeoptimizationIndex || turbofanned; |
| 359 CHECK(topmost_optimized_code == NULL || safe_to_deopt || turbofanned); |
357 if (topmost_optimized_code == NULL) { | 360 if (topmost_optimized_code == NULL) { |
358 topmost_optimized_code = code; | 361 topmost_optimized_code = code; |
359 safe_to_deopt_topmost_optimized_code = safe_to_deopt; | 362 safe_to_deopt_topmost_optimized_code = safe_to_deopt; |
360 } | 363 } |
361 } | 364 } |
362 } | 365 } |
363 #endif | 366 #endif |
364 | 367 |
365 // Move marked code from the optimized code list to the deoptimized | 368 // Move marked code from the optimized code list to the deoptimized |
366 // code list, collecting the code objects into a ZoneList. | 369 // code list, collecting the code objects into a ZoneList. |
367 Zone zone(isolate); | 370 Zone zone(isolate); |
368 ZoneList<Code*> codes(10, &zone); | 371 ZoneList<Code*> codes(10, &zone); |
369 | 372 |
370 // Walk over all optimized code objects in this native context. | 373 // Walk over all optimized code objects in this native context. |
371 Code* prev = NULL; | 374 Code* prev = NULL; |
372 Object* element = context->OptimizedCodeListHead(); | 375 Object* element = context->OptimizedCodeListHead(); |
373 while (!element->IsUndefined()) { | 376 while (!element->IsUndefined()) { |
374 Code* code = Code::cast(element); | 377 Code* code = Code::cast(element); |
375 CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION); | 378 CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION); |
376 Object* next = code->next_code_link(); | 379 Object* next = code->next_code_link(); |
| 380 |
377 if (code->marked_for_deoptimization()) { | 381 if (code->marked_for_deoptimization()) { |
378 // Put the code into the list for later patching. | 382 // Put the code into the list for later patching. |
379 codes.Add(code, &zone); | 383 codes.Add(code, &zone); |
380 | 384 |
381 if (prev != NULL) { | 385 if (prev != NULL) { |
382 // Skip this code in the optimized code list. | 386 // Skip this code in the optimized code list. |
383 prev->set_next_code_link(next); | 387 prev->set_next_code_link(next); |
384 } else { | 388 } else { |
385 // There was no previous node; the next node is the new head. | 389 // There was no previous node; the next node is the new head. |
386 context->SetOptimizedCodeListHead(next); | 390 context->SetOptimizedCodeListHead(next); |
387 } | 391 } |
388 | 392 |
389 // Move the code to the _deoptimized_ code list. | 393 // Move the code to the _deoptimized_ code list. |
390 code->set_next_code_link(context->DeoptimizedCodeListHead()); | 394 code->set_next_code_link(context->DeoptimizedCodeListHead()); |
391 context->SetDeoptimizedCodeListHead(code); | 395 context->SetDeoptimizedCodeListHead(code); |
392 } else { | 396 } else { |
393 // Not marked; preserve this element. | 397 // Not marked; preserve this element. |
394 prev = code; | 398 prev = code; |
395 } | 399 } |
396 element = next; | 400 element = next; |
397 } | 401 } |
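
Note: the loop above is a standard singly-linked-list partition. Marked Code objects are unlinked from the context's optimized code list and prepended to its deoptimized code list, with `prev` tracking the last retained node so the unlink can splice around the removed element. A minimal standalone sketch of the same pattern, using a hypothetical Node type rather than V8's Code objects:

  struct Node {
    bool marked;
    Node* next;
  };

  // Unlink marked nodes from one list and prepend them to another,
  // mirroring the walk over OptimizedCodeListHead() above.
  void PartitionList(Node** optimized_head, Node** deoptimized_head) {
    Node* prev = NULL;
    Node* element = *optimized_head;
    while (element != NULL) {
      Node* next = element->next;
      if (element->marked) {
        if (prev != NULL) {
          prev->next = next;       // Skip this node in the optimized list.
        } else {
          *optimized_head = next;  // No previous node; next becomes the head.
        }
        element->next = *deoptimized_head;  // Prepend to deoptimized list.
        *deoptimized_head = element;
      } else {
        prev = element;            // Not marked; keep it in place.
      }
      element = next;
    }
  }
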
398 | 402 |
| 403 if (FLAG_turbo_deoptimization) { |
| 404 PatchStackForMarkedCode(isolate); |
| 405 } |
| 406 |
399 // TODO(titzer): we need a handle scope only because of the macro assembler, | 407 // TODO(titzer): we need a handle scope only because of the macro assembler, |
400 // which is only used in EnsureCodeForDeoptimizationEntry. | 408 // which is only used in EnsureCodeForDeoptimizationEntry. |
401 HandleScope scope(isolate); | 409 HandleScope scope(isolate); |
402 | 410 |
403 // Now patch all the codes for deoptimization. | 411 // Now patch all the codes for deoptimization. |
404 for (int i = 0; i < codes.length(); i++) { | 412 for (int i = 0; i < codes.length(); i++) { |
405 #ifdef DEBUG | 413 #ifdef DEBUG |
406 if (codes[i] == topmost_optimized_code) { | 414 if (codes[i] == topmost_optimized_code) { |
407 ASSERT(safe_to_deopt_topmost_optimized_code); | 415 ASSERT(safe_to_deopt_topmost_optimized_code); |
408 } | 416 } |
409 #endif | 417 #endif |
410 // It is finally time to die, code object. | 418 // It is finally time to die, code object. |
| 419 |
| 420 // Remove the code from optimized code map. |
| 421 DeoptimizationInputData* deopt_data = |
| 422 DeoptimizationInputData::cast(codes[i]->deoptimization_data()); |
| 423 SharedFunctionInfo* shared = |
| 424 SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo()); |
| 425 shared->EvictFromOptimizedCodeMap(codes[i], "deoptimized code"); |
| 426 |
411 // Do platform-specific patching to force any activations to lazy deopt. | 427 // Do platform-specific patching to force any activations to lazy deopt. |
412 PatchCodeForDeoptimization(isolate, codes[i]); | 428 // |
 | 429 // We skip patching Turbofan code - we patch its return addresses on the stack instead. |
| 430 // TODO(jarin) We should still zap the code object (but we have to |
| 431 // be careful not to zap the deoptimization block). |
| 432 if (!codes[i]->is_turbofanned()) { |
| 433 PatchCodeForDeoptimization(isolate, codes[i]); |
413 | 434 |
414 // We might be in the middle of incremental marking with compaction. | 435 // We might be in the middle of incremental marking with compaction. |
415 // Tell collector to treat this code object in a special way and | 436 // Tell collector to treat this code object in a special way and |
416 // ignore all slots that might have been recorded on it. | 437 // ignore all slots that might have been recorded on it. |
417 isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]); | 438 isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]); |
| 439 } |
418 } | 440 } |
419 } | 441 } |
420 | 442 |
| 443 |
| 444 static int FindPatchAddressForReturnAddress(Code* code, int pc) { |
| 445 DeoptimizationInputData* input_data = |
| 446 DeoptimizationInputData::cast(code->deoptimization_data()); |
| 447 int patch_count = input_data->ReturnAddressPatchCount(); |
| 448 for (int i = 0; i < patch_count; i++) { |
| 449 int return_pc = input_data->ReturnAddressPc(i)->value(); |
| 450 if (pc == return_pc) { |
| 451 return input_data->PatchedAddressPc(i)->value(); |
| 452 } |
| 453 } |
| 454 return -1; |
| 455 } |
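
Note: FindPatchAddressForReturnAddress above is a linear scan over the (return pc, patched pc) pairs recorded in the code object's DeoptimizationInputData, returning -1 when the return address has no patch entry. A self-contained analogue of the lookup, with a hypothetical std::vector standing in for the input data:

  #include <utility>
  #include <vector>

  // Return the patched pc offset for pc, or -1 if the table has no entry,
  // mirroring the ReturnAddressPc/PatchedAddressPc scan above.
  static int FindPatchAddress(const std::vector<std::pair<int, int> >& table,
                              int pc) {
    for (size_t i = 0; i < table.size(); i++) {
      if (table[i].first == pc) return table[i].second;
    }
    return -1;
  }

For example, with a table of {(0x40, 0x90), (0x5c, 0xa4)}, the lookup returns 0x90 for pc 0x40 and -1 for any pc with no entry, which is what CHECK_LE(0, new_pc_offset) below guards against.
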
| 456 |
| 457 |
 | 458 // For all marked Turbofanned code on the stack, change the return address |
 | 459 // to go to the deoptimization block. |
| 460 void Deoptimizer::PatchStackForMarkedCode(Isolate* isolate) { |
 | 461 // TODO(jarin) We should tolerate a missing patch entry for the topmost frame. |
| 462 for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done(); |
| 463 it.Advance()) { |
| 464 StackFrame::Type type = it.frame()->type(); |
| 465 if (type == StackFrame::OPTIMIZED) { |
| 466 Code* code = it.frame()->LookupCode(); |
| 467 if (code->is_turbofanned() && code->marked_for_deoptimization()) { |
| 468 JSFunction* function = |
| 469 static_cast<OptimizedFrame*>(it.frame())->function(); |
| 470 Address* pc_address = it.frame()->pc_address(); |
| 471 int pc_offset = *pc_address - code->instruction_start(); |
| 472 int new_pc_offset = FindPatchAddressForReturnAddress(code, pc_offset); |
| 473 |
| 474 if (FLAG_trace_deopt) { |
| 475 CodeTracer::Scope scope(isolate->GetCodeTracer()); |
| 476 PrintF(scope.file(), "[patching stack address for function: "); |
| 477 function->PrintName(scope.file()); |
| 478 PrintF(scope.file(), " (Pc offset %i -> %i)]\n", pc_offset, |
| 479 new_pc_offset); |
| 480 } |
| 481 |
| 482 CHECK_LE(0, new_pc_offset); |
| 483 *pc_address += new_pc_offset - pc_offset; |
| 484 } |
| 485 } |
| 486 } |
| 487 } |
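
Note: the actual patch in PatchStackForMarkedCode is the single statement `*pc_address += new_pc_offset - pc_offset;`. The saved return address is shifted by the distance between the old return site and the deoptimization block, so when the frame is popped it "returns" into the deopt block instead of the normal continuation. A worked example with made-up numbers (hypothetical addresses and offsets, not a real code layout):

  #include <assert.h>
  #include <stdint.h>

  int main() {
    uintptr_t instruction_start = 0x1000;
    uintptr_t return_address = 0x1040;  // Saved on the stack; pc offset 0x40.
    int pc_offset = static_cast<int>(return_address - instruction_start);
    int new_pc_offset = 0x90;           // Offset of the deoptimization block.
    return_address += new_pc_offset - pc_offset;  // Same arithmetic as above.
    assert(return_address == 0x1090);   // Now points into the deopt block.
    return 0;
  }
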
| 488 |
421 | 489 |
422 void Deoptimizer::DeoptimizeAll(Isolate* isolate) { | 490 void Deoptimizer::DeoptimizeAll(Isolate* isolate) { |
423 if (FLAG_trace_deopt) { | 491 if (FLAG_trace_deopt) { |
424 CodeTracer::Scope scope(isolate->GetCodeTracer()); | 492 CodeTracer::Scope scope(isolate->GetCodeTracer()); |
425 PrintF(scope.file(), "[deoptimize all code in all contexts]\n"); | 493 PrintF(scope.file(), "[deoptimize all code in all contexts]\n"); |
426 } | 494 } |
427 DisallowHeapAllocation no_allocation; | 495 DisallowHeapAllocation no_allocation; |
428 // For all contexts, mark all code, then deoptimize. | 496 // For all contexts, mark all code, then deoptimize. |
429 Object* context = isolate->heap()->native_contexts_list(); | 497 Object* context = isolate->heap()->native_contexts_list(); |
430 while (!context->IsUndefined()) { | 498 while (!context->IsUndefined()) { |
(...skipping 465 matching lines...) |
896 output_[frame_index] = output_frame; | 964 output_[frame_index] = output_frame; |
897 | 965 |
898 // The top address for the bottommost output frame can be computed from | 966 // The top address for the bottommost output frame can be computed from |
899 // the input frame pointer and the output frame's height. For all | 967 // the input frame pointer and the output frame's height. For all |
900 // subsequent output frames, it can be computed from the previous one's | 968 // subsequent output frames, it can be computed from the previous one's |
901 // top address and the current frame's size. | 969 // top address and the current frame's size. |
902 Register fp_reg = JavaScriptFrame::fp_register(); | 970 Register fp_reg = JavaScriptFrame::fp_register(); |
903 intptr_t top_address; | 971 intptr_t top_address; |
904 if (is_bottommost) { | 972 if (is_bottommost) { |
905 // Determine whether the input frame contains alignment padding. | 973 // Determine whether the input frame contains alignment padding. |
906 has_alignment_padding_ = HasAlignmentPadding(function) ? 1 : 0; | 974 has_alignment_padding_ = |
| 975 (!compiled_code_->is_turbofanned() && HasAlignmentPadding(function)) |
| 976 ? 1 |
| 977 : 0; |
907 // 2 = context and function in the frame. | 978 // 2 = context and function in the frame. |
908 // If the optimized frame had alignment padding, adjust the frame pointer | 979 // If the optimized frame had alignment padding, adjust the frame pointer |
909 // to point to the new position of the old frame pointer after padding | 980 // to point to the new position of the old frame pointer after padding |
910 // is removed. Subtract 2 * kPointerSize for the context and function slots. | 981 // is removed. Subtract 2 * kPointerSize for the context and function slots. |
911 top_address = input_->GetRegister(fp_reg.code()) - | 982 top_address = input_->GetRegister(fp_reg.code()) - |
912 StandardFrameConstants::kFixedFrameSizeFromFp - | 983 StandardFrameConstants::kFixedFrameSizeFromFp - |
913 height_in_bytes + has_alignment_padding_ * kPointerSize; | 984 height_in_bytes + has_alignment_padding_ * kPointerSize; |
914 } else { | 985 } else { |
915 top_address = output_[frame_index - 1]->GetTop() - output_frame_size; | 986 top_address = output_[frame_index - 1]->GetTop() - output_frame_size; |
916 } | 987 } |
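
Note: for the bottommost frame, the computation above is: new top = input frame pointer, minus the fixed frame part below fp (the context and function slots), minus the output frame's height, plus one slot if alignment padding is being removed. A worked instance under assumed 64-bit constants (hypothetical fp and height; assumes kPointerSize == 8):

  #include <stdint.h>

  int main() {
    const intptr_t kPointerSize = 8;
    const intptr_t kFixedFrameSizeFromFp = 2 * kPointerSize;  // context + function
    intptr_t fp = 0x7fff0000;          // Input frame pointer.
    intptr_t height_in_bytes = 0x20;   // Height of the output frame.
    intptr_t has_alignment_padding = 0;
    intptr_t top_address = fp - kFixedFrameSizeFromFp - height_in_bytes +
                           has_alignment_padding * kPointerSize;
    return top_address == 0x7ffeffd0 ? 0 : 1;  // 0x7fff0000 - 16 - 32.
  }
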
(...skipping 2681 matching lines...) |
3598 } | 3669 } |
3599 | 3670 |
3600 | 3671 |
3601 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) { | 3672 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) { |
3602 v->VisitPointer(BitCast<Object**>(&function_)); | 3673 v->VisitPointer(BitCast<Object**>(&function_)); |
3603 v->VisitPointers(parameters_, parameters_ + parameters_count_); | 3674 v->VisitPointers(parameters_, parameters_ + parameters_count_); |
3604 v->VisitPointers(expression_stack_, expression_stack_ + expression_count_); | 3675 v->VisitPointers(expression_stack_, expression_stack_ + expression_count_); |
3605 } | 3676 } |
3606 | 3677 |
3607 } } // namespace v8::internal | 3678 } } // namespace v8::internal |