| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 477 matching lines...) |
| 488 bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type, | 488 bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type, |
| 489 StackFrame::Type frame_type) { | 489 StackFrame::Type frame_type) { |
| 490 switch (deopt_type) { | 490 switch (deopt_type) { |
| 491 case EAGER: | 491 case EAGER: |
| 492 case SOFT: | 492 case SOFT: |
| 493 case LAZY: | 493 case LAZY: |
| 494 case DEBUGGER: | 494 case DEBUGGER: |
| 495 return (frame_type == StackFrame::STUB) | 495 return (frame_type == StackFrame::STUB) |
| 496 ? FLAG_trace_stub_failures | 496 ? FLAG_trace_stub_failures |
| 497 : FLAG_trace_deopt; | 497 : FLAG_trace_deopt; |
| 498 case OSR: | |
| 499 return FLAG_trace_osr; | |
| 500 } | 498 } |
| 501 UNREACHABLE(); | 499 UNREACHABLE(); |
| 502 return false; | 500 return false; |
| 503 } | 501 } |
| 504 | 502 |
| 505 | 503 |
| 506 const char* Deoptimizer::MessageFor(BailoutType type) { | 504 const char* Deoptimizer::MessageFor(BailoutType type) { |
| 507 switch (type) { | 505 switch (type) { |
| 508 case EAGER: return "eager"; | 506 case EAGER: return "eager"; |
| 509 case SOFT: return "soft"; | 507 case SOFT: return "soft"; |
| 510 case LAZY: return "lazy"; | 508 case LAZY: return "lazy"; |
| 511 case DEBUGGER: return "debugger"; | 509 case DEBUGGER: return "debugger"; |
| 512 case OSR: return "OSR"; | |
| 513 } | 510 } |
| 514 UNREACHABLE(); | 511 UNREACHABLE(); |
| 515 return NULL; | 512 return NULL; |
| 516 } | 513 } |
| 517 | 514 |
| 518 | 515 |
| 519 Deoptimizer::Deoptimizer(Isolate* isolate, | 516 Deoptimizer::Deoptimizer(Isolate* isolate, |
| 520 JSFunction* function, | 517 JSFunction* function, |
| 521 BailoutType type, | 518 BailoutType type, |
| 522 unsigned bailout_id, | 519 unsigned bailout_id, |
| (...skipping 33 matching lines...) |
| 556 if (bailout_type_ == Deoptimizer::SOFT) { | 553 if (bailout_type_ == Deoptimizer::SOFT) { |
| 557 isolate->counters()->soft_deopts_executed()->Increment(); | 554 isolate->counters()->soft_deopts_executed()->Increment(); |
| 558 // Soft deopts shouldn't count against the overall re-optimization count | 555 // Soft deopts shouldn't count against the overall re-optimization count |
| 559 // that can eventually lead to disabling optimization for a function. | 556 // that can eventually lead to disabling optimization for a function. |
| 560 int opt_count = function->shared()->opt_count(); | 557 int opt_count = function->shared()->opt_count(); |
| 561 if (opt_count > 0) opt_count--; | 558 if (opt_count > 0) opt_count--; |
| 562 function->shared()->set_opt_count(opt_count); | 559 function->shared()->set_opt_count(opt_count); |
| 563 } | 560 } |
| 564 } | 561 } |
| 565 compiled_code_ = FindOptimizedCode(function, optimized_code); | 562 compiled_code_ = FindOptimizedCode(function, optimized_code); |
| 563 |
| 564 #ifdef DEBUG |
| 565 ASSERT(compiled_code_ != NULL); |
| 566 if (type == EAGER || type == SOFT || type == LAZY) { |
| 567 ASSERT(compiled_code_->kind() != Code::FUNCTION); |
| 568 } |
| 569 #endif |
| 570 |
| 566 StackFrame::Type frame_type = function == NULL | 571 StackFrame::Type frame_type = function == NULL |
| 567 ? StackFrame::STUB | 572 ? StackFrame::STUB |
| 568 : StackFrame::JAVA_SCRIPT; | 573 : StackFrame::JAVA_SCRIPT; |
| 569 trace_ = TraceEnabledFor(type, frame_type); | 574 trace_ = TraceEnabledFor(type, frame_type); |
| 570 #ifdef DEBUG | 575 #ifdef DEBUG |
| 571 CHECK(AllowHeapAllocation::IsAllowed()); | 576 CHECK(AllowHeapAllocation::IsAllowed()); |
| 572 disallow_heap_allocation_ = new DisallowHeapAllocation(); | 577 disallow_heap_allocation_ = new DisallowHeapAllocation(); |
| 573 #endif // DEBUG | 578 #endif // DEBUG |
| 574 unsigned size = ComputeInputFrameSize(); | 579 unsigned size = ComputeInputFrameSize(); |
| 575 input_ = new(size) FrameDescription(size, function); | 580 input_ = new(size) FrameDescription(size, function); |
| 576 input_->SetFrameType(frame_type); | 581 input_->SetFrameType(frame_type); |
| 577 } | 582 } |
| 578 | 583 |
| 579 | 584 |
| 580 Code* Deoptimizer::FindOptimizedCode(JSFunction* function, | 585 Code* Deoptimizer::FindOptimizedCode(JSFunction* function, |
| 581 Code* optimized_code) { | 586 Code* optimized_code) { |
| 582 switch (bailout_type_) { | 587 switch (bailout_type_) { |
| 583 case Deoptimizer::SOFT: | 588 case Deoptimizer::SOFT: |
| 584 case Deoptimizer::EAGER: | 589 case Deoptimizer::EAGER: |
| 585 case Deoptimizer::LAZY: { | 590 case Deoptimizer::LAZY: { |
| 586 Code* compiled_code = FindDeoptimizingCode(from_); | 591 Code* compiled_code = FindDeoptimizingCode(from_); |
| 587 return (compiled_code == NULL) | 592 return (compiled_code == NULL) |
| 588 ? static_cast<Code*>(isolate_->FindCodeObject(from_)) | 593 ? static_cast<Code*>(isolate_->FindCodeObject(from_)) |
| 589 : compiled_code; | 594 : compiled_code; |
| 590 } | 595 } |
| 591 case Deoptimizer::OSR: { | |
| 592 // The function has already been optimized and we're transitioning | |
| 593 // from the unoptimized shared version to the optimized one in the | |
| 594 // function. The return address (from_) points to unoptimized code. | |
| 595 Code* compiled_code = function->code(); | |
| 596 ASSERT(compiled_code->kind() == Code::OPTIMIZED_FUNCTION); | |
| 597 ASSERT(!compiled_code->contains(from_)); | |
| 598 return compiled_code; | |
| 599 } | |
| 600 case Deoptimizer::DEBUGGER: | 596 case Deoptimizer::DEBUGGER: |
| 601 ASSERT(optimized_code->contains(from_)); | 597 ASSERT(optimized_code->contains(from_)); |
| 602 return optimized_code; | 598 return optimized_code; |
| 603 } | 599 } |
| 604 UNREACHABLE(); | 600 UNREACHABLE(); |
| 605 return NULL; | 601 return NULL; |
| 606 } | 602 } |
| 607 | 603 |
| 608 | 604 |
| 609 void Deoptimizer::PrintFunctionName() { | 605 void Deoptimizer::PrintFunctionName() { |
| (...skipping 103 matching lines...) |
| 713 } | 709 } |
| 714 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); | 710 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); |
| 715 } | 711 } |
| 716 return length; | 712 return length; |
| 717 } | 713 } |
| 718 | 714 |
| 719 | 715 |
| 720 // We rely on this function not causing a GC. It is called from generated code | 716 // We rely on this function not causing a GC. It is called from generated code |
| 721 // without having a real stack frame in place. | 717 // without having a real stack frame in place. |
| 722 void Deoptimizer::DoComputeOutputFrames() { | 718 void Deoptimizer::DoComputeOutputFrames() { |
| 723 if (bailout_type_ == OSR) { | |
| 724 DoComputeOsrOutputFrame(); | |
| 725 return; | |
| 726 } | |
| 727 | |
| 728 // Print some helpful diagnostic information. | 719 // Print some helpful diagnostic information. |
| 729 if (FLAG_log_timer_events && | 720 if (FLAG_log_timer_events && |
| 730 compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) { | 721 compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) { |
| 731 LOG(isolate(), CodeDeoptEvent(compiled_code_)); | 722 LOG(isolate(), CodeDeoptEvent(compiled_code_)); |
| 732 } | 723 } |
| 733 ElapsedTimer timer; | 724 ElapsedTimer timer; |
| 734 if (trace_) { | 725 if (trace_) { |
| 735 timer.Start(); | 726 timer.Start(); |
| 736 PrintF("[deoptimizing (DEOPT %s): begin 0x%08" V8PRIxPTR " ", | 727 PrintF("[deoptimizing (DEOPT %s): begin 0x%08" V8PRIxPTR " ", |
| 737 MessageFor(bailout_type_), | 728 MessageFor(bailout_type_), |
| (...skipping 1601 matching lines...) |
| 2339 int object_index = deferred_objects_.length() - 1; | 2330 int object_index = deferred_objects_.length() - 1; |
| 2340 for (int i = 0; i < length; i++) { | 2331 for (int i = 0; i < length; i++) { |
| 2341 DoTranslateObject(iterator, object_index, i); | 2332 DoTranslateObject(iterator, object_index, i); |
| 2342 } | 2333 } |
| 2343 return; | 2334 return; |
| 2344 } | 2335 } |
| 2345 } | 2336 } |
| 2346 } | 2337 } |
| 2347 | 2338 |
| 2348 | 2339 |
| 2349 bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator, | |
| 2350 int* input_offset) { | |
| 2351 disasm::NameConverter converter; | |
| 2352 FrameDescription* output = output_[0]; | |
| 2353 | |
| 2354 // The input values are all part of the unoptimized frame so they | |
| 2355 // are all tagged pointers. | |
| 2356 uintptr_t input_value = input_->GetFrameSlot(*input_offset); | |
| 2357 Object* input_object = reinterpret_cast<Object*>(input_value); | |
| 2358 | |
| 2359 Translation::Opcode opcode = | |
| 2360 static_cast<Translation::Opcode>(iterator->Next()); | |
| 2361 | |
| 2362 switch (opcode) { | |
| 2363 case Translation::BEGIN: | |
| 2364 case Translation::JS_FRAME: | |
| 2365 case Translation::ARGUMENTS_ADAPTOR_FRAME: | |
| 2366 case Translation::CONSTRUCT_STUB_FRAME: | |
| 2367 case Translation::GETTER_STUB_FRAME: | |
| 2368 case Translation::SETTER_STUB_FRAME: | |
| 2369 case Translation::COMPILED_STUB_FRAME: | |
| 2370 UNREACHABLE(); // Malformed input. | |
| 2371 return false; | |
| 2372 | |
| 2373 case Translation::REGISTER: { | |
| 2374 int output_reg = iterator->Next(); | |
| 2375 if (FLAG_trace_osr) { | |
| 2376 PrintF(" %s <- 0x%08" V8PRIxPTR " ; [sp + %d]\n", | |
| 2377 converter.NameOfCPURegister(output_reg), | |
| 2378 input_value, | |
| 2379 *input_offset); | |
| 2380 } | |
| 2381 output->SetRegister(output_reg, input_value); | |
| 2382 break; | |
| 2383 } | |
| 2384 | |
| 2385 case Translation::INT32_REGISTER: { | |
| 2386 int32_t int32_value = 0; | |
| 2387 if (!input_object->ToInt32(&int32_value)) return false; | |
| 2388 | |
| 2389 int output_reg = iterator->Next(); | |
| 2390 if (FLAG_trace_osr) { | |
| 2391 PrintF(" %s <- %d (int32) ; [sp + %d]\n", | |
| 2392 converter.NameOfCPURegister(output_reg), | |
| 2393 int32_value, | |
| 2394 *input_offset); | |
| 2395 } | |
| 2396 output->SetRegister(output_reg, int32_value); | |
| 2397 break; | |
| 2398 } | |
| 2399 | |
| 2400 case Translation::UINT32_REGISTER: { | |
| 2401 uint32_t uint32_value = 0; | |
| 2402 if (!input_object->ToUint32(&uint32_value)) return false; | |
| 2403 | |
| 2404 int output_reg = iterator->Next(); | |
| 2405 if (FLAG_trace_osr) { | |
| 2406 PrintF(" %s <- %u (uint32) ; [sp + %d]\n", | |
| 2407 converter.NameOfCPURegister(output_reg), | |
| 2408 uint32_value, | |
| 2409 *input_offset); | |
| 2410 } | |
| 2411 output->SetRegister(output_reg, static_cast<int32_t>(uint32_value)); | |
| 2412 break; | |
| 2413 } | |
| 2414 | |
| 2415 case Translation::DOUBLE_REGISTER: { | |
| 2416 // Abort OSR if we don't have a number. | |
| 2417 if (!input_object->IsNumber()) return false; | |
| 2418 | |
| 2419 int output_reg = iterator->Next(); | |
| 2420 double double_value = input_object->Number(); | |
| 2421 if (FLAG_trace_osr) { | |
| 2422 PrintF(" %s <- %g (double) ; [sp + %d]\n", | |
| 2423 DoubleRegister::AllocationIndexToString(output_reg), | |
| 2424 double_value, | |
| 2425 *input_offset); | |
| 2426 } | |
| 2427 output->SetDoubleRegister(output_reg, double_value); | |
| 2428 break; | |
| 2429 } | |
| 2430 | |
| 2431 case Translation::STACK_SLOT: { | |
| 2432 int output_index = iterator->Next(); | |
| 2433 unsigned output_offset = | |
| 2434 output->GetOffsetFromSlotIndex(output_index); | |
| 2435 if (FLAG_trace_osr) { | |
| 2436 PrintF(" [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ", | |
| 2437 output_offset, | |
| 2438 input_value, | |
| 2439 *input_offset); | |
| 2440 reinterpret_cast<Object*>(input_value)->ShortPrint(); | |
| 2441 PrintF("\n"); | |
| 2442 } | |
| 2443 output->SetFrameSlot(output_offset, input_value); | |
| 2444 break; | |
| 2445 } | |
| 2446 | |
| 2447 case Translation::INT32_STACK_SLOT: { | |
| 2448 int32_t int32_value = 0; | |
| 2449 if (!input_object->ToInt32(&int32_value)) return false; | |
| 2450 | |
| 2451 int output_index = iterator->Next(); | |
| 2452 unsigned output_offset = | |
| 2453 output->GetOffsetFromSlotIndex(output_index); | |
| 2454 if (FLAG_trace_osr) { | |
| 2455 PrintF(" [sp + %d] <- %d (int32) ; [sp + %d]\n", | |
| 2456 output_offset, | |
| 2457 int32_value, | |
| 2458 *input_offset); | |
| 2459 } | |
| 2460 output->SetFrameSlot(output_offset, int32_value); | |
| 2461 break; | |
| 2462 } | |
| 2463 | |
| 2464 case Translation::UINT32_STACK_SLOT: { | |
| 2465 uint32_t uint32_value = 0; | |
| 2466 if (!input_object->ToUint32(&uint32_value)) return false; | |
| 2467 | |
| 2468 int output_index = iterator->Next(); | |
| 2469 unsigned output_offset = | |
| 2470 output->GetOffsetFromSlotIndex(output_index); | |
| 2471 if (FLAG_trace_osr) { | |
| 2472 PrintF(" [sp + %d] <- %u (uint32) ; [sp + %d]\n", | |
| 2473 output_offset, | |
| 2474 uint32_value, | |
| 2475 *input_offset); | |
| 2476 } | |
| 2477 output->SetFrameSlot(output_offset, static_cast<int32_t>(uint32_value)); | |
| 2478 break; | |
| 2479 } | |
| 2480 | |
| 2481 case Translation::DOUBLE_STACK_SLOT: { | |
| 2482 static const int kLowerOffset = 0 * kPointerSize; | |
| 2483 static const int kUpperOffset = 1 * kPointerSize; | |
| 2484 | |
| 2485 // Abort OSR if we don't have a number. | |
| 2486 if (!input_object->IsNumber()) return false; | |
| 2487 | |
| 2488 int output_index = iterator->Next(); | |
| 2489 unsigned output_offset = | |
| 2490 output->GetOffsetFromSlotIndex(output_index); | |
| 2491 double double_value = input_object->Number(); | |
| 2492 uint64_t int_value = BitCast<uint64_t, double>(double_value); | |
| 2493 int32_t lower = static_cast<int32_t>(int_value); | |
| 2494 int32_t upper = static_cast<int32_t>(int_value >> kBitsPerInt); | |
| 2495 if (FLAG_trace_osr) { | |
| 2496 PrintF(" [sp + %d] <- 0x%08x (upper bits of %g) ; [sp + %d]\n", | |
| 2497 output_offset + kUpperOffset, | |
| 2498 upper, | |
| 2499 double_value, | |
| 2500 *input_offset); | |
| 2501 PrintF(" [sp + %d] <- 0x%08x (lower bits of %g) ; [sp + %d]\n", | |
| 2502 output_offset + kLowerOffset, | |
| 2503 lower, | |
| 2504 double_value, | |
| 2505 *input_offset); | |
| 2506 } | |
| 2507 output->SetFrameSlot(output_offset + kLowerOffset, lower); | |
| 2508 output->SetFrameSlot(output_offset + kUpperOffset, upper); | |
| 2509 break; | |
| 2510 } | |
| 2511 | |
| 2512 case Translation::LITERAL: { | |
| 2513 // Just ignore non-materialized literals. | |
| 2514 iterator->Next(); | |
| 2515 break; | |
| 2516 } | |
| 2517 | |
| 2518 case Translation::DUPLICATED_OBJECT: | |
| 2519 case Translation::ARGUMENTS_OBJECT: | |
| 2520 case Translation::CAPTURED_OBJECT: { | |
| 2521 // Optimized code assumes that the argument object has not been | |
| 2522 // materialized and so bypasses it when doing arguments access. | |
| 2523 // We should have bailed out before starting the frame | |
| 2524 // translation. | |
| 2525 UNREACHABLE(); | |
| 2526 return false; | |
| 2527 } | |
| 2528 } | |
| 2529 | |
| 2530 *input_offset -= kPointerSize; | |
| 2531 return true; | |
| 2532 } | |
| 2533 | |
| 2534 | |
| 2535 void Deoptimizer::PatchInterruptCode(Isolate* isolate, | 2340 void Deoptimizer::PatchInterruptCode(Isolate* isolate, |
| 2536 Code* unoptimized) { | 2341 Code* unoptimized) { |
| 2537 DisallowHeapAllocation no_gc; | 2342 DisallowHeapAllocation no_gc; |
| 2538 Code* replacement_code = | 2343 Code* replacement_code = |
| 2539 isolate->builtins()->builtin(Builtins::kOnStackReplacement); | 2344 isolate->builtins()->builtin(Builtins::kOnStackReplacement); |
| 2540 | 2345 |
| 2541 // Iterate over the back edge table and patch every interrupt | 2346 // Iterate over the back edge table and patch every interrupt |
| 2542 // call to an unconditional call to the replacement code. | 2347 // call to an unconditional call to the replacement code. |
| 2543 int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level(); | 2348 int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level(); |
| 2544 | 2349 |
| (...skipping 65 matching lines...) |
| 2610 } | 2415 } |
| 2611 #endif // DEBUG | 2416 #endif // DEBUG |
| 2612 | 2417 |
| 2613 | 2418 |
| 2614 unsigned Deoptimizer::ComputeInputFrameSize() const { | 2419 unsigned Deoptimizer::ComputeInputFrameSize() const { |
| 2615 unsigned fixed_size = ComputeFixedSize(function_); | 2420 unsigned fixed_size = ComputeFixedSize(function_); |
| 2616 // The fp-to-sp delta already takes the context and the function | 2421 // The fp-to-sp delta already takes the context and the function |
| 2617 // into account so we have to avoid double counting them (-2). | 2422 // into account so we have to avoid double counting them (-2). |
| 2618 unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize); | 2423 unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize); |
| 2619 #ifdef DEBUG | 2424 #ifdef DEBUG |
| 2620 if (bailout_type_ == OSR) { | 2425 if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) { |
| 2621 // TODO(kasperl): It would be nice if we could verify that the | |
| 2622 // size matches with the stack height we can compute based on the | |
| 2623 // environment at the OSR entry. The code for that is built into | |
| 2624 // the DoComputeOsrOutputFrame function for now. | |
| 2625 } else if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) { | |
| 2626 unsigned stack_slots = compiled_code_->stack_slots(); | 2426 unsigned stack_slots = compiled_code_->stack_slots(); |
| 2627 unsigned outgoing_size = ComputeOutgoingArgumentSize(); | 2427 unsigned outgoing_size = ComputeOutgoingArgumentSize(); |
| 2628 ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size); | 2428 ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size); |
| 2629 } | 2429 } |
| 2630 #endif | 2430 #endif |
| 2631 return result; | 2431 return result; |
| 2632 } | 2432 } |
| 2633 | 2433 |
| 2634 | 2434 |
| 2635 unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const { | 2435 unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const { |
| (...skipping 615 matching lines...) |
| 3251 | 3051 |
| 3252 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) { | 3052 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) { |
| 3253 v->VisitPointer(BitCast<Object**>(&function_)); | 3053 v->VisitPointer(BitCast<Object**>(&function_)); |
| 3254 v->VisitPointers(parameters_, parameters_ + parameters_count_); | 3054 v->VisitPointers(parameters_, parameters_ + parameters_count_); |
| 3255 v->VisitPointers(expression_stack_, expression_stack_ + expression_count_); | 3055 v->VisitPointers(expression_stack_, expression_stack_ + expression_count_); |
| 3256 } | 3056 } |
| 3257 | 3057 |
| 3258 #endif // ENABLE_DEBUGGER_SUPPORT | 3058 #endif // ENABLE_DEBUGGER_SUPPORT |
| 3259 | 3059 |
| 3260 } } // namespace v8::internal | 3060 } } // namespace v8::internal |
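
Note on the removed DOUBLE_STACK_SLOT handling above: it bit-casts an IEEE-754 double to a 64-bit integer and writes it as two 32-bit words into adjacent frame slots, with kLowerOffset = 0 and kUpperOffset = kPointerSize placing the low word at the smaller offset. Below is a minimal standalone sketch of that split, not part of the patch; it substitutes std::memcpy for V8's BitCast, and the name SplitDouble is illustrative only.

```cpp
#include <cstdint>
#include <cstring>
#include <cstdio>

static const int kBitsPerInt = 32;

// Split a double into the two 32-bit words the deleted OSR translation
// wrote into adjacent stack slots (low word at the smaller offset).
void SplitDouble(double value, int32_t* lower, int32_t* upper) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);  // Stands in for BitCast<uint64_t, double>.
  *lower = static_cast<int32_t>(bits);                 // Low 32 bits.
  *upper = static_cast<int32_t>(bits >> kBitsPerInt);  // High 32 bits.
}

int main() {
  int32_t lower, upper;
  SplitDouble(1.0, &lower, &upper);
  // 1.0 is 0x3FF0000000000000: upper word 0x3ff00000, lower word 0x00000000.
  std::printf("upper: 0x%08x lower: 0x%08x\n", upper, lower);
  return 0;
}
```

On a little-endian 32-bit target this slot order matches the in-memory layout of the double itself, which is why the deleted code could reconstruct the value by reading the two slots back in offset order.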