OLD | NEW |
---|---|
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 541 matching lines...) | |
552 bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type, | 552 bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type, |
553 StackFrame::Type frame_type) { | 553 StackFrame::Type frame_type) { |
554 switch (deopt_type) { | 554 switch (deopt_type) { |
555 case EAGER: | 555 case EAGER: |
556 case SOFT: | 556 case SOFT: |
557 case LAZY: | 557 case LAZY: |
558 case DEBUGGER: | 558 case DEBUGGER: |
559 return (frame_type == StackFrame::STUB) | 559 return (frame_type == StackFrame::STUB) |
560 ? FLAG_trace_stub_failures | 560 ? FLAG_trace_stub_failures |
561 : FLAG_trace_deopt; | 561 : FLAG_trace_deopt; |
562 case OSR: | |
563 return FLAG_trace_osr; | |
564 } | 562 } |
565 UNREACHABLE(); | 563 UNREACHABLE(); |
566 return false; | 564 return false; |
567 } | 565 } |
568 | 566 |
569 | 567 |
570 const char* Deoptimizer::MessageFor(BailoutType type) { | 568 const char* Deoptimizer::MessageFor(BailoutType type) { |
571 switch (type) { | 569 switch (type) { |
572 case EAGER: return "eager"; | 570 case EAGER: return "eager"; |
573 case SOFT: return "soft"; | 571 case SOFT: return "soft"; |
574 case LAZY: return "lazy"; | 572 case LAZY: return "lazy"; |
575 case DEBUGGER: return "debugger"; | 573 case DEBUGGER: return "debugger"; |
576 case OSR: return "OSR"; | |
577 } | 574 } |
578 UNREACHABLE(); | 575 UNREACHABLE(); |
579 return NULL; | 576 return NULL; |
580 } | 577 } |
581 | 578 |
582 | 579 |
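With the OSR arms gone, both switches above cover exactly the four remaining bailout kinds, so no default is needed. A minimal, self-contained sketch of that shape; the enum here is a stand-in assumption for the real BailoutType in deoptimizer.h:

    #include <cstdio>

    enum BailoutType { EAGER, LAZY, SOFT, DEBUGGER };

    static const char* MessageFor(BailoutType type) {
      switch (type) {
        case EAGER:    return "eager";
        case SOFT:     return "soft";
        case LAZY:     return "lazy";
        case DEBUGGER: return "debugger";
      }
      return NULL;  // Unreachable once the switch is exhaustive.
    }

    int main() {
      std::printf("%s\n", MessageFor(SOFT));  // Prints "soft".
      return 0;
    }
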
583 Deoptimizer::Deoptimizer(Isolate* isolate, | 580 Deoptimizer::Deoptimizer(Isolate* isolate, |
584 JSFunction* function, | 581 JSFunction* function, |
585 BailoutType type, | 582 BailoutType type, |
586 unsigned bailout_id, | 583 unsigned bailout_id, |
(...skipping 16 matching lines...) | |
603 deferred_objects_(0), | 600 deferred_objects_(0), |
604 deferred_heap_numbers_(0), | 601 deferred_heap_numbers_(0), |
605 trace_(false) { | 602 trace_(false) { |
606 // For COMPILED_STUBs called from builtins, the function pointer is a SMI | 603 // For COMPILED_STUBs called from builtins, the function pointer is a SMI |
607 // indicating an internal frame. | 604 // indicating an internal frame. |
608 if (function->IsSmi()) { | 605 if (function->IsSmi()) { |
609 function = NULL; | 606 function = NULL; |
610 } | 607 } |
611 ASSERT(from != NULL); | 608 ASSERT(from != NULL); |
612 if (function != NULL && function->IsOptimized()) { | 609 if (function != NULL && function->IsOptimized()) { |
613 function->shared()->increment_deopt_count(); | |
614 if (bailout_type_ == Deoptimizer::SOFT) { | 610 if (bailout_type_ == Deoptimizer::SOFT) { |
615 isolate->counters()->soft_deopts_executed()->Increment(); | |
616 // Soft deopts shouldn't count against the overall re-optimization count | 611 // Soft deopts shouldn't count against the overall re-optimization count |
617 // that can eventually lead to disabling optimization for a function. | 612 // that can eventually lead to disabling optimization for a function. |
618 int opt_count = function->shared()->opt_count(); | 613 isolate->counters()->soft_deopts_executed()->Increment(); |
619 if (opt_count > 0) opt_count--; | 614 } else { |
620 function->shared()->set_opt_count(opt_count); | 615 function->shared()->increment_deopt_count(); |
621 } | 616 } |
622 } | 617 } |
623 compiled_code_ = FindOptimizedCode(function, optimized_code); | 618 compiled_code_ = FindOptimizedCode(function, optimized_code); |
619 | |
620 #if DEBUG | |
621 ASSERT(compiled_code_ != NULL); | |
622 if (type == EAGER || type == SOFT || type == LAZY) { | |
623 ASSERT(compiled_code_->kind() != Code::FUNCTION); | |
624 } | |
625 #endif | |
626 | |
624 StackFrame::Type frame_type = function == NULL | 627 StackFrame::Type frame_type = function == NULL |
625 ? StackFrame::STUB | 628 ? StackFrame::STUB |
626 : StackFrame::JAVA_SCRIPT; | 629 : StackFrame::JAVA_SCRIPT; |
627 trace_ = TraceEnabledFor(type, frame_type); | 630 trace_ = TraceEnabledFor(type, frame_type); |
628 #ifdef DEBUG | 631 #ifdef DEBUG |
629 CHECK(AllowHeapAllocation::IsAllowed()); | 632 CHECK(AllowHeapAllocation::IsAllowed()); |
630 disallow_heap_allocation_ = new DisallowHeapAllocation(); | 633 disallow_heap_allocation_ = new DisallowHeapAllocation(); |
631 #endif // DEBUG | 634 #endif // DEBUG |
632 unsigned size = ComputeInputFrameSize(); | 635 unsigned size = ComputeInputFrameSize(); |
633 input_ = new(size) FrameDescription(size, function); | 636 input_ = new(size) FrameDescription(size, function); |
634 input_->SetFrameType(frame_type); | 637 input_->SetFrameType(frame_type); |
635 } | 638 } |
636 | 639 |
637 | 640 |
638 Code* Deoptimizer::FindOptimizedCode(JSFunction* function, | 641 Code* Deoptimizer::FindOptimizedCode(JSFunction* function, |
639 Code* optimized_code) { | 642 Code* optimized_code) { |
640 switch (bailout_type_) { | 643 switch (bailout_type_) { |
641 case Deoptimizer::SOFT: | 644 case Deoptimizer::SOFT: |
642 case Deoptimizer::EAGER: | 645 case Deoptimizer::EAGER: |
643 case Deoptimizer::LAZY: { | 646 case Deoptimizer::LAZY: { |
644 Code* compiled_code = | 647 Code* compiled_code = |
645 isolate_->deoptimizer_data()->FindDeoptimizingCode(from_); | 648 isolate_->deoptimizer_data()->FindDeoptimizingCode(from_); |
646 return (compiled_code == NULL) | 649 return (compiled_code == NULL) |
647 ? static_cast<Code*>(isolate_->FindCodeObject(from_)) | 650 ? static_cast<Code*>(isolate_->FindCodeObject(from_)) |
648 : compiled_code; | 651 : compiled_code; |
649 } | 652 } |
650 case Deoptimizer::OSR: { | |
651 // The function has already been optimized and we're transitioning | |
652 // from the unoptimized shared version to the optimized one in the | |
653 // function. The return address (from_) points to unoptimized code. | |
654 Code* compiled_code = function->code(); | |
655 ASSERT(compiled_code->kind() == Code::OPTIMIZED_FUNCTION); | |
656 ASSERT(!compiled_code->contains(from_)); | |
657 return compiled_code; | |
658 } | |
659 case Deoptimizer::DEBUGGER: | 653 case Deoptimizer::DEBUGGER: |
660 ASSERT(optimized_code->contains(from_)); | 654 ASSERT(optimized_code->contains(from_)); |
661 return optimized_code; | 655 return optimized_code; |
662 } | 656 } |
663 UNREACHABLE(); | 657 UNREACHABLE(); |
664 return NULL; | 658 return NULL; |
665 } | 659 } |
666 | 660 |
667 | 661 |
668 void Deoptimizer::PrintFunctionName() { | 662 void Deoptimizer::PrintFunctionName() { |
(...skipping 96 matching lines...) | |
765 length++; | 759 length++; |
766 node = node->next(); | 760 node = node->next(); |
767 } | 761 } |
768 return length; | 762 return length; |
769 } | 763 } |
770 | 764 |
771 | 765 |
772 // We rely on this function not causing a GC. It is called from generated code | 766 // We rely on this function not causing a GC. It is called from generated code |
773 // without having a real stack frame in place. | 767 // without having a real stack frame in place. |
774 void Deoptimizer::DoComputeOutputFrames() { | 768 void Deoptimizer::DoComputeOutputFrames() { |
775 if (bailout_type_ == OSR) { | |
776 DoComputeOsrOutputFrame(); | |
777 return; | |
778 } | |
779 | |
780 // Print some helpful diagnostic information. | 769 // Print some helpful diagnostic information. |
781 int64_t start = OS::Ticks(); | 770 int64_t start = OS::Ticks(); |
782 if (FLAG_log_timer_events && | 771 if (FLAG_log_timer_events && |
783 compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) { | 772 compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) { |
784 LOG(isolate(), CodeDeoptEvent(compiled_code_)); | 773 LOG(isolate(), CodeDeoptEvent(compiled_code_)); |
785 } | 774 } |
786 if (trace_) { | 775 if (trace_) { |
787 PrintF("[deoptimizing (DEOPT %s): begin 0x%08" V8PRIxPTR " ", | 776 PrintF("[deoptimizing (DEOPT %s): begin 0x%08" V8PRIxPTR " ", |
788 MessageFor(bailout_type_), | 777 MessageFor(bailout_type_), |
789 reinterpret_cast<intptr_t>(function_)); | 778 reinterpret_cast<intptr_t>(function_)); |
(...skipping 1440 matching lines...) | |
2230 // arguments object after the deoptimized frame is built. | 2219 // arguments object after the deoptimized frame is built. |
2231 for (int i = 0; i < length; i++) { | 2220 for (int i = 0; i < length; i++) { |
2232 DoTranslateObject(iterator, Translation::ARGUMENTS_OBJECT, i); | 2221 DoTranslateObject(iterator, Translation::ARGUMENTS_OBJECT, i); |
2233 } | 2222 } |
2234 return; | 2223 return; |
2235 } | 2224 } |
2236 } | 2225 } |
2237 } | 2226 } |
2238 | 2227 |
2239 | 2228 |
2240 bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator, | |
2241 int* input_offset) { | |
2242 disasm::NameConverter converter; | |
2243 FrameDescription* output = output_[0]; | |
2244 | |
2245 // The input values are all part of the unoptimized frame so they | |
2246 // are all tagged pointers. | |
2247 uintptr_t input_value = input_->GetFrameSlot(*input_offset); | |
2248 Object* input_object = reinterpret_cast<Object*>(input_value); | |
2249 | |
2250 Translation::Opcode opcode = | |
2251 static_cast<Translation::Opcode>(iterator->Next()); | |
2252 | |
2253 switch (opcode) { | |
2254 case Translation::BEGIN: | |
2255 case Translation::JS_FRAME: | |
2256 case Translation::ARGUMENTS_ADAPTOR_FRAME: | |
2257 case Translation::CONSTRUCT_STUB_FRAME: | |
2258 case Translation::GETTER_STUB_FRAME: | |
2259 case Translation::SETTER_STUB_FRAME: | |
2260 case Translation::COMPILED_STUB_FRAME: | |
2261 UNREACHABLE(); // Malformed input. | |
2262 return false; | |
2263 | |
2264 case Translation::REGISTER: { | |
2265 int output_reg = iterator->Next(); | |
2266 if (FLAG_trace_osr) { | |
2267 PrintF(" %s <- 0x%08" V8PRIxPTR " ; [sp + %d]\n", | |
2268 converter.NameOfCPURegister(output_reg), | |
2269 input_value, | |
2270 *input_offset); | |
2271 } | |
2272 output->SetRegister(output_reg, input_value); | |
2273 break; | |
2274 } | |
2275 | |
2276 case Translation::INT32_REGISTER: { | |
2277 int32_t int32_value = 0; | |
2278 if (!input_object->ToInt32(&int32_value)) return false; | |
2279 | |
2280 int output_reg = iterator->Next(); | |
2281 if (FLAG_trace_osr) { | |
2282 PrintF(" %s <- %d (int32) ; [sp + %d]\n", | |
2283 converter.NameOfCPURegister(output_reg), | |
2284 int32_value, | |
2285 *input_offset); | |
2286 } | |
2287 output->SetRegister(output_reg, int32_value); | |
2288 break; | |
2289 } | |
2290 | |
2291 case Translation::UINT32_REGISTER: { | |
2292 uint32_t uint32_value = 0; | |
2293 if (!input_object->ToUint32(&uint32_value)) return false; | |
2294 | |
2295 int output_reg = iterator->Next(); | |
2296 if (FLAG_trace_osr) { | |
2297 PrintF(" %s <- %u (uint32) ; [sp + %d]\n", | |
2298 converter.NameOfCPURegister(output_reg), | |
2299 uint32_value, | |
2300 *input_offset); | |
2301 } | |
2302 output->SetRegister(output_reg, static_cast<int32_t>(uint32_value)); | |
2303 } | |
2304 | |
2305 | |
2306 case Translation::DOUBLE_REGISTER: { | |
2307 // Abort OSR if we don't have a number. | |
2308 if (!input_object->IsNumber()) return false; | |
2309 | |
2310 int output_reg = iterator->Next(); | |
2311 double double_value = input_object->Number(); | |
2312 if (FLAG_trace_osr) { | |
2313 PrintF(" %s <- %g (double) ; [sp + %d]\n", | |
2314 DoubleRegister::AllocationIndexToString(output_reg), | |
2315 double_value, | |
2316 *input_offset); | |
2317 } | |
2318 output->SetDoubleRegister(output_reg, double_value); | |
2319 break; | |
2320 } | |
2321 | |
2322 case Translation::STACK_SLOT: { | |
2323 int output_index = iterator->Next(); | |
2324 unsigned output_offset = | |
2325 output->GetOffsetFromSlotIndex(output_index); | |
2326 if (FLAG_trace_osr) { | |
2327 PrintF(" [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ", | |
2328 output_offset, | |
2329 input_value, | |
2330 *input_offset); | |
2331 reinterpret_cast<Object*>(input_value)->ShortPrint(); | |
2332 PrintF("\n"); | |
2333 } | |
2334 output->SetFrameSlot(output_offset, input_value); | |
2335 break; | |
2336 } | |
2337 | |
2338 case Translation::INT32_STACK_SLOT: { | |
2339 int32_t int32_value = 0; | |
2340 if (!input_object->ToInt32(&int32_value)) return false; | |
2341 | |
2342 int output_index = iterator->Next(); | |
2343 unsigned output_offset = | |
2344 output->GetOffsetFromSlotIndex(output_index); | |
2345 if (FLAG_trace_osr) { | |
2346 PrintF(" [sp + %d] <- %d (int32) ; [sp + %d]\n", | |
2347 output_offset, | |
2348 int32_value, | |
2349 *input_offset); | |
2350 } | |
2351 output->SetFrameSlot(output_offset, int32_value); | |
2352 break; | |
2353 } | |
2354 | |
2355 case Translation::UINT32_STACK_SLOT: { | |
2356 uint32_t uint32_value = 0; | |
2357 if (!input_object->ToUint32(&uint32_value)) return false; | |
2358 | |
2359 int output_index = iterator->Next(); | |
2360 unsigned output_offset = | |
2361 output->GetOffsetFromSlotIndex(output_index); | |
2362 if (FLAG_trace_osr) { | |
2363 PrintF(" [sp + %d] <- %u (uint32) ; [sp + %d]\n", | |
2364 output_offset, | |
2365 uint32_value, | |
2366 *input_offset); | |
2367 } | |
2368 output->SetFrameSlot(output_offset, static_cast<int32_t>(uint32_value)); | |
2369 break; | |
2370 } | |
2371 | |
2372 case Translation::DOUBLE_STACK_SLOT: { | |
2373 static const int kLowerOffset = 0 * kPointerSize; | |
2374 static const int kUpperOffset = 1 * kPointerSize; | |
2375 | |
2376 // Abort OSR if we don't have a number. | |
2377 if (!input_object->IsNumber()) return false; | |
2378 | |
2379 int output_index = iterator->Next(); | |
2380 unsigned output_offset = | |
2381 output->GetOffsetFromSlotIndex(output_index); | |
2382 double double_value = input_object->Number(); | |
2383 uint64_t int_value = BitCast<uint64_t, double>(double_value); | |
2384 int32_t lower = static_cast<int32_t>(int_value); | |
2385 int32_t upper = static_cast<int32_t>(int_value >> kBitsPerInt); | |
2386 if (FLAG_trace_osr) { | |
2387 PrintF(" [sp + %d] <- 0x%08x (upper bits of %g) ; [sp + %d]\n", | |
2388 output_offset + kUpperOffset, | |
2389 upper, | |
2390 double_value, | |
2391 *input_offset); | |
2392 PrintF(" [sp + %d] <- 0x%08x (lower bits of %g) ; [sp + %d]\n", | |
2393 output_offset + kLowerOffset, | |
2394 lower, | |
2395 double_value, | |
2396 *input_offset); | |
2397 } | |
2398 output->SetFrameSlot(output_offset + kLowerOffset, lower); | |
2399 output->SetFrameSlot(output_offset + kUpperOffset, upper); | |
2400 break; | |
2401 } | |
2402 | |
2403 case Translation::LITERAL: { | |
2404 // Just ignore non-materialized literals. | |
2405 iterator->Next(); | |
2406 break; | |
2407 } | |
2408 | |
2409 case Translation::ARGUMENTS_OBJECT: { | |
2410 // Optimized code assumes that the argument object has not been | |
2411 // materialized and so bypasses it when doing arguments access. | |
2412 // We should have bailed out before starting the frame | |
2413 // translation. | |
2414 UNREACHABLE(); | |
2415 return false; | |
2416 } | |
2417 } | |
2418 | |
2419 *input_offset -= kPointerSize; | |
2420 return true; | |
2421 } | |
2422 | |
2423 | |
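The whole of DoOsrTranslateCommand goes away with the OSR bailout type, since on-stack replacement no longer funnels through the deoptimizer. Incidentally, the deleted UINT32_REGISTER arm (old lines 2291-2303) had no break and fell through into DOUBLE_REGISTER. A minimal stand-alone sketch of the corrected control flow, with illustrative opcodes and values in place of the real translation machinery:

    #include <stdint.h>
    #include <cstdio>

    enum Opcode { UINT32_REGISTER, DOUBLE_REGISTER };

    int main() {
      uint32_t uint32_value = 42;
      int32_t reg = 0;
      Opcode opcode = UINT32_REGISTER;
      switch (opcode) {
        case UINT32_REGISTER:
          reg = static_cast<int32_t>(uint32_value);
          break;  // The deleted code omitted this break and fell through.
        case DOUBLE_REGISTER:
          reg = -1;  // Fall-through would have clobbered the value above.
          break;
      }
      std::printf("%d\n", reg);  // Prints 42.
      return 0;
    }
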
2424 void Deoptimizer::PatchInterruptCode(Code* unoptimized_code, | 2229 void Deoptimizer::PatchInterruptCode(Code* unoptimized_code, |
2425 Code* interrupt_code, | 2230 Code* interrupt_code, |
2426 Code* replacement_code) { | 2231 Code* replacement_code) { |
2427 // Iterate over the back edge table and patch every interrupt | 2232 // Iterate over the back edge table and patch every interrupt |
2428 // call to an unconditional call to the replacement code. | 2233 // call to an unconditional call to the replacement code. |
2429 ASSERT(unoptimized_code->kind() == Code::FUNCTION); | 2234 ASSERT(unoptimized_code->kind() == Code::FUNCTION); |
2430 int loop_nesting_level = unoptimized_code->allow_osr_at_loop_nesting_level(); | 2235 int loop_nesting_level = unoptimized_code->allow_osr_at_loop_nesting_level(); |
2431 Address back_edge_cursor = unoptimized_code->instruction_start() + | 2236 Address back_edge_cursor = unoptimized_code->instruction_start() + |
2432 unoptimized_code->back_edge_table_offset(); | 2237 unoptimized_code->back_edge_table_offset(); |
2433 uint32_t table_length = Memory::uint32_at(back_edge_cursor); | 2238 uint32_t table_length = Memory::uint32_at(back_edge_cursor); |
(...skipping 78 matching lines...) | |
2512 } | 2317 } |
2513 } | 2318 } |
2514 #endif // DEBUG | 2319 #endif // DEBUG |
2515 | 2320 |
2516 | 2321 |
2517 unsigned Deoptimizer::ComputeInputFrameSize() const { | 2322 unsigned Deoptimizer::ComputeInputFrameSize() const { |
2518 unsigned fixed_size = ComputeFixedSize(function_); | 2323 unsigned fixed_size = ComputeFixedSize(function_); |
2519 // The fp-to-sp delta already takes the context and the function | 2324 // The fp-to-sp delta already takes the context and the function |
2520 // into account so we have to avoid double counting them (-2). | 2325 // into account so we have to avoid double counting them (-2). |
2521 unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize); | 2326 unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize); |
2327 if (FLAG_trace_deopt) { | |
2328 PrintF("Deopt input frame size=%d (fixed_size=%d + fp_sp_delta=%d - %d)\n", | |
Michael Starzinger (2013/07/31 14:55:50): nit: Can we remove this left-over debug code again?
2329 result, fixed_size, fp_to_sp_delta_, (2 * kPointerSize)); | |
2330 } | |
2522 #ifdef DEBUG | 2331 #ifdef DEBUG |
2523 if (bailout_type_ == OSR) { | 2332 if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) { |
2524 // TODO(kasperl): It would be nice if we could verify that the | |
2525 // size matches with the stack height we can compute based on the | |
2526 // environment at the OSR entry. The code for that is built into | |
2527 // the DoComputeOsrOutputFrame function for now. | |
2528 } else if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) { | |
2529 unsigned stack_slots = compiled_code_->stack_slots(); | 2333 unsigned stack_slots = compiled_code_->stack_slots(); |
2530 unsigned outgoing_size = ComputeOutgoingArgumentSize(); | 2334 unsigned outgoing_size = ComputeOutgoingArgumentSize(); |
2531 ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size); | 2335 unsigned check = fixed_size + (stack_slots * kPointerSize) + outgoing_size; |
2336 if (FLAG_trace_deopt) { | |
2337 PrintF("Deopt input frame size=%d (fixed_size=%d +" | |
Michael Starzinger (2013/07/31 14:55:50): Likewise.
2338 " stack_slots=%d * %d + %d)\n", | |
2339 check, fixed_size, stack_slots, kPointerSize, outgoing_size); | |
2340 } | |
2341 ASSERT(result == check); | |
2532 } | 2342 } |
2533 #endif | 2343 #endif |
2534 return result; | 2344 return result; |
2535 } | 2345 } |
2536 | 2346 |
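ComputeInputFrameSize subtracts two words because the fp-to-sp delta already counts the context and the function that ComputeFixedSize includes; the DEBUG block then cross-checks the result against a size derived from the code object's layout. A worked example of both formulas; kPointerSize and all the sizes below are illustrative, not taken from a real frame:

    #include <cassert>
    #include <cstdio>

    int main() {
      const unsigned kPointerSize = 8;  // Illustrative 64-bit target.
      // Return address, frame pointer, function, context, one argument.
      const unsigned fixed_size = 5 * kPointerSize;
      const unsigned stack_slots = 6;
      const unsigned outgoing_size = 2 * kPointerSize;

      // fp_to_sp_delta covers the spill slots and outgoing arguments, plus
      // the context and function slots that fixed_size already counts.
      const unsigned fp_to_sp_delta =
          (2 * kPointerSize) + (stack_slots * kPointerSize) + outgoing_size;
      const unsigned result = fixed_size + fp_to_sp_delta - 2 * kPointerSize;

      // The DEBUG cross-check derives the same size from the code object.
      const unsigned check =
          fixed_size + (stack_slots * kPointerSize) + outgoing_size;
      assert(result == check);
      std::printf("input frame size: %u bytes\n", result);  // 104 bytes.
      return 0;
    }
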
2537 | 2347 |
2538 unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const { | 2348 unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const { |
2539 // The fixed part of the frame consists of the return address, frame | 2349 // The fixed part of the frame consists of the return address, frame |
2540 // pointer, function, context, and all the incoming arguments. | 2350 // pointer, function, context, and all the incoming arguments. |
2541 return ComputeIncomingArgumentSize(function) + | 2351 return ComputeIncomingArgumentSize(function) + |
(...skipping 601 matching lines...) | |
3143 | 2953 |
3144 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) { | 2954 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) { |
3145 v->VisitPointer(BitCast<Object**>(&function_)); | 2955 v->VisitPointer(BitCast<Object**>(&function_)); |
3146 v->VisitPointers(parameters_, parameters_ + parameters_count_); | 2956 v->VisitPointers(parameters_, parameters_ + parameters_count_); |
3147 v->VisitPointers(expression_stack_, expression_stack_ + expression_count_); | 2957 v->VisitPointers(expression_stack_, expression_stack_ + expression_count_); |
3148 } | 2958 } |
3149 | 2959 |
3150 #endif // ENABLE_DEBUGGER_SUPPORT | 2960 #endif // ENABLE_DEBUGGER_SUPPORT |
3151 | 2961 |
3152 } } // namespace v8::internal | 2962 } } // namespace v8::internal |