Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 367 matching lines...) | |
| 378 | 378 |
| 379 bool LCodeGen::GenerateJumpTable() { | 379 bool LCodeGen::GenerateJumpTable() { |
| 380 Label needs_frame_not_call; | 380 Label needs_frame_not_call; |
| 381 Label needs_frame_is_call; | 381 Label needs_frame_is_call; |
| 382 if (jump_table_.length() > 0) { | 382 if (jump_table_.length() > 0) { |
| 383 Comment(";;; -------------------- Jump table --------------------"); | 383 Comment(";;; -------------------- Jump table --------------------"); |
| 384 } | 384 } |
| 385 for (int i = 0; i < jump_table_.length(); i++) { | 385 for (int i = 0; i < jump_table_.length(); i++) { |
| 386 __ bind(&jump_table_[i].label); | 386 __ bind(&jump_table_[i].label); |
| 387 Address entry = jump_table_[i].address; | 387 Address entry = jump_table_[i].address; |
| 388 bool is_lazy_deopt = jump_table_[i].is_lazy_deopt; | 388 Deoptimizer::BailoutType type = jump_table_[i].bailout_type; |
| 389 Deoptimizer::BailoutType type = | |
| 390 is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER; | |
| 391 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); | 389 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); |
| 392 if (id == Deoptimizer::kNotDeoptimizationEntry) { | 390 if (id == Deoptimizer::kNotDeoptimizationEntry) { |
| 393 Comment(";;; jump table entry %d.", i); | 391 Comment(";;; jump table entry %d.", i); |
| 394 } else { | 392 } else { |
| 395 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); | 393 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); |
| 396 } | 394 } |
| 397 if (jump_table_[i].needs_frame) { | 395 if (jump_table_[i].needs_frame) { |
| 398 __ push(Immediate(ExternalReference::ForDeoptEntry(entry))); | 396 __ push(Immediate(ExternalReference::ForDeoptEntry(entry))); |
| 399 if (is_lazy_deopt) { | 397 if (type == Deoptimizer::LAZY) { |
| 400 if (needs_frame_is_call.is_bound()) { | 398 if (needs_frame_is_call.is_bound()) { |
| 401 __ jmp(&needs_frame_is_call); | 399 __ jmp(&needs_frame_is_call); |
| 402 } else { | 400 } else { |
| 403 __ bind(&needs_frame_is_call); | 401 __ bind(&needs_frame_is_call); |
| 404 __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset)); | 402 __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset)); |
| 405 // This variant of deopt can only be used with stubs. Since we don't | 403 // This variant of deopt can only be used with stubs. Since we don't |
| 406 // have a function pointer to install in the stack frame that we're | 404 // have a function pointer to install in the stack frame that we're |
| 407 // building, install a special marker there instead. | 405 // building, install a special marker there instead. |
| 408 ASSERT(info()->IsStub()); | 406 ASSERT(info()->IsStub()); |
| 409 __ push(Immediate(Smi::FromInt(StackFrame::STUB))); | 407 __ push(Immediate(Smi::FromInt(StackFrame::STUB))); |
| (...skipping 24 matching lines...) | |
| 434 __ push(Immediate(Smi::FromInt(StackFrame::STUB))); | 432 __ push(Immediate(Smi::FromInt(StackFrame::STUB))); |
| 435 // Push the continuation which was stashed where the ebp should | 433 // Push the continuation which was stashed where the ebp should |
| 436 // be. Replace it with the saved ebp. | 434 // be. Replace it with the saved ebp. |
| 437 __ push(MemOperand(esp, 2 * kPointerSize)); | 435 __ push(MemOperand(esp, 2 * kPointerSize)); |
| 438 __ mov(MemOperand(esp, 3 * kPointerSize), ebp); | 436 __ mov(MemOperand(esp, 3 * kPointerSize), ebp); |
| 439 __ lea(ebp, MemOperand(esp, 3 * kPointerSize)); | 437 __ lea(ebp, MemOperand(esp, 3 * kPointerSize)); |
| 440 __ ret(0); // Call the continuation without clobbering registers. | 438 __ ret(0); // Call the continuation without clobbering registers. |
| 441 } | 439 } |
| 442 } | 440 } |
| 443 } else { | 441 } else { |
| 444 if (is_lazy_deopt) { | 442 if (type == Deoptimizer::LAZY) { |
| 445 __ call(entry, RelocInfo::RUNTIME_ENTRY); | 443 __ call(entry, RelocInfo::RUNTIME_ENTRY); |
| 446 } else { | 444 } else { |
| 447 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); | 445 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); |
| 448 } | 446 } |
| 449 } | 447 } |
| 450 } | 448 } |
| 451 return !is_aborted(); | 449 return !is_aborted(); |
| 452 } | 450 } |
| 453 | 451 |
| 454 | 452 |
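The central data-structure change in this CL: each jump-table entry now carries the full `Deoptimizer::BailoutType` instead of a boolean `is_lazy_deopt`, so `GenerateJumpTable()` can distinguish the bailout flavors directly. A minimal sketch of the updated entry, with the field set and constructor order inferred from the call site in `DeoptimizeIf()` below; the actual definition (moved into the `Deoptimizer` class by this CL) is not part of this diff:

```cpp
// Sketch only -- shape inferred from usage in this review, not the
// verbatim V8 definition.
struct JumpTableEntry {
  JumpTableEntry(Address entry, Deoptimizer::BailoutType type, bool frame)
      : label(),
        address(entry),
        bailout_type(type),
        needs_frame(frame) { }
  Label label;                            // Bound to the table slot in GenerateJumpTable().
  Address address;                        // Deoptimization entry to jump or call to.
  Deoptimizer::BailoutType bailout_type;  // EAGER, LAZY, or SOFT.
  bool needs_frame;                       // Set when no frame is built at the deopt site.
};
```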
| (...skipping 431 matching lines...) | |
| 886 int deoptimization_index = deoptimizations_.length(); | 884 int deoptimization_index = deoptimizations_.length(); |
| 887 int pc_offset = masm()->pc_offset(); | 885 int pc_offset = masm()->pc_offset(); |
| 888 environment->Register(deoptimization_index, | 886 environment->Register(deoptimization_index, |
| 889 translation.index(), | 887 translation.index(), |
| 890 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 888 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); |
| 891 deoptimizations_.Add(environment, zone()); | 889 deoptimizations_.Add(environment, zone()); |
| 892 } | 890 } |
| 893 } | 891 } |
| 894 | 892 |
| 895 | 893 |
| 896 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { | 894 void LCodeGen::DeoptimizeIf(Condition cc, |
| 895 LEnvironment* environment, | |
| 896 Deoptimizer::BailoutType bailout_type) { | |
| 897 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 897 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 898 ASSERT(environment->HasBeenRegistered()); | 898 ASSERT(environment->HasBeenRegistered()); |
| 899 // It's an error to deoptimize with the x87 fp stack in use. | 899 // It's an error to deoptimize with the x87 fp stack in use. |
| 900 ASSERT(x87_stack_depth_ == 0); | 900 ASSERT(x87_stack_depth_ == 0); |
| 901 int id = environment->deoptimization_index(); | 901 int id = environment->deoptimization_index(); |
| 902 ASSERT(info()->IsOptimizing() || info()->IsStub()); | 902 ASSERT(info()->IsOptimizing() || info()->IsStub()); |
| 903 Deoptimizer::BailoutType bailout_type = info()->IsStub() | |
| 904 ? Deoptimizer::LAZY | |
| 905 : Deoptimizer::EAGER; | |
| 906 Address entry = | 903 Address entry = |
| 907 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); | 904 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); |
| 908 if (entry == NULL) { | 905 if (entry == NULL) { |
| 909 Abort("bailout was not prepared"); | 906 Abort("bailout was not prepared"); |
| 910 return; | 907 return; |
| 911 } | 908 } |
| 912 | 909 |
| 913 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { | 910 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { |
| 914 Handle<SharedFunctionInfo> shared(info()->shared_info()); | 911 Handle<SharedFunctionInfo> shared(info()->shared_info()); |
| 915 Label no_deopt; | 912 Label no_deopt; |
| (...skipping 25 matching lines...) | |
| 941 if (FLAG_trap_on_deopt) { | 938 if (FLAG_trap_on_deopt) { |
| 942 Label done; | 939 Label done; |
| 943 if (cc != no_condition) { | 940 if (cc != no_condition) { |
| 944 __ j(NegateCondition(cc), &done, Label::kNear); | 941 __ j(NegateCondition(cc), &done, Label::kNear); |
| 945 } | 942 } |
| 946 __ int3(); | 943 __ int3(); |
| 947 __ bind(&done); | 944 __ bind(&done); |
| 948 } | 945 } |
| 949 | 946 |
| 950 ASSERT(info()->IsStub() || frame_is_built_); | 947 ASSERT(info()->IsStub() || frame_is_built_); |
| 951 bool needs_lazy_deopt = info()->IsStub(); | |
| 952 if (cc == no_condition && frame_is_built_) { | 948 if (cc == no_condition && frame_is_built_) { |
| 953 if (needs_lazy_deopt) { | 949 if (bailout_type == Deoptimizer::LAZY) { |
| 954 __ call(entry, RelocInfo::RUNTIME_ENTRY); | 950 __ call(entry, RelocInfo::RUNTIME_ENTRY); |
| 955 } else { | 951 } else { |
| 956 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); | 952 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); |
| 957 } | 953 } |
| 958 } else { | 954 } else { |
| 959 // We often have several deopts to the same entry; reuse the last | 955 // We often have several deopts to the same entry; reuse the last |
| 960 // jump entry if this is the case. | 956 // jump entry if this is the case. |
| 961 if (jump_table_.is_empty() || | 957 if (jump_table_.is_empty() || |
| 962 jump_table_.last().address != entry || | 958 jump_table_.last().address != entry || |
| 963 jump_table_.last().needs_frame != !frame_is_built_ || | 959 jump_table_.last().needs_frame != !frame_is_built_ || |
| 964 jump_table_.last().is_lazy_deopt != needs_lazy_deopt) { | 960 jump_table_.last().bailout_type != bailout_type) { |
| 965 JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt); | 961 Deoptimizer::JumpTableEntry table_entry(entry, |
| 962 bailout_type, | |
| 963 !frame_is_built_); | |
| 966 jump_table_.Add(table_entry, zone()); | 964 jump_table_.Add(table_entry, zone()); |
| 967 } | 965 } |
| 968 if (cc == no_condition) { | 966 if (cc == no_condition) { |
| 969 __ jmp(&jump_table_.last().label); | 967 __ jmp(&jump_table_.last().label); |
| 970 } else { | 968 } else { |
| 971 __ j(cc, &jump_table_.last().label); | 969 __ j(cc, &jump_table_.last().label); |
| 972 } | 970 } |
| 973 } | 971 } |
| 974 } | 972 } |
| 975 | 973 |
| 976 | 974 |
| 975 void LCodeGen::DeoptimizeIf(Condition cc, | |
| 976 LEnvironment* environment) { | |
| 977 Deoptimizer::BailoutType bailout_type = info()->IsStub() | |
| 978 ? Deoptimizer::LAZY | |
| 979 : Deoptimizer::EAGER; | |
| 980 DeoptimizeIf(cc, environment, bailout_type); | |
| 981 } | |
| 982 | |
| 983 | |
| 984 void LCodeGen::SoftDeoptimizeIf(Condition cc, | |

Jakob Kummerow 2013/05/14 11:01:57
Soft deopts are always unconditional, so you could …

danno 2013/05/14 11:29:09
Done.

| 985 LEnvironment* environment) { | |
| 986 ASSERT(!info()->IsStub()); | |
| 987 DeoptimizeIf(cc, environment, Deoptimizer::SOFT); | |
| 988 } | |
| 989 | |
| 990 | |
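Both public entry points funnel into the three-argument `DeoptimizeIf()`: the two-argument form keeps the old policy (LAZY for stubs, EAGER otherwise), while `SoftDeoptimizeIf()` forces a SOFT bailout and asserts it is never reached from stub code. The three bailout flavors as used in this CL, sketched below under the assumption that the enum has at least these members; the authoritative declaration lives in src/deoptimizer.h and is not shown in this diff:

```cpp
// Assumed shape of the enum this CL threads through the code generator;
// see src/deoptimizer.h for the real declaration.
class Deoptimizer {
 public:
  enum BailoutType {
    EAGER,  // Bail out immediately: emitted as a jmp to the deopt entry.
    LAZY,   // Bail out on return: emitted as a call so the pc is recorded.
    SOFT    // Unconditional bailout from code Crankshaft assumed was cold.
  };
  // (rest of the class elided)
};
```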
| 977 void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) { | 991 void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) { |
| 978 ZoneList<Handle<Map> > maps(1, zone()); | 992 ZoneList<Handle<Map> > maps(1, zone()); |
| 979 int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); | 993 int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); |
| 980 for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) { | 994 for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) { |
| 981 RelocInfo::Mode mode = it.rinfo()->rmode(); | 995 RelocInfo::Mode mode = it.rinfo()->rmode(); |
| 982 if (mode == RelocInfo::EMBEDDED_OBJECT && | 996 if (mode == RelocInfo::EMBEDDED_OBJECT && |
| 983 it.rinfo()->target_object()->IsMap()) { | 997 it.rinfo()->target_object()->IsMap()) { |
| 984 Handle<Map> map(Map::cast(it.rinfo()->target_object())); | 998 Handle<Map> map(Map::cast(it.rinfo()->target_object())); |
| 985 if (map->CanTransition()) { | 999 if (map->CanTransition()) { |
| 986 maps.Add(map, zone()); | 1000 maps.Add(map, zone()); |
| (...skipping 5322 matching lines...) | |
| 6309 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { | 6323 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { |
| 6310 EnsureSpaceForLazyDeopt(); | 6324 EnsureSpaceForLazyDeopt(); |
| 6311 ASSERT(instr->HasEnvironment()); | 6325 ASSERT(instr->HasEnvironment()); |
| 6312 LEnvironment* env = instr->environment(); | 6326 LEnvironment* env = instr->environment(); |
| 6313 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); | 6327 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); |
| 6314 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); | 6328 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
| 6315 } | 6329 } |
| 6316 | 6330 |
| 6317 | 6331 |
| 6318 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { | 6332 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { |
| 6319 DeoptimizeIf(no_condition, instr->environment()); | 6333 if (instr->hydrogen_value()->IsSoftDeoptimize()) { |
| 6334 SoftDeoptimizeIf(no_condition, instr->environment()); | |
| 6335 } else { | |
| 6336 DeoptimizeIf(no_condition, instr->environment()); | |
| 6337 } | |
| 6320 } | 6338 } |
| 6321 | 6339 |
| 6322 | 6340 |
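`DoDeoptimize()` now dispatches on the hydrogen instruction: an `HSoftDeoptimize` lowers to a SOFT bailout, while everything else keeps the previous behavior. Conditional deopts elsewhere in the code generator are unaffected and continue to use the two-argument form, e.g. (an illustrative call in the style of this file, not a line taken from this CL):

```cpp
// Illustrative only: a typical guarded deopt keeps the two-argument
// DeoptimizeIf and therefore the old EAGER-or-LAZY policy.
__ cmp(eax, factory()->undefined_value());
DeoptimizeIf(equal, instr->environment());
```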
| 6323 void LCodeGen::DoDummyUse(LDummyUse* instr) { | 6341 void LCodeGen::DoDummyUse(LDummyUse* instr) { |
| 6324 // Nothing to see here, move on! | 6342 // Nothing to see here, move on! |
| 6325 } | 6343 } |
| 6326 | 6344 |
| 6327 | 6345 |
| 6328 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) { | 6346 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) { |
| 6329 LOperand* obj = instr->object(); | 6347 LOperand* obj = instr->object(); |
| (...skipping 192 matching lines...) | |
| 6522 FixedArray::kHeaderSize - kPointerSize)); | 6540 FixedArray::kHeaderSize - kPointerSize)); |
| 6523 __ bind(&done); | 6541 __ bind(&done); |
| 6524 } | 6542 } |
| 6525 | 6543 |
| 6526 | 6544 |
| 6527 #undef __ | 6545 #undef __ |
| 6528 | 6546 |
| 6529 } } // namespace v8::internal | 6547 } } // namespace v8::internal |
| 6530 | 6548 |
| 6531 #endif // V8_TARGET_ARCH_IA32 | 6549 #endif // V8_TARGET_ARCH_IA32 |