| OLD | NEW |
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 165 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 176 RegisterAllocator register_allocator(this); | 176 RegisterAllocator register_allocator(this); |
| 177 allocator_ = &register_allocator; | 177 allocator_ = &register_allocator; |
| 178 ASSERT(frame_ == NULL); | 178 ASSERT(frame_ == NULL); |
| 179 frame_ = new VirtualFrame(); | 179 frame_ = new VirtualFrame(); |
| 180 set_in_spilled_code(false); | 180 set_in_spilled_code(false); |
| 181 | 181 |
| 182 // Adjust for function-level loop nesting. | 182 // Adjust for function-level loop nesting. |
| 183 ASSERT_EQ(0, loop_nesting_); | 183 ASSERT_EQ(0, loop_nesting_); |
| 184 loop_nesting_ = info->is_in_loop() ? 1 : 0; | 184 loop_nesting_ = info->is_in_loop() ? 1 : 0; |
| 185 | 185 |
| 186 Isolate::Current()->set_jump_target_compiling_deferred_code(false); | 186 masm()->isolate()->set_jump_target_compiling_deferred_code(false); |
| 187 | 187 |
| 188 { | 188 { |
| 189 CodeGenState state(this); | 189 CodeGenState state(this); |
| 190 | 190 |
| 191 // Entry: | 191 // Entry: |
| 192 // Stack: receiver, arguments, return address. | 192 // Stack: receiver, arguments, return address. |
| 193 // ebp: caller's frame pointer | 193 // ebp: caller's frame pointer |
| 194 // esp: stack pointer | 194 // esp: stack pointer |
| 195 // edi: called JS function | 195 // edi: called JS function |
| 196 // esi: callee's context | 196 // esi: callee's context |
| (...skipping 352 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 549 void CodeGenerator::ConvertInt32ResultToNumber(Result* value) { | 549 void CodeGenerator::ConvertInt32ResultToNumber(Result* value) { |
| 550 ASSERT(value->is_untagged_int32()); | 550 ASSERT(value->is_untagged_int32()); |
| 551 if (value->is_register()) { | 551 if (value->is_register()) { |
| 552 Register val = value->reg(); | 552 Register val = value->reg(); |
| 553 JumpTarget done; | 553 JumpTarget done; |
| 554 __ add(val, Operand(val)); | 554 __ add(val, Operand(val)); |
| 555 done.Branch(no_overflow, value); | 555 done.Branch(no_overflow, value); |
| 556 __ sar(val, 1); | 556 __ sar(val, 1); |
| 557 // If there was an overflow, bits 30 and 31 of the original number disagree. | 557 // If there was an overflow, bits 30 and 31 of the original number disagree. |
| 558 __ xor_(val, 0x80000000u); | 558 __ xor_(val, 0x80000000u); |
| 559 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) { | 559 if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) { |
| 560 CpuFeatures::Scope fscope(SSE2); | 560 CpuFeatures::Scope fscope(SSE2); |
| 561 __ cvtsi2sd(xmm0, Operand(val)); | 561 __ cvtsi2sd(xmm0, Operand(val)); |
| 562 } else { | 562 } else { |
| 563 // Move val to ST[0] in the FPU | 563 // Move val to ST[0] in the FPU |
| 564 // Push and pop are safe with respect to the virtual frame because | 564 // Push and pop are safe with respect to the virtual frame because |
| 565 // all synced elements are below the actual stack pointer. | 565 // all synced elements are below the actual stack pointer. |
| 566 __ push(val); | 566 __ push(val); |
| 567 __ fild_s(Operand(esp, 0)); | 567 __ fild_s(Operand(esp, 0)); |
| 568 __ pop(val); | 568 __ pop(val); |
| 569 } | 569 } |
| 570 Result scratch = allocator_->Allocate(); | 570 Result scratch = allocator_->Allocate(); |
| 571 ASSERT(scratch.is_register()); | 571 ASSERT(scratch.is_register()); |
| 572 Label allocation_failed; | 572 Label allocation_failed; |
| 573 __ AllocateHeapNumber(val, scratch.reg(), | 573 __ AllocateHeapNumber(val, scratch.reg(), |
| 574 no_reg, &allocation_failed); | 574 no_reg, &allocation_failed); |
| 575 VirtualFrame* clone = new VirtualFrame(frame_); | 575 VirtualFrame* clone = new VirtualFrame(frame_); |
| 576 scratch.Unuse(); | 576 scratch.Unuse(); |
| 577 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) { | 577 if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) { |
| 578 CpuFeatures::Scope fscope(SSE2); | 578 CpuFeatures::Scope fscope(SSE2); |
| 579 __ movdbl(FieldOperand(val, HeapNumber::kValueOffset), xmm0); | 579 __ movdbl(FieldOperand(val, HeapNumber::kValueOffset), xmm0); |
| 580 } else { | 580 } else { |
| 581 __ fstp_d(FieldOperand(val, HeapNumber::kValueOffset)); | 581 __ fstp_d(FieldOperand(val, HeapNumber::kValueOffset)); |
| 582 } | 582 } |
| 583 done.Jump(value); | 583 done.Jump(value); |
| 584 | 584 |
| 585 // Establish the virtual frame, cloned from where AllocateHeapNumber | 585 // Establish the virtual frame, cloned from where AllocateHeapNumber |
| 586 // jumped to allocation_failed. | 586 // jumped to allocation_failed. |
| 587 RegisterFile empty_regs; | 587 RegisterFile empty_regs; |
| 588 SetFrame(clone, &empty_regs); | 588 SetFrame(clone, &empty_regs); |
| 589 __ bind(&allocation_failed); | 589 __ bind(&allocation_failed); |
| 590 if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) { | 590 if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) { |
| 591 // Pop the value from the floating point stack. | 591 // Pop the value from the floating point stack. |
| 592 __ fstp(0); | 592 __ fstp(0); |
| 593 } | 593 } |
| 594 unsafe_bailout_->Jump(); | 594 unsafe_bailout_->Jump(); |
| 595 | 595 |
| 596 done.Bind(value); | 596 done.Bind(value); |
| 597 } else { | 597 } else { |
| 598 ASSERT(value->is_constant()); | 598 ASSERT(value->is_constant()); |
| 599 } | 599 } |
| 600 value->set_untagged_int32(false); | 600 value->set_untagged_int32(false); |
| 601 value->set_type_info(TypeInfo::Integer32()); | 601 value->set_type_info(TypeInfo::Integer32()); |
| 602 } | 602 } |
| 603 | 603 |
| 604 | 604 |
| 605 void CodeGenerator::Load(Expression* expr) { | 605 void CodeGenerator::Load(Expression* expr) { |
| 606 #ifdef DEBUG | 606 #ifdef DEBUG |
| 607 int original_height = frame_->height(); | 607 int original_height = frame_->height(); |
| 608 #endif | 608 #endif |
| 609 ASSERT(!in_spilled_code()); | 609 ASSERT(!in_spilled_code()); |
| 610 | 610 |
| 611 // If the expression should be a side-effect-free 32-bit int computation, | 611 // If the expression should be a side-effect-free 32-bit int computation, |
| 612 // compile that SafeInt32 path, and a bailout path. | 612 // compile that SafeInt32 path, and a bailout path. |
| 613 if (!in_safe_int32_mode() && | 613 if (!in_safe_int32_mode() && |
| 614 safe_int32_mode_enabled() && | 614 safe_int32_mode_enabled() && |
| 615 expr->side_effect_free() && | 615 expr->side_effect_free() && |
| 616 expr->num_bit_ops() > 2 && | 616 expr->num_bit_ops() > 2 && |
| 617 Isolate::Current()->cpu_features()->IsSupported(SSE2)) { | 617 masm()->isolate()->cpu_features()->IsSupported(SSE2)) { |
| 618 BreakTarget unsafe_bailout; | 618 BreakTarget unsafe_bailout; |
| 619 JumpTarget done; | 619 JumpTarget done; |
| 620 unsafe_bailout.set_expected_height(frame_->height()); | 620 unsafe_bailout.set_expected_height(frame_->height()); |
| 621 LoadInSafeInt32Mode(expr, &unsafe_bailout); | 621 LoadInSafeInt32Mode(expr, &unsafe_bailout); |
| 622 done.Jump(); | 622 done.Jump(); |
| 623 | 623 |
| 624 if (unsafe_bailout.is_linked()) { | 624 if (unsafe_bailout.is_linked()) { |
| 625 unsafe_bailout.Bind(); | 625 unsafe_bailout.Bind(); |
| 626 LoadWithSafeInt32ModeDisabled(expr); | 626 LoadWithSafeInt32ModeDisabled(expr); |
| 627 } | 627 } |
| (...skipping 360 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 988 OverwriteMode mode_; | 988 OverwriteMode mode_; |
| 989 Label answer_out_of_range_; | 989 Label answer_out_of_range_; |
| 990 Label non_smi_input_; | 990 Label non_smi_input_; |
| 991 Label constant_rhs_; | 991 Label constant_rhs_; |
| 992 Smi* smi_value_; | 992 Smi* smi_value_; |
| 993 }; | 993 }; |
| 994 | 994 |
| 995 | 995 |
| 996 Label* DeferredInlineBinaryOperation::NonSmiInputLabel() { | 996 Label* DeferredInlineBinaryOperation::NonSmiInputLabel() { |
| 997 if (Token::IsBitOp(op_) && | 997 if (Token::IsBitOp(op_) && |
| 998 Isolate::Current()->cpu_features()->IsSupported(SSE2)) { | 998 masm()->isolate()->cpu_features()->IsSupported(SSE2)) { |
| 999 return &non_smi_input_; | 999 return &non_smi_input_; |
| 1000 } else { | 1000 } else { |
| 1001 return entry_label(); | 1001 return entry_label(); |
| 1002 } | 1002 } |
| 1003 } | 1003 } |
| 1004 | 1004 |
| 1005 | 1005 |
| 1006 void DeferredInlineBinaryOperation::JumpToAnswerOutOfRange(Condition cond) { | 1006 void DeferredInlineBinaryOperation::JumpToAnswerOutOfRange(Condition cond) { |
| 1007 __ j(cond, &answer_out_of_range_); | 1007 __ j(cond, &answer_out_of_range_); |
| 1008 } | 1008 } |
| 1009 | 1009 |
| 1010 | 1010 |
| 1011 void DeferredInlineBinaryOperation::JumpToConstantRhs(Condition cond, | 1011 void DeferredInlineBinaryOperation::JumpToConstantRhs(Condition cond, |
| 1012 Smi* smi_value) { | 1012 Smi* smi_value) { |
| 1013 smi_value_ = smi_value; | 1013 smi_value_ = smi_value; |
| 1014 __ j(cond, &constant_rhs_); | 1014 __ j(cond, &constant_rhs_); |
| 1015 } | 1015 } |
| 1016 | 1016 |
| 1017 | 1017 |
| 1018 void DeferredInlineBinaryOperation::Generate() { | 1018 void DeferredInlineBinaryOperation::Generate() { |
| 1019 // Registers are not saved implicitly for this stub, so we should not | 1019 // Registers are not saved implicitly for this stub, so we should not |
| 1020 // tread on the registers that were not passed to us. | 1020 // tread on the registers that were not passed to us. |
| 1021 if (Isolate::Current()->cpu_features()->IsSupported(SSE2) && | 1021 if (masm()->isolate()->cpu_features()->IsSupported(SSE2) && |
| 1022 ((op_ == Token::ADD) || | 1022 ((op_ == Token::ADD) || |
| 1023 (op_ == Token::SUB) || | 1023 (op_ == Token::SUB) || |
| 1024 (op_ == Token::MUL) || | 1024 (op_ == Token::MUL) || |
| 1025 (op_ == Token::DIV))) { | 1025 (op_ == Token::DIV))) { |
| 1026 CpuFeatures::Scope use_sse2(SSE2); | 1026 CpuFeatures::Scope use_sse2(SSE2); |
| 1027 Label call_runtime, after_alloc_failure; | 1027 Label call_runtime, after_alloc_failure; |
| 1028 Label left_smi, right_smi, load_right, do_op; | 1028 Label left_smi, right_smi, load_right, do_op; |
| 1029 if (!left_info_.IsSmi()) { | 1029 if (!left_info_.IsSmi()) { |
| 1030 __ test(left_, Immediate(kSmiTagMask)); | 1030 __ test(left_, Immediate(kSmiTagMask)); |
| 1031 __ j(zero, &left_smi); | 1031 __ j(zero, &left_smi); |
| (...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1147 } | 1147 } |
| 1148 | 1148 |
| 1149 __ bind(&non_smi_input_); | 1149 __ bind(&non_smi_input_); |
| 1150 | 1150 |
| 1151 if (rhs_is_constant) { | 1151 if (rhs_is_constant) { |
| 1152 __ bind(&constant_rhs_); | 1152 __ bind(&constant_rhs_); |
| 1153 // In this case the input is a heap object and it is in the dst_ register. | 1153 // In this case the input is a heap object and it is in the dst_ register. |
| 1154 // The left_ and right_ registers have not been initialized yet. | 1154 // The left_ and right_ registers have not been initialized yet. |
| 1155 __ mov(right_, Immediate(smi_value_)); | 1155 __ mov(right_, Immediate(smi_value_)); |
| 1156 __ mov(left_, Operand(dst_)); | 1156 __ mov(left_, Operand(dst_)); |
| 1157 if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) { | 1157 if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) { |
| 1158 __ jmp(entry_label()); | 1158 __ jmp(entry_label()); |
| 1159 return; | 1159 return; |
| 1160 } else { | 1160 } else { |
| 1161 CpuFeatures::Scope use_sse2(SSE2); | 1161 CpuFeatures::Scope use_sse2(SSE2); |
| 1162 __ JumpIfNotNumber(dst_, left_info_, entry_label()); | 1162 __ JumpIfNotNumber(dst_, left_info_, entry_label()); |
| 1163 __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label()); | 1163 __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label()); |
| 1164 __ SmiUntag(right_); | 1164 __ SmiUntag(right_); |
| 1165 } | 1165 } |
| 1166 } else { | 1166 } else { |
| 1167 // We know we have SSE2 here because otherwise the label is not linked (see | 1167 // We know we have SSE2 here because otherwise the label is not linked (see |
| (...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1260 // Put a heap number pointer in left_. | 1260 // Put a heap number pointer in left_. |
| 1261 __ bind(&answer_out_of_range_); | 1261 __ bind(&answer_out_of_range_); |
| 1262 SaveRegisters(); | 1262 SaveRegisters(); |
| 1263 if (mode_ == OVERWRITE_LEFT) { | 1263 if (mode_ == OVERWRITE_LEFT) { |
| 1264 __ test(left_, Immediate(kSmiTagMask)); | 1264 __ test(left_, Immediate(kSmiTagMask)); |
| 1265 __ j(not_zero, &allocation_ok); | 1265 __ j(not_zero, &allocation_ok); |
| 1266 } | 1266 } |
| 1267 // This trashes right_. | 1267 // This trashes right_. |
| 1268 __ AllocateHeapNumber(left_, right_, no_reg, &after_alloc_failure2); | 1268 __ AllocateHeapNumber(left_, right_, no_reg, &after_alloc_failure2); |
| 1269 __ bind(&allocation_ok); | 1269 __ bind(&allocation_ok); |
| 1270 if (Isolate::Current()->cpu_features()->IsSupported(SSE2) && | 1270 if (masm()->isolate()->cpu_features()->IsSupported(SSE2) && |
| 1271 op_ != Token::SHR) { | 1271 op_ != Token::SHR) { |
| 1272 CpuFeatures::Scope use_sse2(SSE2); | 1272 CpuFeatures::Scope use_sse2(SSE2); |
| 1273 ASSERT(Token::IsBitOp(op_)); | 1273 ASSERT(Token::IsBitOp(op_)); |
| 1274 // Signed conversion. | 1274 // Signed conversion. |
| 1275 __ cvtsi2sd(xmm0, Operand(dst_)); | 1275 __ cvtsi2sd(xmm0, Operand(dst_)); |
| 1276 __ movdbl(FieldOperand(left_, HeapNumber::kValueOffset), xmm0); | 1276 __ movdbl(FieldOperand(left_, HeapNumber::kValueOffset), xmm0); |
| 1277 } else { | 1277 } else { |
| 1278 if (op_ == Token::SHR) { | 1278 if (op_ == Token::SHR) { |
| 1279 __ push(Immediate(0)); // High word of unsigned value. | 1279 __ push(Immediate(0)); // High word of unsigned value. |
| 1280 __ push(dst_); | 1280 __ push(dst_); |
| (...skipping 1744 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3025 } else { | 3025 } else { |
| 3026 // Do the smi check, then the comparison. | 3026 // Do the smi check, then the comparison. |
| 3027 __ test(left_reg, Immediate(kSmiTagMask)); | 3027 __ test(left_reg, Immediate(kSmiTagMask)); |
| 3028 is_smi.Branch(zero, left_side, right_side); | 3028 is_smi.Branch(zero, left_side, right_side); |
| 3029 } | 3029 } |
| 3030 | 3030 |
| 3031 // Jump or fall through to here if we are comparing a non-smi to a | 3031 // Jump or fall through to here if we are comparing a non-smi to a |
| 3032 // constant smi. If the non-smi is a heap number and this is not | 3032 // constant smi. If the non-smi is a heap number and this is not |
| 3033 // a loop condition, inline the floating point code. | 3033 // a loop condition, inline the floating point code. |
| 3034 if (!is_loop_condition && | 3034 if (!is_loop_condition && |
| 3035 Isolate::Current()->cpu_features()->IsSupported(SSE2)) { | 3035 masm()->isolate()->cpu_features()->IsSupported(SSE2)) { |
| 3036 // Right side is a constant smi and left side has been checked | 3036 // Right side is a constant smi and left side has been checked |
| 3037 // not to be a smi. | 3037 // not to be a smi. |
| 3038 CpuFeatures::Scope use_sse2(SSE2); | 3038 CpuFeatures::Scope use_sse2(SSE2); |
| 3039 JumpTarget not_number; | 3039 JumpTarget not_number; |
| 3040 __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset), | 3040 __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset), |
| 3041 Immediate(FACTORY->heap_number_map())); | 3041 Immediate(FACTORY->heap_number_map())); |
| 3042 not_number.Branch(not_equal, left_side); | 3042 not_number.Branch(not_equal, left_side); |
| 3043 __ movdbl(xmm1, | 3043 __ movdbl(xmm1, |
| 3044 FieldOperand(left_reg, HeapNumber::kValueOffset)); | 3044 FieldOperand(left_reg, HeapNumber::kValueOffset)); |
| 3045 int value = Smi::cast(*right_val)->value(); | 3045 int value = Smi::cast(*right_val)->value(); |
| (...skipping 143 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3189 | 3189 |
| 3190 | 3190 |
| 3191 void CodeGenerator::GenerateInlineNumberComparison(Result* left_side, | 3191 void CodeGenerator::GenerateInlineNumberComparison(Result* left_side, |
| 3192 Result* right_side, | 3192 Result* right_side, |
| 3193 Condition cc, | 3193 Condition cc, |
| 3194 ControlDestination* dest) { | 3194 ControlDestination* dest) { |
| 3195 ASSERT(left_side->is_register()); | 3195 ASSERT(left_side->is_register()); |
| 3196 ASSERT(right_side->is_register()); | 3196 ASSERT(right_side->is_register()); |
| 3197 | 3197 |
| 3198 JumpTarget not_numbers; | 3198 JumpTarget not_numbers; |
| 3199 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) { | 3199 if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) { |
| 3200 CpuFeatures::Scope use_sse2(SSE2); | 3200 CpuFeatures::Scope use_sse2(SSE2); |
| 3201 | 3201 |
| 3202 // Load left and right operand into registers xmm0 and xmm1 and compare. | 3202 // Load left and right operand into registers xmm0 and xmm1 and compare. |
| 3203 LoadComparisonOperandSSE2(masm_, left_side, xmm0, left_side, right_side, | 3203 LoadComparisonOperandSSE2(masm_, left_side, xmm0, left_side, right_side, |
| 3204 &not_numbers); | 3204 &not_numbers); |
| 3205 LoadComparisonOperandSSE2(masm_, right_side, xmm1, left_side, right_side, | 3205 LoadComparisonOperandSSE2(masm_, right_side, xmm1, left_side, right_side, |
| 3206 &not_numbers); | 3206 &not_numbers); |
| 3207 __ ucomisd(xmm0, xmm1); | 3207 __ ucomisd(xmm0, xmm1); |
| 3208 } else { | 3208 } else { |
| 3209 Label check_right, compare; | 3209 Label check_right, compare; |
| (...skipping 129 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3339 __ j(below, &build_args); | 3339 __ j(below, &build_args); |
| 3340 | 3340 |
| 3341 // Check that applicand.apply is Function.prototype.apply. | 3341 // Check that applicand.apply is Function.prototype.apply. |
| 3342 __ mov(eax, Operand(esp, kPointerSize)); | 3342 __ mov(eax, Operand(esp, kPointerSize)); |
| 3343 __ test(eax, Immediate(kSmiTagMask)); | 3343 __ test(eax, Immediate(kSmiTagMask)); |
| 3344 __ j(zero, &build_args); | 3344 __ j(zero, &build_args); |
| 3345 __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx); | 3345 __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx); |
| 3346 __ j(not_equal, &build_args); | 3346 __ j(not_equal, &build_args); |
| 3347 __ mov(ecx, FieldOperand(eax, JSFunction::kCodeEntryOffset)); | 3347 __ mov(ecx, FieldOperand(eax, JSFunction::kCodeEntryOffset)); |
| 3348 __ sub(Operand(ecx), Immediate(Code::kHeaderSize - kHeapObjectTag)); | 3348 __ sub(Operand(ecx), Immediate(Code::kHeaderSize - kHeapObjectTag)); |
| 3349 Handle<Code> apply_code(Isolate::Current()->builtins()->builtin( | 3349 Handle<Code> apply_code(masm()->isolate()->builtins()->builtin( |
| 3350 Builtins::FunctionApply)); | 3350 Builtins::FunctionApply)); |
| 3351 __ cmp(Operand(ecx), Immediate(apply_code)); | 3351 __ cmp(Operand(ecx), Immediate(apply_code)); |
| 3352 __ j(not_equal, &build_args); | 3352 __ j(not_equal, &build_args); |
| 3353 | 3353 |
| 3354 // Check that applicand is a function. | 3354 // Check that applicand is a function. |
| 3355 __ mov(edi, Operand(esp, 2 * kPointerSize)); | 3355 __ mov(edi, Operand(esp, 2 * kPointerSize)); |
| 3356 __ test(edi, Immediate(kSmiTagMask)); | 3356 __ test(edi, Immediate(kSmiTagMask)); |
| 3357 __ j(zero, &build_args); | 3357 __ j(zero, &build_args); |
| 3358 __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); | 3358 __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); |
| 3359 __ j(not_equal, &build_args); | 3359 __ j(not_equal, &build_args); |
| (...skipping 106 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3466 | 3466 |
| 3467 void DeferredStackCheck::Generate() { | 3467 void DeferredStackCheck::Generate() { |
| 3468 StackCheckStub stub; | 3468 StackCheckStub stub; |
| 3469 __ CallStub(&stub); | 3469 __ CallStub(&stub); |
| 3470 } | 3470 } |
| 3471 | 3471 |
| 3472 | 3472 |
| 3473 void CodeGenerator::CheckStack() { | 3473 void CodeGenerator::CheckStack() { |
| 3474 DeferredStackCheck* deferred = new DeferredStackCheck; | 3474 DeferredStackCheck* deferred = new DeferredStackCheck; |
| 3475 ExternalReference stack_limit = | 3475 ExternalReference stack_limit = |
| 3476 ExternalReference::address_of_stack_limit(); | 3476 ExternalReference::address_of_stack_limit(masm()->isolate()); |
| 3477 __ cmp(esp, Operand::StaticVariable(stack_limit)); | 3477 __ cmp(esp, Operand::StaticVariable(stack_limit)); |
| 3478 deferred->Branch(below); | 3478 deferred->Branch(below); |
| 3479 deferred->BindExit(); | 3479 deferred->BindExit(); |
| 3480 } | 3480 } |
| 3481 | 3481 |
| 3482 | 3482 |
| 3483 void CodeGenerator::VisitAndSpill(Statement* statement) { | 3483 void CodeGenerator::VisitAndSpill(Statement* statement) { |
| 3484 ASSERT(in_spilled_code()); | 3484 ASSERT(in_spilled_code()); |
| 3485 set_in_spilled_code(false); | 3485 set_in_spilled_code(false); |
| 3486 Visit(statement); | 3486 Visit(statement); |
| (...skipping 1153 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4640 // After shadowing stops, the original targets are unshadowed and the | 4640 // After shadowing stops, the original targets are unshadowed and the |
| 4641 // ShadowTargets represent the formerly shadowing targets. | 4641 // ShadowTargets represent the formerly shadowing targets. |
| 4642 bool has_unlinks = false; | 4642 bool has_unlinks = false; |
| 4643 for (int i = 0; i < shadows.length(); i++) { | 4643 for (int i = 0; i < shadows.length(); i++) { |
| 4644 shadows[i]->StopShadowing(); | 4644 shadows[i]->StopShadowing(); |
| 4645 has_unlinks = has_unlinks || shadows[i]->is_linked(); | 4645 has_unlinks = has_unlinks || shadows[i]->is_linked(); |
| 4646 } | 4646 } |
| 4647 function_return_is_shadowed_ = function_return_was_shadowed; | 4647 function_return_is_shadowed_ = function_return_was_shadowed; |
| 4648 | 4648 |
| 4649 // Get an external reference to the handler address. | 4649 // Get an external reference to the handler address. |
| 4650 ExternalReference handler_address(Isolate::k_handler_address); | 4650 ExternalReference handler_address(Isolate::k_handler_address, |
| 4651 masm()->isolate()); |
| 4651 | 4652 |
| 4652 // Make sure that there's nothing left on the stack above the | 4653 // Make sure that there's nothing left on the stack above the |
| 4653 // handler structure. | 4654 // handler structure. |
| 4654 if (FLAG_debug_code) { | 4655 if (FLAG_debug_code) { |
| 4655 __ mov(eax, Operand::StaticVariable(handler_address)); | 4656 __ mov(eax, Operand::StaticVariable(handler_address)); |
| 4656 __ cmp(esp, Operand(eax)); | 4657 __ cmp(esp, Operand(eax)); |
| 4657 __ Assert(equal, "stack pointer should point to top handler"); | 4658 __ Assert(equal, "stack pointer should point to top handler"); |
| 4658 } | 4659 } |
| 4659 | 4660 |
| 4660 // If we can fall off the end of the try block, unlink from try chain. | 4661 // If we can fall off the end of the try block, unlink from try chain. |
| (...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4766 // After shadowing stops, the original targets are unshadowed and the | 4767 // After shadowing stops, the original targets are unshadowed and the |
| 4767 // ShadowTargets represent the formerly shadowing targets. | 4768 // ShadowTargets represent the formerly shadowing targets. |
| 4768 int nof_unlinks = 0; | 4769 int nof_unlinks = 0; |
| 4769 for (int i = 0; i < shadows.length(); i++) { | 4770 for (int i = 0; i < shadows.length(); i++) { |
| 4770 shadows[i]->StopShadowing(); | 4771 shadows[i]->StopShadowing(); |
| 4771 if (shadows[i]->is_linked()) nof_unlinks++; | 4772 if (shadows[i]->is_linked()) nof_unlinks++; |
| 4772 } | 4773 } |
| 4773 function_return_is_shadowed_ = function_return_was_shadowed; | 4774 function_return_is_shadowed_ = function_return_was_shadowed; |
| 4774 | 4775 |
| 4775 // Get an external reference to the handler address. | 4776 // Get an external reference to the handler address. |
| 4776 ExternalReference handler_address(Isolate::k_handler_address); | 4777 ExternalReference handler_address(Isolate::k_handler_address, |
| 4778 masm()->isolate()); |
| 4777 | 4779 |
| 4778 // If we can fall off the end of the try block, unlink from the try | 4780 // If we can fall off the end of the try block, unlink from the try |
| 4779 // chain and set the state on the frame to FALLING. | 4781 // chain and set the state on the frame to FALLING. |
| 4780 if (has_valid_frame()) { | 4782 if (has_valid_frame()) { |
| 4781 // The next handler address is on top of the frame. | 4783 // The next handler address is on top of the frame. |
| 4782 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | 4784 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); |
| 4783 frame_->EmitPop(Operand::StaticVariable(handler_address)); | 4785 frame_->EmitPop(Operand::StaticVariable(handler_address)); |
| 4784 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); | 4786 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); |
| 4785 | 4787 |
| 4786 // Fake a top of stack value (unneeded when FALLING) and set the | 4788 // Fake a top of stack value (unneeded when FALLING) and set the |
| (...skipping 2652 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 7439 __ jmp(&heapnumber_allocated); | 7441 __ jmp(&heapnumber_allocated); |
| 7440 | 7442 |
| 7441 __ bind(&slow_allocate_heapnumber); | 7443 __ bind(&slow_allocate_heapnumber); |
| 7442 // Allocate a heap number. | 7444 // Allocate a heap number. |
| 7443 __ CallRuntime(Runtime::kNumberAlloc, 0); | 7445 __ CallRuntime(Runtime::kNumberAlloc, 0); |
| 7444 __ mov(edi, eax); | 7446 __ mov(edi, eax); |
| 7445 | 7447 |
| 7446 __ bind(&heapnumber_allocated); | 7448 __ bind(&heapnumber_allocated); |
| 7447 | 7449 |
| 7448 __ PrepareCallCFunction(0, ebx); | 7450 __ PrepareCallCFunction(0, ebx); |
| 7449 __ CallCFunction(ExternalReference::random_uint32_function(), 0); | 7451 __ CallCFunction(ExternalReference::random_uint32_function(masm()->isolate()), |
| 7452 0); |
| 7450 | 7453 |
| 7451 // Convert 32 random bits in eax to 0.(32 random bits) in a double | 7454 // Convert 32 random bits in eax to 0.(32 random bits) in a double |
| 7452 // by computing: | 7455 // by computing: |
| 7453 // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)). | 7456 // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)). |
| 7454 // This is implemented on both SSE2 and FPU. | 7457 // This is implemented on both SSE2 and FPU. |
| 7455 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) { | 7458 if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) { |
| 7456 CpuFeatures::Scope fscope(SSE2); | 7459 CpuFeatures::Scope fscope(SSE2); |
| 7457 __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single. | 7460 __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single. |
| 7458 __ movd(xmm1, Operand(ebx)); | 7461 __ movd(xmm1, Operand(ebx)); |
| 7459 __ movd(xmm0, Operand(eax)); | 7462 __ movd(xmm0, Operand(eax)); |
| 7460 __ cvtss2sd(xmm1, xmm1); | 7463 __ cvtss2sd(xmm1, xmm1); |
| 7461 __ pxor(xmm0, xmm1); | 7464 __ pxor(xmm0, xmm1); |
| 7462 __ subsd(xmm0, xmm1); | 7465 __ subsd(xmm0, xmm1); |
| 7463 __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0); | 7466 __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0); |
| 7464 } else { | 7467 } else { |
| 7465 // 0x4130000000000000 is 1.0 x 2^20 as a double. | 7468 // 0x4130000000000000 is 1.0 x 2^20 as a double. |
| (...skipping 196 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 7662 } | 7665 } |
| 7663 | 7666 |
| 7664 | 7667 |
| 7665 void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) { | 7668 void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) { |
| 7666 ASSERT_EQ(2, args->length()); | 7669 ASSERT_EQ(2, args->length()); |
| 7667 | 7670 |
| 7668 ASSERT_NE(NULL, args->at(0)->AsLiteral()); | 7671 ASSERT_NE(NULL, args->at(0)->AsLiteral()); |
| 7669 int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value(); | 7672 int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value(); |
| 7670 | 7673 |
| 7671 Handle<FixedArray> jsfunction_result_caches( | 7674 Handle<FixedArray> jsfunction_result_caches( |
| 7672 Isolate::Current()->global_context()->jsfunction_result_caches()); | 7675 masm()->isolate()->global_context()->jsfunction_result_caches()); |
| 7673 if (jsfunction_result_caches->length() <= cache_id) { | 7676 if (jsfunction_result_caches->length() <= cache_id) { |
| 7674 __ Abort("Attempt to use undefined cache."); | 7677 __ Abort("Attempt to use undefined cache."); |
| 7675 frame_->Push(FACTORY->undefined_value()); | 7678 frame_->Push(FACTORY->undefined_value()); |
| 7676 return; | 7679 return; |
| 7677 } | 7680 } |
| 7678 | 7681 |
| 7679 Load(args->at(1)); | 7682 Load(args->at(1)); |
| 7680 Result key = frame_->Pop(); | 7683 Result key = frame_->Pop(); |
| 7681 key.ToRegister(); | 7684 key.ToRegister(); |
| 7682 | 7685 |
| (...skipping 168 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 7851 | 7854 |
| 7852 | 7855 |
| 7853 // Generates the Math.pow method. Only handles special cases and | 7856 // Generates the Math.pow method. Only handles special cases and |
| 7854 // branches to the runtime system for everything else. Please note | 7857 // branches to the runtime system for everything else. Please note |
| 7855 // that this function assumes that the callsite has executed ToNumber | 7858 // that this function assumes that the callsite has executed ToNumber |
| 7856 // on both arguments. | 7859 // on both arguments. |
| 7857 void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) { | 7860 void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) { |
| 7858 ASSERT(args->length() == 2); | 7861 ASSERT(args->length() == 2); |
| 7859 Load(args->at(0)); | 7862 Load(args->at(0)); |
| 7860 Load(args->at(1)); | 7863 Load(args->at(1)); |
| 7861 if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) { | 7864 if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) { |
| 7862 Result res = frame_->CallRuntime(Runtime::kMath_pow, 2); | 7865 Result res = frame_->CallRuntime(Runtime::kMath_pow, 2); |
| 7863 frame_->Push(&res); | 7866 frame_->Push(&res); |
| 7864 } else { | 7867 } else { |
| 7865 CpuFeatures::Scope use_sse2(SSE2); | 7868 CpuFeatures::Scope use_sse2(SSE2); |
| 7866 Label allocate_return; | 7869 Label allocate_return; |
| 7867 // Load the two operands while leaving the values on the frame. | 7870 // Load the two operands while leaving the values on the frame. |
| 7868 frame()->Dup(); | 7871 frame()->Dup(); |
| 7869 Result exponent = frame()->Pop(); | 7872 Result exponent = frame()->Pop(); |
| 7870 exponent.ToRegister(); | 7873 exponent.ToRegister(); |
| 7871 frame()->Spill(exponent.reg()); | 7874 frame()->Spill(exponent.reg()); |
| (...skipping 196 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 8068 frame_->Push(&result); | 8071 frame_->Push(&result); |
| 8069 } | 8072 } |
| 8070 | 8073 |
| 8071 | 8074 |
| 8072 // Generates the Math.sqrt method. Please note - this function assumes that | 8075 // Generates the Math.sqrt method. Please note - this function assumes that |
| 8073 // the callsite has executed ToNumber on the argument. | 8076 // the callsite has executed ToNumber on the argument. |
| 8074 void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) { | 8077 void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) { |
| 8075 ASSERT_EQ(args->length(), 1); | 8078 ASSERT_EQ(args->length(), 1); |
| 8076 Load(args->at(0)); | 8079 Load(args->at(0)); |
| 8077 | 8080 |
| 8078 if (!Isolate::Current()->cpu_features()->IsSupported(SSE2)) { | 8081 if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) { |
| 8079 Result result = frame()->CallRuntime(Runtime::kMath_sqrt, 1); | 8082 Result result = frame()->CallRuntime(Runtime::kMath_sqrt, 1); |
| 8080 frame()->Push(&result); | 8083 frame()->Push(&result); |
| 8081 } else { | 8084 } else { |
| 8082 CpuFeatures::Scope use_sse2(SSE2); | 8085 CpuFeatures::Scope use_sse2(SSE2); |
| 8083 // Leave original value on the frame if we need to call runtime. | 8086 // Leave original value on the frame if we need to call runtime. |
| 8084 frame()->Dup(); | 8087 frame()->Dup(); |
| 8085 Result result = frame()->Pop(); | 8088 Result result = frame()->Pop(); |
| 8086 result.ToRegister(); | 8089 result.ToRegister(); |
| 8087 frame()->Spill(result.reg()); | 8090 frame()->Spill(result.reg()); |
| 8088 Label runtime; | 8091 Label runtime; |
| (...skipping 1292 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 9381 bool is_contextual_; | 9384 bool is_contextual_; |
| 9382 bool is_dont_delete_; | 9385 bool is_dont_delete_; |
| 9383 }; | 9386 }; |
| 9384 | 9387 |
| 9385 | 9388 |
| 9386 void DeferredReferenceGetNamedValue::Generate() { | 9389 void DeferredReferenceGetNamedValue::Generate() { |
| 9387 if (!receiver_.is(eax)) { | 9390 if (!receiver_.is(eax)) { |
| 9388 __ mov(eax, receiver_); | 9391 __ mov(eax, receiver_); |
| 9389 } | 9392 } |
| 9390 __ Set(ecx, Immediate(name_)); | 9393 __ Set(ecx, Immediate(name_)); |
| 9391 Handle<Code> ic(Isolate::Current()->builtins()->builtin( | 9394 Handle<Code> ic(masm()->isolate()->builtins()->builtin( |
| 9392 Builtins::LoadIC_Initialize)); | 9395 Builtins::LoadIC_Initialize)); |
| 9393 RelocInfo::Mode mode = is_contextual_ | 9396 RelocInfo::Mode mode = is_contextual_ |
| 9394 ? RelocInfo::CODE_TARGET_CONTEXT | 9397 ? RelocInfo::CODE_TARGET_CONTEXT |
| 9395 : RelocInfo::CODE_TARGET; | 9398 : RelocInfo::CODE_TARGET; |
| 9396 __ call(ic, mode); | 9399 __ call(ic, mode); |
| 9397 // The call must be followed by: | 9400 // The call must be followed by: |
| 9398 // - a test eax instruction to indicate that the inobject property | 9401 // - a test eax instruction to indicate that the inobject property |
| 9399 // case was inlined. | 9402 // case was inlined. |
| 9400 // - a mov ecx or mov edx instruction to indicate that the | 9403 // - a mov ecx or mov edx instruction to indicate that the |
| 9401 // contextual property load was inlined. | 9404 // contextual property load was inlined. |
| (...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 9461 } | 9464 } |
| 9462 } else { | 9465 } else { |
| 9463 __ xchg(edx, eax); | 9466 __ xchg(edx, eax); |
| 9464 } | 9467 } |
| 9465 // Calculate the delta from the IC call instruction to the map check | 9468 // Calculate the delta from the IC call instruction to the map check |
| 9466 // cmp instruction in the inlined version. This delta is stored in | 9469 // cmp instruction in the inlined version. This delta is stored in |
| 9467 // a test(eax, delta) instruction after the call so that we can find | 9470 // a test(eax, delta) instruction after the call so that we can find |
| 9468 // it in the IC initialization code and patch the cmp instruction. | 9471 // it in the IC initialization code and patch the cmp instruction. |
| 9469 // This means that we cannot allow test instructions after calls to | 9472 // This means that we cannot allow test instructions after calls to |
| 9470 // KeyedLoadIC stubs in other places. | 9473 // KeyedLoadIC stubs in other places. |
| 9471 Handle<Code> ic(Isolate::Current()->builtins()->builtin( | 9474 Handle<Code> ic(masm()->isolate()->builtins()->builtin( |
| 9472 Builtins::KeyedLoadIC_Initialize)); | 9475 Builtins::KeyedLoadIC_Initialize)); |
| 9473 __ call(ic, RelocInfo::CODE_TARGET); | 9476 __ call(ic, RelocInfo::CODE_TARGET); |
| 9474 // The delta from the start of the map-compare instruction to the | 9477 // The delta from the start of the map-compare instruction to the |
| 9475 // test instruction. We use masm_-> directly here instead of the __ | 9478 // test instruction. We use masm_-> directly here instead of the __ |
| 9476 // macro because the macro sometimes uses macro expansion to turn | 9479 // macro because the macro sometimes uses macro expansion to turn |
| 9477 // into something that can't return a value. This is encountered | 9480 // into something that can't return a value. This is encountered |
| 9478 // when doing generated code coverage tests. | 9481 // when doing generated code coverage tests. |
| 9479 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); | 9482 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); |
| 9480 // Here we use masm_-> instead of the __ macro because this is the | 9483 // Here we use masm_-> instead of the __ macro because this is the |
| 9481 // instruction that gets patched and coverage code gets in the way. | 9484 // instruction that gets patched and coverage code gets in the way. |
| (...skipping 81 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 9563 } | 9566 } |
| 9564 } | 9567 } |
| 9565 } else { // Key is not in edx or ecx. | 9568 } else { // Key is not in edx or ecx. |
| 9566 if (!receiver_.is(edx)) { | 9569 if (!receiver_.is(edx)) { |
| 9567 __ mov(edx, receiver_); | 9570 __ mov(edx, receiver_); |
| 9568 } | 9571 } |
| 9569 __ mov(ecx, key_); | 9572 __ mov(ecx, key_); |
| 9570 } | 9573 } |
| 9571 | 9574 |
| 9572 // Call the IC stub. | 9575 // Call the IC stub. |
| 9573 Handle<Code> ic(Isolate::Current()->builtins()->builtin( | 9576 Handle<Code> ic(masm()->isolate()->builtins()->builtin( |
| 9574 (strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict | 9577 (strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict |
| 9575 : Builtins::KeyedStoreIC_Initialize)); | 9578 : Builtins::KeyedStoreIC_Initialize)); |
| 9576 __ call(ic, RelocInfo::CODE_TARGET); | 9579 __ call(ic, RelocInfo::CODE_TARGET); |
| 9577 // The delta from the start of the map-compare instruction to the | 9580 // The delta from the start of the map-compare instruction to the |
| 9578 // test instruction. We use masm_-> directly here instead of the | 9581 // test instruction. We use masm_-> directly here instead of the |
| 9579 // __ macro because the macro sometimes uses macro expansion to turn | 9582 // __ macro because the macro sometimes uses macro expansion to turn |
| 9580 // into something that can't return a value. This is encountered | 9583 // into something that can't return a value. This is encountered |
| 9581 // when doing generated code coverage tests. | 9584 // when doing generated code coverage tests. |
| 9582 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); | 9585 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); |
| 9583 // Here we use masm_-> instead of the __ macro because this is the | 9586 // Here we use masm_-> instead of the __ macro because this is the |
| 9584 // instruction that gets patched and coverage code gets in the way. | 9587 // instruction that gets patched and coverage code gets in the way. |
| 9585 masm_->test(eax, Immediate(-delta_to_patch_site)); | 9588 masm_->test(eax, Immediate(-delta_to_patch_site)); |
| 9586 // Restore value (returned from store IC) register. | 9589 // Restore value (returned from store IC) register. |
| 9587 if (!old_value.is(eax)) __ mov(old_value, eax); | 9590 if (!old_value.is(eax)) __ mov(old_value, eax); |
| 9588 } | 9591 } |
| 9589 | 9592 |
| 9590 | 9593 |
| 9591 Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) { | 9594 Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) { |
| 9592 #ifdef DEBUG | 9595 #ifdef DEBUG |
| 9593 int original_height = frame()->height(); | 9596 int original_height = frame()->height(); |
| 9594 #endif | 9597 #endif |
| 9595 | 9598 |
| 9596 bool contextual_load_in_builtin = | 9599 bool contextual_load_in_builtin = |
| 9597 is_contextual && | 9600 is_contextual && |
| 9598 (Isolate::Current()->bootstrapper()->IsActive() || | 9601 (masm()->isolate()->bootstrapper()->IsActive() || |
| 9599 (!info_->closure().is_null() && info_->closure()->IsBuiltin())); | 9602 (!info_->closure().is_null() && info_->closure()->IsBuiltin())); |
| 9600 | 9603 |
| 9601 Result result; | 9604 Result result; |
| 9602 // Do not inline in the global code or when not in loop. | 9605 // Do not inline in the global code or when not in loop. |
| 9603 if (scope()->is_global_scope() || | 9606 if (scope()->is_global_scope() || |
| 9604 loop_nesting() == 0 || | 9607 loop_nesting() == 0 || |
| 9605 contextual_load_in_builtin) { | 9608 contextual_load_in_builtin) { |
| 9606 Comment cmnt(masm(), "[ Load from named Property"); | 9609 Comment cmnt(masm(), "[ Load from named Property"); |
| 9607 frame()->Push(name); | 9610 frame()->Push(name); |
| 9608 | 9611 |
| (...skipping 577 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 10186 int stack_offset = 0; // Update if we change the stack height. | 10189 int stack_offset = 0; // Update if we change the stack height. |
| 10187 | 10190 |
| 10188 if (FLAG_debug_code) { | 10191 if (FLAG_debug_code) { |
| 10189 __ cmp(Operand(esp, kSizeOffset + stack_offset), | 10192 __ cmp(Operand(esp, kSizeOffset + stack_offset), |
| 10190 Immediate(kMinComplexMemCopy)); | 10193 Immediate(kMinComplexMemCopy)); |
| 10191 Label ok; | 10194 Label ok; |
| 10192 __ j(greater_equal, &ok); | 10195 __ j(greater_equal, &ok); |
| 10193 __ int3(); | 10196 __ int3(); |
| 10194 __ bind(&ok); | 10197 __ bind(&ok); |
| 10195 } | 10198 } |
| 10196 if (Isolate::Current()->cpu_features()->IsSupported(SSE2)) { | 10199 if (masm.isolate()->cpu_features()->IsSupported(SSE2)) { |
| 10197 CpuFeatures::Scope enable(SSE2); | 10200 CpuFeatures::Scope enable(SSE2); |
| 10198 __ push(edi); | 10201 __ push(edi); |
| 10199 __ push(esi); | 10202 __ push(esi); |
| 10200 stack_offset += 2 * kPointerSize; | 10203 stack_offset += 2 * kPointerSize; |
| 10201 Register dst = edi; | 10204 Register dst = edi; |
| 10202 Register src = esi; | 10205 Register src = esi; |
| 10203 Register count = ecx; | 10206 Register count = ecx; |
| 10204 __ mov(dst, Operand(esp, stack_offset + kDestinationOffset)); | 10207 __ mov(dst, Operand(esp, stack_offset + kDestinationOffset)); |
| 10205 __ mov(src, Operand(esp, stack_offset + kSourceOffset)); | 10208 __ mov(src, Operand(esp, stack_offset + kSourceOffset)); |
| 10206 __ mov(count, Operand(esp, stack_offset + kSizeOffset)); | 10209 __ mov(count, Operand(esp, stack_offset + kSizeOffset)); |
| (...skipping 160 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 10367 memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size); | 10370 memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size); |
| 10368 CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size); | 10371 CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size); |
| 10369 return FUNCTION_CAST<MemCopyFunction>(chunk->GetStartAddress()); | 10372 return FUNCTION_CAST<MemCopyFunction>(chunk->GetStartAddress()); |
| 10370 } | 10373 } |
| 10371 | 10374 |
| 10372 #undef __ | 10375 #undef __ |
| 10373 | 10376 |
| 10374 } } // namespace v8::internal | 10377 } } // namespace v8::internal |
| 10375 | 10378 |
| 10376 #endif // V8_TARGET_ARCH_IA32 | 10379 #endif // V8_TARGET_ARCH_IA32 |
| OLD | NEW |