Chromium Code Reviews

Diff: src/ia32/codegen-ia32.cc

Issue 6670119: VM initialization refactoring. (Closed)
Patch Set: Created 9 years, 8 months ago
Lines marked "-" show the old code, lines marked "+" show the new code; unchanged lines appear once with their line number.
1   // Copyright 2010 the V8 project authors. All rights reserved.
2   // Redistribution and use in source and binary forms, with or without
3   // modification, are permitted provided that the following conditions are
4   // met:
5   //
6   //     * Redistributions of source code must retain the above copyright
7   //       notice, this list of conditions and the following disclaimer.
8   //     * Redistributions in binary form must reproduce the above
9   //       copyright notice, this list of conditions and the following
10  //       disclaimer in the documentation and/or other materials provided
(...skipping 538 matching lines...)
549   void CodeGenerator::ConvertInt32ResultToNumber(Result* value) {
550     ASSERT(value->is_untagged_int32());
551     if (value->is_register()) {
552       Register val = value->reg();
553       JumpTarget done;
554       __ add(val, Operand(val));
555       done.Branch(no_overflow, value);
556       __ sar(val, 1);
557       // If there was an overflow, bits 30 and 31 of the original number disagree.
558       __ xor_(val, 0x80000000u);
559 -     if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
559 +     if (CpuFeatures::IsSupported(SSE2)) {
560         CpuFeatures::Scope fscope(SSE2);
561         __ cvtsi2sd(xmm0, Operand(val));
562       } else {
563         // Move val to ST[0] in the FPU
564         // Push and pop are safe with respect to the virtual frame because
565         // all synced elements are below the actual stack pointer.
566         __ push(val);
567         __ fild_s(Operand(esp, 0));
568         __ pop(val);
569       }
570       Result scratch = allocator_->Allocate();
571       ASSERT(scratch.is_register());
572       Label allocation_failed;
573       __ AllocateHeapNumber(val, scratch.reg(),
574                             no_reg, &allocation_failed);
575       VirtualFrame* clone = new VirtualFrame(frame_);
576       scratch.Unuse();
577 -     if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
577 +     if (CpuFeatures::IsSupported(SSE2)) {
578         CpuFeatures::Scope fscope(SSE2);
579         __ movdbl(FieldOperand(val, HeapNumber::kValueOffset), xmm0);
580       } else {
581         __ fstp_d(FieldOperand(val, HeapNumber::kValueOffset));
582       }
583       done.Jump(value);
584
585       // Establish the virtual frame, cloned from where AllocateHeapNumber
586       // jumped to allocation_failed.
587       RegisterFile empty_regs;
588       SetFrame(clone, &empty_regs);
589       __ bind(&allocation_failed);
590 -     if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
590 +     if (!CpuFeatures::IsSupported(SSE2)) {
591         // Pop the value from the floating point stack.
592         __ fstp(0);
593       }
594       unsafe_bailout_->Jump();
595
596       done.Bind(value);
597     } else {
598       ASSERT(value->is_constant());
599     }
600     value->set_untagged_int32(false);
601     value->set_type_info(TypeInfo::Integer32());
602   }
603
604
605   void CodeGenerator::Load(Expression* expr) {
606   #ifdef DEBUG
607     int original_height = frame_->height();
608   #endif
609     ASSERT(!in_spilled_code());
610
611     // If the expression should be a side-effect-free 32-bit int computation,
612     // compile that SafeInt32 path, and a bailout path.
613     if (!in_safe_int32_mode() &&
614         safe_int32_mode_enabled() &&
615         expr->side_effect_free() &&
616         expr->num_bit_ops() > 2 &&
617 -       masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
617 +       CpuFeatures::IsSupported(SSE2)) {
618       BreakTarget unsafe_bailout;
619       JumpTarget done;
620       unsafe_bailout.set_expected_height(frame_->height());
621       LoadInSafeInt32Mode(expr, &unsafe_bailout);
622       done.Jump();
623
624       if (unsafe_bailout.is_linked()) {
625         unsafe_bailout.Bind();
626         LoadWithSafeInt32ModeDisabled(expr);
627       }
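The add/sar/xor sequence at lines 554-558 is the ia32 smi-tagging trick: a smi is the integer shifted left by one bit, "add val, val" performs that shift, and on overflow the original value has to be recovered. Below is a minimal self-contained C++ sketch of the recovery step; it is illustrative only, not V8 code, and the function name is made up.

#include <cassert>
#include <cstdint>

// After "add val, val" overflows, bit 31 of the doubled value is the original
// bit 30, so an arithmetic shift right restores every bit except the sign bit.
// Overflow means bits 30 and 31 of the original disagreed, so flipping the
// sign bit afterwards recovers the original value.
uint32_t RecoverAfterSmiTagOverflow(uint32_t doubled) {
  uint32_t shifted =
      static_cast<uint32_t>(static_cast<int32_t>(doubled) >> 1);  // sar val, 1
  return shifted ^ 0x80000000u;                                   // xor_ val, 0x80000000u
}

int main() {
  int32_t original = 0x40000001;  // does not fit in a 31-bit smi
  uint32_t doubled = static_cast<uint32_t>(original) << 1;  // what "add val, val" leaves behind
  assert(RecoverAfterSmiTagOverflow(doubled) == static_cast<uint32_t>(original));
  return 0;
}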
(...skipping 360 matching lines...)
988     OverwriteMode mode_;
989     Label answer_out_of_range_;
990     Label non_smi_input_;
991     Label constant_rhs_;
992     Smi* smi_value_;
993   };
994
995
996   Label* DeferredInlineBinaryOperation::NonSmiInputLabel() {
997     if (Token::IsBitOp(op_) &&
998 -       masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
998 +       CpuFeatures::IsSupported(SSE2)) {
999       return &non_smi_input_;
1000    } else {
1001      return entry_label();
1002    }
1003  }
1004
1005
1006  void DeferredInlineBinaryOperation::JumpToAnswerOutOfRange(Condition cond) {
1007    __ j(cond, &answer_out_of_range_);
1008  }
1009
1010
1011  void DeferredInlineBinaryOperation::JumpToConstantRhs(Condition cond,
1012                                                        Smi* smi_value) {
1013    smi_value_ = smi_value;
1014    __ j(cond, &constant_rhs_);
1015  }
1016
1017
1018  void DeferredInlineBinaryOperation::Generate() {
1019    // Registers are not saved implicitly for this stub, so we should not
1020    // tread on the registers that were not passed to us.
1021 -  if (masm()->isolate()->cpu_features()->IsSupported(SSE2) &&
1021 +  if (CpuFeatures::IsSupported(SSE2) &&
1022        ((op_ == Token::ADD) ||
1023         (op_ == Token::SUB) ||
1024         (op_ == Token::MUL) ||
1025         (op_ == Token::DIV))) {
1026      CpuFeatures::Scope use_sse2(SSE2);
1027      Label call_runtime, after_alloc_failure;
1028      Label left_smi, right_smi, load_right, do_op;
1029      if (!left_info_.IsSmi()) {
1030        __ test(left_, Immediate(kSmiTagMask));
1031        __ j(zero, &left_smi);
(...skipping 115 matching lines...)
1147    }
1148
1149    __ bind(&non_smi_input_);
1150
1151    if (rhs_is_constant) {
1152      __ bind(&constant_rhs_);
1153      // In this case the input is a heap object and it is in the dst_ register.
1154      // The left_ and right_ registers have not been initialized yet.
1155      __ mov(right_, Immediate(smi_value_));
1156      __ mov(left_, Operand(dst_));
1157 -    if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
1157 +    if (!CpuFeatures::IsSupported(SSE2)) {
1158        __ jmp(entry_label());
1159        return;
1160      } else {
1161        CpuFeatures::Scope use_sse2(SSE2);
1162        __ JumpIfNotNumber(dst_, left_info_, entry_label());
1163        __ ConvertToInt32(dst_, left_, dst_, left_info_, entry_label());
1164        __ SmiUntag(right_);
1165      }
1166    } else {
1167      // We know we have SSE2 here because otherwise the label is not linked (see
(...skipping 92 matching lines...)
1260    // Put a heap number pointer in left_.
1261    __ bind(&answer_out_of_range_);
1262    SaveRegisters();
1263    if (mode_ == OVERWRITE_LEFT) {
1264      __ test(left_, Immediate(kSmiTagMask));
1265      __ j(not_zero, &allocation_ok);
1266    }
1267    // This trashes right_.
1268    __ AllocateHeapNumber(left_, right_, no_reg, &after_alloc_failure2);
1269    __ bind(&allocation_ok);
1270 -  if (masm()->isolate()->cpu_features()->IsSupported(SSE2) &&
1270 +  if (CpuFeatures::IsSupported(SSE2) &&
1271        op_ != Token::SHR) {
1272      CpuFeatures::Scope use_sse2(SSE2);
1273      ASSERT(Token::IsBitOp(op_));
1274      // Signed conversion.
1275      __ cvtsi2sd(xmm0, Operand(dst_));
1276      __ movdbl(FieldOperand(left_, HeapNumber::kValueOffset), xmm0);
1277    } else {
1278      if (op_ == Token::SHR) {
1279        __ push(Immediate(0));  // High word of unsigned value.
1280        __ push(dst_);
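The Token::SHR special case above exists because cvtsi2sd interprets its 32-bit source as signed, while the result of an unsigned shift can exceed INT32_MAX; pushing a zero high word before the value widens it to 64 bits so the FPU load in the elided lines can presumably treat it as unsigned. A small self-contained C++ illustration of the difference (illustrative only, two's-complement wraparound assumed):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t shr_result = 0x90000000u;  // a shift result that does not fit in int32
  // What a signed 32-bit conversion (cvtsi2sd-style) would produce: negative.
  double as_signed = static_cast<double>(static_cast<int32_t>(shr_result));
  // Zero-extending to 64 bits first, as the push(0)/push(dst_) pair arranges,
  // keeps the value unsigned.
  double as_unsigned = static_cast<double>(static_cast<uint64_t>(shr_result));
  std::printf("signed: %.0f  zero-extended: %.0f\n", as_signed, as_unsigned);
  return 0;
}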
(...skipping 1744 matching lines...)
3025      } else {
3026        // Do the smi check, then the comparison.
3027        __ test(left_reg, Immediate(kSmiTagMask));
3028        is_smi.Branch(zero, left_side, right_side);
3029      }
3030
3031      // Jump or fall through to here if we are comparing a non-smi to a
3032      // constant smi.  If the non-smi is a heap number and this is not
3033      // a loop condition, inline the floating point code.
3034      if (!is_loop_condition &&
3035 -        masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
3035 +        CpuFeatures::IsSupported(SSE2)) {
3036        // Right side is a constant smi and left side has been checked
3037        // not to be a smi.
3038        CpuFeatures::Scope use_sse2(SSE2);
3039        JumpTarget not_number;
3040        __ cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
3041               Immediate(FACTORY->heap_number_map()));
3042        not_number.Branch(not_equal, left_side);
3043        __ movdbl(xmm1,
3044                  FieldOperand(left_reg, HeapNumber::kValueOffset));
3045        int value = Smi::cast(*right_val)->value();
(...skipping 143 matching lines...)
3189
3190
3191  void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
3192                                                     Result* right_side,
3193                                                     Condition cc,
3194                                                     ControlDestination* dest) {
3195    ASSERT(left_side->is_register());
3196    ASSERT(right_side->is_register());
3197
3198    JumpTarget not_numbers;
3199 -  if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
3199 +  if (CpuFeatures::IsSupported(SSE2)) {
3200      CpuFeatures::Scope use_sse2(SSE2);
3201
3202      // Load left and right operand into registers xmm0 and xmm1 and compare.
3203      LoadComparisonOperandSSE2(masm_, left_side, xmm0, left_side, right_side,
3204                                &not_numbers);
3205      LoadComparisonOperandSSE2(masm_, right_side, xmm1, left_side, right_side,
3206                                &not_numbers);
3207      __ ucomisd(xmm0, xmm1);
3208    } else {
3209      Label check_right, compare;
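A note on the ucomisd above: it reports "unordered" when either operand is NaN, so the comparison code that consumes its flags has to treat NaN explicitly rather than relying on a plain equal/not-equal result. Ordinary C++ double comparisons behave the same way, as this tiny self-contained sketch (not V8 code) shows:

#include <cmath>
#include <cstdio>

int main() {
  double nan = std::nan("");
  // Every ordered comparison involving NaN is false, including nan == nan.
  std::printf("%d %d %d\n", nan < 1.0, nan > 1.0, nan == nan);  // prints 0 0 0
  return 0;
}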
(...skipping 4240 matching lines...)
7450
7451    __ PrepareCallCFunction(1, ebx);
7452    __ mov(Operand(esp, 0), Immediate(ExternalReference::isolate_address()));
7453    __ CallCFunction(ExternalReference::random_uint32_function(masm()->isolate()),
7454                     1);
7455
7456    // Convert 32 random bits in eax to 0.(32 random bits) in a double
7457    // by computing:
7458    // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
7459    // This is implemented on both SSE2 and FPU.
7460 -  if (masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
7460 +  if (CpuFeatures::IsSupported(SSE2)) {
7461      CpuFeatures::Scope fscope(SSE2);
7462      __ mov(ebx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
7463      __ movd(xmm1, Operand(ebx));
7464      __ movd(xmm0, Operand(eax));
7465      __ cvtss2sd(xmm1, xmm1);
7466      __ pxor(xmm0, xmm1);
7467      __ subsd(xmm0, xmm1);
7468      __ movdbl(FieldOperand(edi, HeapNumber::kValueOffset), xmm0);
7469    } else {
7470      // 0x4130000000000000 is 1.0 x 2^20 as a double.
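The comment at lines 7456-7459 compresses a classic bit trick: 0x4130000000000000 is the double 1.0 x 2^20, movd leaves the upper bits of xmm0 zero, so the pxor effectively ORs the random bits into the low 32 mantissa bits, and subtracting 1.0 x 2^20 leaves random/2^32, a value in [0, 1). Here is the same computation in portable C++ as a self-contained sketch; the function name is made up.

#include <cstdint>
#include <cstdio>
#include <cstring>

double RandomBitsToUnitInterval(uint32_t random_bits) {
  const uint64_t kOneTimesTwoPow20 = 0x4130000000000000ULL;  // 1.0 x 2^20 as a double
  uint64_t combined_bits = kOneTimesTwoPow20 | random_bits;  // 1.(20 0s)(32 random bits) x 2^20
  double combined, base;
  std::memcpy(&combined, &combined_bits, sizeof(combined));
  std::memcpy(&base, &kOneTimesTwoPow20, sizeof(base));
  return combined - base;  // == random_bits / 2^32
}

int main() {
  std::printf("%.10f\n", RandomBitsToUnitInterval(0x80000000u));  // prints 0.5000000000
  return 0;
}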
(...skipping 385 matching lines...)
7856
7857
7858  // Generates the Math.pow method.  Only handles special cases and
7859  // branches to the runtime system for everything else.  Please note
7860  // that this function assumes that the callsite has executed ToNumber
7861  // on both arguments.
7862  void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
7863    ASSERT(args->length() == 2);
7864    Load(args->at(0));
7865    Load(args->at(1));
7866 -  if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
7866 +  if (!CpuFeatures::IsSupported(SSE2)) {
7867      Result res = frame_->CallRuntime(Runtime::kMath_pow, 2);
7868      frame_->Push(&res);
7869    } else {
7870      CpuFeatures::Scope use_sse2(SSE2);
7871      Label allocate_return;
7872      // Load the two operands while leaving the values on the frame.
7873      frame()->Dup();
7874      Result exponent = frame()->Pop();
7875      exponent.ToRegister();
7876      frame()->Spill(exponent.reg());
(...skipping 196 matching lines...)
8073    frame_->Push(&result);
8074  }
8075
8076
8077  // Generates the Math.sqrt method.  Please note - this function assumes that
8078  // the callsite has executed ToNumber on the argument.
8079  void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
8080    ASSERT_EQ(args->length(), 1);
8081    Load(args->at(0));
8082
8083 -  if (!masm()->isolate()->cpu_features()->IsSupported(SSE2)) {
8083 +  if (!CpuFeatures::IsSupported(SSE2)) {
8084      Result result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
8085      frame()->Push(&result);
8086    } else {
8087      CpuFeatures::Scope use_sse2(SSE2);
8088      // Leave original value on the frame if we need to call runtime.
8089      frame()->Dup();
8090      Result result = frame()->Pop();
8091      result.ToRegister();
8092      frame()->Spill(result.reg());
8093      Label runtime;
(...skipping 2104 matching lines...)
10198    int stack_offset = 0;  // Update if we change the stack height.
10199
10200    if (FLAG_debug_code) {
10201      __ cmp(Operand(esp, kSizeOffset + stack_offset),
10202             Immediate(OS::kMinComplexMemCopy));
10203      Label ok;
10204      __ j(greater_equal, &ok);
10205      __ int3();
10206      __ bind(&ok);
10207    }
10208 -  if (masm.isolate()->cpu_features()->IsSupported(SSE2)) {
10208 +  if (CpuFeatures::IsSupported(SSE2)) {
10209      CpuFeatures::Scope enable(SSE2);
10210      __ push(edi);
10211      __ push(esi);
10212      stack_offset += 2 * kPointerSize;
10213      Register dst = edi;
10214      Register src = esi;
10215      Register count = ecx;
10216      __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
10217      __ mov(src, Operand(esp, stack_offset + kSourceOffset));
10218      __ mov(count, Operand(esp, stack_offset + kSizeOffset));
(...skipping 160 matching lines...)
10379    memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
10380    CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
10381    return FUNCTION_CAST<OS::MemCopyFunction>(chunk->GetStartAddress());
10382  }
10383
10384  #undef __
10385
10386  } }  // namespace v8::internal
10387
10388  #endif  // V8_TARGET_ARCH_IA32
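For context, the last hunk closes the runtime-assembled memcpy generator: the routine is assembled at runtime (with an SSE2 body when the CPU supports it), copied into an executable chunk, and handed back as an OS::MemCopyFunction pointer. Below is a self-contained C++ sketch of the dispatch pattern that callers would then use; the names, the plain-memcpy stand-in, and the assumed (dest, src, size) shape of the pointer type are illustrative, not V8's.

#include <cstddef>
#include <cstring>

typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);

// Stand-in for the generated code; the real generator returns a pointer into
// the freshly assembled executable chunk instead.
static void PlainCopy(void* dest, const void* src, size_t size) {
  std::memcpy(dest, src, size);
}

static MemCopyFunction CreateCopyFunction() {
  return &PlainCopy;  // the real generator chooses an SSE2 body when available
}

// Callers resolve the function pointer once and then dispatch through it.
void CopyBlock(void* dest, const void* src, size_t size) {
  static MemCopyFunction copy = CreateCopyFunction();
  copy(dest, src, size);
}

int main() {
  char src[16] = "generated copy";
  char dst[16];
  CopyBlock(dst, src, sizeof(src));
  return 0;
}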