Chromium Code Reviews

Unified Diff: src/x64/codegen-x64.cc

Issue 160519: X64: Fix bug in RandomPositiveSmi (doesn't save rsi before calling C code). (Closed)
Patch Set: Changed to use push/pop, instead of being fancy. Created 11 years, 4 months ago
 // Copyright 2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 3416 matching lines...)
   Result rbp_as_smi = allocator_->Allocate();
   ASSERT(rbp_as_smi.is_valid());
   __ movq(rbp_as_smi.reg(), rbp);
   frame_->Push(&rbp_as_smi);
 }


 void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 0);
   frame_->SpillAll();
+  __ push(rsi);

   // Make sure the frame is aligned like the OS expects.
   static const int kFrameAlignment = OS::ActivationFrameAlignment();
   if (kFrameAlignment > 0) {
     ASSERT(IsPowerOf2(kFrameAlignment));
     __ movq(rbx, rsp);  // Save in AMD-64 abi callee-saved register.
     __ and_(rsp, Immediate(-kFrameAlignment));
   }

   // Call V8::RandomPositiveSmi().
   __ Call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);

-  // Restore stack pointer from callee-saved register edi.
+  // Restore stack pointer from callee-saved register.
   if (kFrameAlignment > 0) {
     __ movq(rsp, rbx);
   }

+  __ pop(rsi);
   Result result = allocator_->Allocate(rax);
   frame_->Push(&result);
 }
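The hunk above is the actual fix named in the issue title: on x64, V8 keeps the current context in rsi, but in the System V AMD64 calling convention rsi is a caller-saved argument register, so the call into the C++ function V8::RandomPositiveSmi is free to clobber it. The new push/pop brackets the call so the context survives. The neighboring alignment code rounds rsp down to the OS-required boundary with a negative mask; a minimal sketch of that arithmetic (plain C++, not V8 code):

#include <cassert>
#include <cstdint>

// Sketch of "__ and_(rsp, Immediate(-kFrameAlignment))": for a power-of-two
// alignment A, -A in two's complement is a mask with the low log2(A) bits
// clear, so x & -A rounds x down to a multiple of A.
uint64_t AlignDown(uint64_t x, uint64_t alignment) {
  assert((alignment & (alignment - 1)) == 0);  // power of two, as IsPowerOf2() checks
  return x & ~(alignment - 1);                 // identical to x & -alignment
}

int main() {
  assert(AlignDown(0x7fffabcd, 16) == 0x7fffabc0);  // unaligned: rounded down
  assert(AlignDown(0x7fffabc0, 16) == 0x7fffabc0);  // already aligned: unchanged
  return 0;
}

Because the and can only move rsp down, the pre-alignment value must be parked in a register the callee is obliged to preserve, which is why rbx (callee-saved in the AMD64 ABI) holds it across the call.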


 void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
   // TODO(X64): Use inline floating point in the fast case.
   ASSERT(args->length() == 1);

   // Load number.
(...skipping 3336 matching lines...)

   __ bind(&load_smi_2);
   __ sar(kScratchRegister, Immediate(kSmiTagSize));
   __ push(kScratchRegister);
   __ fild_s(Operand(rsp, 0));
   __ pop(kScratchRegister);

   __ bind(&done);
 }

+
 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
                                             Register lhs,
                                             Register rhs) {
   Label load_smi_lhs, load_smi_rhs, done_load_lhs, done;
   __ testl(lhs, Immediate(kSmiTagMask));
   __ j(zero, &load_smi_lhs);
   __ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset));
   __ bind(&done_load_lhs);

   __ testl(rhs, Immediate(kSmiTagMask));
(...skipping 14 matching lines...)
   __ bind(&load_smi_rhs);
   __ movsxlq(kScratchRegister, rhs);
   __ sar(kScratchRegister, Immediate(kSmiTagSize));
   __ push(kScratchRegister);
   __ fild_d(Operand(rsp, 0));
   __ pop(kScratchRegister);

   __ bind(&done);
 }

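Both load paths above handle smis the same way: untag with an arithmetic right shift, push the result to memory, and load it onto the x87 stack with fild (the FPU cannot read a general-purpose register directly). A minimal sketch of the tagging scheme these tag tests assume, using the classic V8 constants of the era (kSmiTag == 0, kSmiTagSize == 1); the names mirror V8's but this is illustrative C++, not V8 code:

#include <cassert>
#include <cstdint>

const intptr_t kSmiTagSize = 1;
const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;  // low bit selects the tag

intptr_t SmiTag(intptr_t value) { return value << kSmiTagSize; }  // tag bit is 0
intptr_t SmiUntag(intptr_t smi) { return smi >> kSmiTagSize; }    // the sar above
bool IsSmi(intptr_t word) { return (word & kSmiTagMask) == 0; }   // the testl above

int main() {
  intptr_t s = SmiTag(42);
  assert(IsSmi(s));
  assert(SmiUntag(s) == 42);
  return 0;
}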
+
 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
                                              Label* non_float) {
   Label test_other, done;
   // Test if both operands are numbers (heap_numbers or smis).
   // If not, jump to label non_float.
   __ testl(rdx, Immediate(kSmiTagMask));
   __ j(zero, &test_other);  // argument in rdx is OK
   __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
   __ j(not_equal, non_float);  // The argument in rdx is not a number.

(...skipping 17 matching lines...)
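CheckFloatOperands classifies a value in two steps: a smi passes immediately (tag bit clear), and any other value is a heap pointer whose first field is its map (hidden class), which is compared against the canonical heap-number map. A sketch of that shape; the struct layout and names below are illustrative stand-ins, not V8's declarations:

#include <cassert>

struct Map {};
static Map heap_number_map;       // stands in for Factory::heap_number_map()
struct HeapObject { Map* map; };  // every heap object starts with its map pointer

// Mirrors the second step of the test: compare the object's map pointer.
bool IsHeapNumber(const HeapObject* obj) {
  return obj->map == &heap_number_map;
}

int main() {
  HeapObject num{&heap_number_map};
  Map other_map;
  HeapObject str{&other_map};
  assert(IsHeapNumber(&num) && !IsHeapNumber(&str));
  return 0;
}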
     case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
     case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
     case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
     case Token::SAR: return "GenericBinaryOpStub_SAR";
     case Token::SHL: return "GenericBinaryOpStub_SHL";
     case Token::SHR: return "GenericBinaryOpStub_SHR";
     default: return "GenericBinaryOpStub";
   }
 }

+
 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
   // Perform fast-case smi code for the operation (rax <op> rbx) and
   // leave result in register rax.

   // Smi check both operands.
   __ movq(rcx, rbx);
   __ or_(rcx, rax);  // The value in ecx is used for negative zero test later.
   __ testl(rcx, Immediate(kSmiTagMask));
   __ j(not_zero, slow);

(...skipping 120 matching lines...)
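The OR-then-test idiom at the top of GenerateSmiCode checks both operands with a single branch: since the smi tag is 0 in the low bit, a | b has its low bit set iff at least one operand is a heap object. A minimal sketch (assuming kSmiTagMask == 1 and heap pointers carrying a set low bit; not V8 code):

#include <cassert>
#include <cstdint>

// One test-and-branch rejects all non-smi cases at once.
bool BothSmis(intptr_t a, intptr_t b) {
  const intptr_t kSmiTagMask = 1;
  return ((a | b) & kSmiTagMask) == 0;
}

int main() {
  assert(BothSmis(42 << 1, 7 << 1));    // two tagged smis
  assert(!BothSmis(42 << 1, 0x1001));   // second operand tagged as a heap object
  return 0;
}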

     default:
       UNREACHABLE();
       break;
   }
 }


 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
   Label call_runtime;
-
   if (flags_ == SMI_CODE_IN_STUB) {
     // The fast case smi code wasn't inlined in the stub caller
     // code. Generate it here to speed up common operations.
     Label slow;
     __ movq(rbx, Operand(rsp, 1 * kPointerSize));  // get y
     __ movq(rax, Operand(rsp, 2 * kPointerSize));  // get x
     GenerateSmiCode(masm, &slow);
     __ ret(2 * kPointerSize);  // remove both operands

     // Too bad. The fast case smi code didn't succeed.
(...skipping 226 matching lines...)
 int CompareStub::MinorKey() {
   // Encode the two parameters in a unique 16 bit value.
   ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
   return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
 }
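MinorKey packs the stub's two parameters into one small integer so they can serve as part of the code-cache key: the condition code in bits 15..1 and the strict flag in bit 0, so distinct (cc_, strict_) pairs always yield distinct keys. A standalone sketch of the packing (illustrative names, not V8's):

#include <cassert>

int MakeMinorKey(unsigned cc, bool strict) {
  assert(cc < (1u << 15));  // mirrors the ASSERT in MinorKey()
  return static_cast<int>((cc << 1) | (strict ? 1u : 0u));
}

int main() {
  assert(MakeMinorKey(3, false) == 6);  // 3 << 1
  assert(MakeMinorKey(3, true) == 7);   // 3 << 1, strict bit set
  return 0;
}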


 #undef __

 } }  // namespace v8::internal