Chromium Code Reviews

Side by Side Diff: src/x64/codegen-x64.cc

Issue 196077: X64: Extract all smi operations into MacroAssembler macros. (Closed)
Patch Set: Created 11 years, 3 months ago
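
Context for this change: the x64 code generator previously open-coded every smi check as a testl against kSmiTagMask followed by a branch on the zero flag. The patch routes those sequences through MacroAssembler helpers that return a Condition for the caller to branch on. A minimal sketch of the idiom, assuming the x64 smi encoding of this era (kSmiTag == 0, kSmiTagSize == 1, so the low bit of a smi is clear); the actual helpers live in macro-assembler-x64.cc and may differ in detail:

  // Hypothetical sketch, not the verbatim V8 implementation.
  Condition MacroAssembler::CheckSmi(Register src) {
    ASSERT(kSmiTag == 0);
    testb(src, Immediate(kSmiTagMask));  // low tag bit set => heap object
    return zero;                         // zero flag set => src is a smi
  }

Call sites then read, for example,

  Condition is_smi = masm_->CheckSmi(receiver.reg());
  build_args.Branch(is_smi);

instead of repeating the testl/branch pair at every site.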
1 // Copyright 2009 the V8 project authors. All rights reserved. 1 // Copyright 2009 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 702 matching lines...)
713 713
714 // Get rid of the arguments object probe. 714 // Get rid of the arguments object probe.
715 frame_->Drop(); 715 frame_->Drop();
716 716
717 // Before messing with the execution stack, we sync all 717 // Before messing with the execution stack, we sync all
718 // elements. This is bound to happen anyway because we're 718 // elements. This is bound to happen anyway because we're
719 // about to call a function. 719 // about to call a function.
720 frame_->SyncRange(0, frame_->element_count() - 1); 720 frame_->SyncRange(0, frame_->element_count() - 1);
721 721
722 // Check that the receiver really is a JavaScript object. 722 // Check that the receiver really is a JavaScript object.
723 { frame_->PushElementAt(0); 723 {
724 frame_->PushElementAt(0);
724 Result receiver = frame_->Pop(); 725 Result receiver = frame_->Pop();
725 receiver.ToRegister(); 726 receiver.ToRegister();
726 __ testl(receiver.reg(), Immediate(kSmiTagMask)); 727 Condition is_smi = masm_->CheckSmi(receiver.reg());
William Hesse 2009/09/10 11:13:22 This bugged me a lot at first, but now I see that
Lasse Reichstein 2009/09/10 12:28:11 I thought about inlining the CheckSmi call in the
Lasse Reichstein 2009/09/10 12:45:35 ... but I'll leave that for a later change (e.g. w
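(Note on the idiom under discussion: CheckSmi sets the processor flags and returns the Condition to branch on, so the flags are live between the CheckSmi call and the Branch that consumes it. No flag-clobbering instruction may be emitted in between, which is why the call sites below keep the pair adjacent:

  Condition is_smi = masm_->CheckSmi(receiver.reg());
  // Flags are live here; nothing that changes them may intervene.
  build_args.Branch(is_smi);
)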
727 build_args.Branch(zero); 728 build_args.Branch(is_smi);
728 // We allow all JSObjects including JSFunctions. As long as 729 // We allow all JSObjects including JSFunctions. As long as
729 // JS_FUNCTION_TYPE is the last instance type and it is right 730 // JS_FUNCTION_TYPE is the last instance type and it is right
730 // after LAST_JS_OBJECT_TYPE, we do not have to check the upper 731 // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
731 // bound. 732 // bound.
732 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); 733 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
733 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); 734 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
734 __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister); 735 __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister);
735 build_args.Branch(below); 736 build_args.Branch(below);
736 } 737 }
737 738
738 // Verify that we're invoking Function.prototype.apply. 739 // Verify that we're invoking Function.prototype.apply.
739 { frame_->PushElementAt(1); 740 {
741 frame_->PushElementAt(1);
740 Result apply = frame_->Pop(); 742 Result apply = frame_->Pop();
741 apply.ToRegister(); 743 apply.ToRegister();
742 __ testl(apply.reg(), Immediate(kSmiTagMask)); 744 Condition is_smi = masm_->CheckSmi(apply.reg());
743 build_args.Branch(zero); 745 build_args.Branch(is_smi);
744 Result tmp = allocator_->Allocate(); 746 Result tmp = allocator_->Allocate();
745 __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg()); 747 __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
746 build_args.Branch(not_equal); 748 build_args.Branch(not_equal);
747 __ movq(tmp.reg(), 749 __ movq(tmp.reg(),
748 FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset)); 750 FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
749 Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply)); 751 Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
750 __ Cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset), 752 __ Cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
751 apply_code); 753 apply_code);
752 build_args.Branch(not_equal); 754 build_args.Branch(not_equal);
753 } 755 }
754 756
755 // Get the function receiver from the stack. Check that it 757 // Get the function receiver from the stack. Check that it
756 // really is a function. 758 // really is a function.
757 __ movq(rdi, Operand(rsp, 2 * kPointerSize)); 759 __ movq(rdi, Operand(rsp, 2 * kPointerSize));
758 __ testl(rdi, Immediate(kSmiTagMask)); 760 Condition is_smi = masm_->CheckSmi(rdi);
759 build_args.Branch(zero); 761 build_args.Branch(is_smi);
760 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); 762 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
761 build_args.Branch(not_equal); 763 build_args.Branch(not_equal);
762 764
763 // Copy the arguments to this function possibly from the 765 // Copy the arguments to this function possibly from the
764 // adaptor frame below it. 766 // adaptor frame below it.
765 Label invoke, adapted; 767 Label invoke, adapted;
766 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); 768 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
767 __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset)); 769 __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
768 __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 770 __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
769 __ j(equal, &adapted); 771 __ j(equal, &adapted);
770 772
771 // No arguments adaptor frame. Copy fixed number of arguments. 773 // No arguments adaptor frame. Copy fixed number of arguments.
772 __ movq(rax, Immediate(scope_->num_parameters())); 774 __ movq(rax, Immediate(scope_->num_parameters()));
773 for (int i = 0; i < scope_->num_parameters(); i++) { 775 for (int i = 0; i < scope_->num_parameters(); i++) {
774 __ push(frame_->ParameterAt(i)); 776 __ push(frame_->ParameterAt(i));
775 } 777 }
776 __ jmp(&invoke); 778 __ jmp(&invoke);
777 779
778 // Arguments adaptor frame present. Copy arguments from there, but 780 // Arguments adaptor frame present. Copy arguments from there, but
779 // avoid copying too many arguments to avoid stack overflows. 781 // avoid copying too many arguments to avoid stack overflows.
780 __ bind(&adapted); 782 __ bind(&adapted);
781 static const uint32_t kArgumentsLimit = 1 * KB; 783 static const uint32_t kArgumentsLimit = 1 * KB;
782 __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset)); 784 __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
783 __ shrl(rax, Immediate(kSmiTagSize)); 785 __ SmiToInteger32(rax, rax);
784 __ movq(rcx, rax); 786 __ movq(rcx, rax);
785 __ cmpq(rax, Immediate(kArgumentsLimit)); 787 __ cmpq(rax, Immediate(kArgumentsLimit));
786 build_args.Branch(above); 788 build_args.Branch(above);
787 789
788 // Loop through the arguments pushing them onto the execution 790 // Loop through the arguments pushing them onto the execution
789 // stack. We don't inform the virtual frame of the push, so we don't 791 // stack. We don't inform the virtual frame of the push, so we don't
790 // have to worry about getting rid of the elements from the virtual 792 // have to worry about getting rid of the elements from the virtual
791 // frame. 793 // frame.
792 Label loop; 794 Label loop;
793 __ bind(&loop); 795 __ bind(&loop);
(...skipping 856 matching lines...)
1650 1652
1651 // Stack layout in body: 1653 // Stack layout in body:
1652 // [iteration counter (smi)] <- slot 0 1654 // [iteration counter (smi)] <- slot 0
1653 // [length of array] <- slot 1 1655 // [length of array] <- slot 1
1654 // [FixedArray] <- slot 2 1656 // [FixedArray] <- slot 2
1655 // [Map or 0] <- slot 3 1657 // [Map or 0] <- slot 3
1656 // [Object] <- slot 4 1658 // [Object] <- slot 4
1657 1659
1658 // Check if enumerable is already a JSObject 1660 // Check if enumerable is already a JSObject
1659 // rax: value to be iterated over 1661 // rax: value to be iterated over
1660 __ testl(rax, Immediate(kSmiTagMask)); 1662 Condition is_smi = masm_->CheckSmi(rax);
1661 primitive.Branch(zero); 1663 primitive.Branch(is_smi);
1662 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx); 1664 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
1663 jsobject.Branch(above_equal); 1665 jsobject.Branch(above_equal);
1664 1666
1665 primitive.Bind(); 1667 primitive.Bind();
1666 frame_->EmitPush(rax); 1668 frame_->EmitPush(rax);
1667 frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1); 1669 frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
1668 // function call returns the value in rax, which is where we want it below 1670 // function call returns the value in rax, which is where we want it below
1669 1671
1670 jsobject.Bind(); 1672 jsobject.Bind();
1671 // Get the set of properties (as a FixedArray or Map). 1673 // Get the set of properties (as a FixedArray or Map).
(...skipping 16 matching lines...)
1688 // rax: map (result from call to Runtime::kGetPropertyNamesFast) 1690 // rax: map (result from call to Runtime::kGetPropertyNamesFast)
1689 __ movq(rcx, rax); 1691 __ movq(rcx, rax);
1690 __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset)); 1692 __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset));
1691 // Get the bridge array held in the enumeration index field. 1693 // Get the bridge array held in the enumeration index field.
1692 __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset)); 1694 __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
1693 // Get the cache from the bridge array. 1695 // Get the cache from the bridge array.
1694 __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset)); 1696 __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
1695 1697
1696 frame_->EmitPush(rax); // <- slot 3 1698 frame_->EmitPush(rax); // <- slot 3
1697 frame_->EmitPush(rdx); // <- slot 2 1699 frame_->EmitPush(rdx); // <- slot 2
1698 __ movsxlq(rax, FieldOperand(rdx, FixedArray::kLengthOffset)); 1700 __ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
1699 __ shl(rax, Immediate(kSmiTagSize)); 1701 __ Integer32ToSmi(rax, rax);
1700 frame_->EmitPush(rax); // <- slot 1 1702 frame_->EmitPush(rax); // <- slot 1
1701 frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0 1703 frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
1702 entry.Jump(); 1704 entry.Jump();
1703 1705
1704 fixed_array.Bind(); 1706 fixed_array.Bind();
1705 // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast) 1707 // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
1706 frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 3 1708 frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 3
1707 frame_->EmitPush(rax); // <- slot 2 1709 frame_->EmitPush(rax); // <- slot 2
1708 1710
1709 // Push the length of the array and the initial index onto the stack. 1711 // Push the length of the array and the initial index onto the stack.
1710 __ movsxlq(rax, FieldOperand(rax, FixedArray::kLengthOffset)); 1712 __ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
1711 __ shl(rax, Immediate(kSmiTagSize)); 1713 __ Integer32ToSmi(rax, rax);
1712 frame_->EmitPush(rax); // <- slot 1 1714 frame_->EmitPush(rax); // <- slot 1
1713 frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0 1715 frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
1714 1716
1715 // Condition. 1717 // Condition.
1716 entry.Bind(); 1718 entry.Bind();
1717 // Grab the current frame's height for the break and continue 1719 // Grab the current frame's height for the break and continue
1718 // targets only after all the state is pushed on the frame. 1720 // targets only after all the state is pushed on the frame.
1719 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); 1721 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
1720 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); 1722 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
1721 1723
1722 __ movq(rax, frame_->ElementAt(0)); // load the current count 1724 __ movq(rax, frame_->ElementAt(0)); // load the current count
1723 __ cmpl(rax, frame_->ElementAt(1)); // compare to the array length 1725 __ cmpl(rax, frame_->ElementAt(1)); // compare to the array length
1724 node->break_target()->Branch(above_equal); 1726 node->break_target()->Branch(above_equal);
1725 1727
1726 // Get the i'th entry of the array. 1728 // Get the i'th entry of the array.
1727 __ movq(rdx, frame_->ElementAt(2)); 1729 __ movq(rdx, frame_->ElementAt(2));
1730 // TODO(smi): Find a way to abstract indexing by a smi value.
1728 ASSERT(kSmiTagSize == 1 && kSmiTag == 0); 1731 ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
1729 // Multiplier is times_4 since rax is already a Smi. 1732 // Multiplier is times_4 since rax is already a Smi.
1730 __ movq(rbx, FieldOperand(rdx, rax, times_4, FixedArray::kHeaderSize)); 1733 __ movq(rbx, FieldOperand(rdx, rax, times_4, FixedArray::kHeaderSize));
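(Aside on the times_4 scaling: with kSmiTag == 0 and kSmiTagSize == 1, a smi with value n is stored as 2n, so scaling the tagged rax by times_4 yields 2n * 4 = 8n bytes — exactly n * kPointerSize for 8-byte array elements. The TODO above asks for a macro to hide this reasoning.)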
1731 1734
1732 // Get the expected map from the stack or a zero map in the 1735 // Get the expected map from the stack or a zero map in the
1733 // permanent slow case rax: current iteration count rbx: i'th entry 1736 // permanent slow case rax: current iteration count rbx: i'th entry
1734 // of the enum cache 1737 // of the enum cache
1735 __ movq(rdx, frame_->ElementAt(3)); 1738 __ movq(rdx, frame_->ElementAt(3));
1736 // Check if the expected map still matches that of the enumerable. 1739 // Check if the expected map still matches that of the enumerable.
1737 // If not, we have to filter the key. 1740 // If not, we have to filter the key.
(...skipping 1348 matching lines...)
3086 frame_->Push(&answer); 3089 frame_->Push(&answer);
3087 break; 3090 break;
3088 } 3091 }
3089 3092
3090 case Token::BIT_NOT: { 3093 case Token::BIT_NOT: {
3091 // Smi check. 3094 // Smi check.
3092 JumpTarget smi_label; 3095 JumpTarget smi_label;
3093 JumpTarget continue_label; 3096 JumpTarget continue_label;
3094 Result operand = frame_->Pop(); 3097 Result operand = frame_->Pop();
3095 operand.ToRegister(); 3098 operand.ToRegister();
3096 __ testl(operand.reg(), Immediate(kSmiTagMask)); 3099
3097 smi_label.Branch(zero, &operand); 3100 Condition is_smi = masm_->CheckSmi(operand.reg());
3101 smi_label.Branch(is_smi, &operand);
3098 3102
3099 frame_->Push(&operand); // undo popping of TOS 3103 frame_->Push(&operand); // undo popping of TOS
3100 Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT, 3104 Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
3101 CALL_FUNCTION, 1); 3105 CALL_FUNCTION, 1);
3102 continue_label.Jump(&answer); 3106 continue_label.Jump(&answer);
3103 smi_label.Bind(&answer); 3107 smi_label.Bind(&answer);
3104 answer.ToRegister(); 3108 answer.ToRegister();
3105 frame_->Spill(answer.reg()); 3109 frame_->Spill(answer.reg());
3106 __ not_(answer.reg()); 3110 __ SmiNot(answer.reg(), answer.reg());
3107 // Remove inverted smi-tag. The mask is sign-extended to 64 bits.
3108 __ xor_(answer.reg(), Immediate(kSmiTagMask));
3109 continue_label.Bind(&answer); 3111 continue_label.Bind(&answer);
3110 frame_->Push(&answer); 3112 frame_->Push(&answer);
3111 break; 3113 break;
3112 } 3114 }
3113 3115
3114 case Token::ADD: { 3116 case Token::ADD: {
3115 // Smi check. 3117 // Smi check.
3116 JumpTarget continue_label; 3118 JumpTarget continue_label;
3117 Result operand = frame_->Pop(); 3119 Result operand = frame_->Pop();
3118 operand.ToRegister(); 3120 operand.ToRegister();
3119 __ testl(operand.reg(), Immediate(kSmiTagMask)); 3121 Condition is_smi = masm_->CheckSmi(operand.reg());
3120 continue_label.Branch(zero, &operand, taken); 3122 continue_label.Branch(is_smi, &operand);
3121
3122 frame_->Push(&operand); 3123 frame_->Push(&operand);
3123 Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER, 3124 Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
3124 CALL_FUNCTION, 1); 3125 CALL_FUNCTION, 1);
3125 3126
3126 continue_label.Bind(&answer); 3127 continue_label.Bind(&answer);
3127 frame_->Push(&answer); 3128 frame_->Push(&answer);
3128 break; 3129 break;
3129 } 3130 }
3130 3131
3131 default: 3132 default:
(...skipping 125 matching lines...)
3257 } 3258 }
3258 3259
3259 __ movq(kScratchRegister, new_value.reg()); 3260 __ movq(kScratchRegister, new_value.reg());
3260 if (is_increment) { 3261 if (is_increment) {
3261 __ addl(kScratchRegister, Immediate(Smi::FromInt(1))); 3262 __ addl(kScratchRegister, Immediate(Smi::FromInt(1)));
3262 } else { 3263 } else {
3263 __ subl(kScratchRegister, Immediate(Smi::FromInt(1))); 3264 __ subl(kScratchRegister, Immediate(Smi::FromInt(1)));
3264 } 3265 }
3265 // Smi test. 3266 // Smi test.
3266 deferred->Branch(overflow); 3267 deferred->Branch(overflow);
3267 __ testl(kScratchRegister, Immediate(kSmiTagMask)); 3268 __ JumpIfNotSmi(kScratchRegister, deferred->entry_label());
3268 deferred->Branch(not_zero);
3269 __ movq(new_value.reg(), kScratchRegister); 3269 __ movq(new_value.reg(), kScratchRegister);
3270 deferred->BindExit(); 3270 deferred->BindExit();
3271 3271
3272 // Postfix: store the old value in the allocated slot under the 3272 // Postfix: store the old value in the allocated slot under the
3273 // reference. 3273 // reference.
3274 if (is_postfix) frame_->SetElementAt(target.size(), &old_value); 3274 if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
3275 3275
3276 frame_->Push(&new_value); 3276 frame_->Push(&new_value);
3277 // Non-constant: update the reference. 3277 // Non-constant: update the reference.
3278 if (!is_const) target.SetValue(NOT_CONST_INIT); 3278 if (!is_const) target.SetValue(NOT_CONST_INIT);
(...skipping 184 matching lines...)
3463 (right->AsLiteral() != NULL && 3463 (right->AsLiteral() != NULL &&
3464 right->AsLiteral()->handle()->IsString())) { 3464 right->AsLiteral()->handle()->IsString())) {
3465 Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle())); 3465 Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
3466 3466
3467 // Load the operand and move it to a register. 3467 // Load the operand and move it to a register.
3468 LoadTypeofExpression(operation->expression()); 3468 LoadTypeofExpression(operation->expression());
3469 Result answer = frame_->Pop(); 3469 Result answer = frame_->Pop();
3470 answer.ToRegister(); 3470 answer.ToRegister();
3471 3471
3472 if (check->Equals(Heap::number_symbol())) { 3472 if (check->Equals(Heap::number_symbol())) {
3473 __ testl(answer.reg(), Immediate(kSmiTagMask)); 3473 Condition is_smi = masm_->CheckSmi(answer.reg());
3474 destination()->true_target()->Branch(zero); 3474 destination()->true_target()->Branch(is_smi);
3475 frame_->Spill(answer.reg()); 3475 frame_->Spill(answer.reg());
3476 __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset)); 3476 __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
3477 __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex); 3477 __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
3478 answer.Unuse(); 3478 answer.Unuse();
3479 destination()->Split(equal); 3479 destination()->Split(equal);
3480 3480
3481 } else if (check->Equals(Heap::string_symbol())) { 3481 } else if (check->Equals(Heap::string_symbol())) {
3482 __ testl(answer.reg(), Immediate(kSmiTagMask)); 3482 Condition is_smi = masm_->CheckSmi(answer.reg());
3483 destination()->false_target()->Branch(zero); 3483 destination()->false_target()->Branch(is_smi);
3484 3484
3485 // It can be an undetectable string object. 3485 // It can be an undetectable string object.
3486 __ movq(kScratchRegister, 3486 __ movq(kScratchRegister,
3487 FieldOperand(answer.reg(), HeapObject::kMapOffset)); 3487 FieldOperand(answer.reg(), HeapObject::kMapOffset));
3488 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), 3488 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3489 Immediate(1 << Map::kIsUndetectable)); 3489 Immediate(1 << Map::kIsUndetectable));
3490 destination()->false_target()->Branch(not_zero); 3490 destination()->false_target()->Branch(not_zero);
3491 __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE); 3491 __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
3492 answer.Unuse(); 3492 answer.Unuse();
3493 destination()->Split(below); // Unsigned byte comparison needed. 3493 destination()->Split(below); // Unsigned byte comparison needed.
3494 3494
3495 } else if (check->Equals(Heap::boolean_symbol())) { 3495 } else if (check->Equals(Heap::boolean_symbol())) {
3496 __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex); 3496 __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
3497 destination()->true_target()->Branch(equal); 3497 destination()->true_target()->Branch(equal);
3498 __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex); 3498 __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
3499 answer.Unuse(); 3499 answer.Unuse();
3500 destination()->Split(equal); 3500 destination()->Split(equal);
3501 3501
3502 } else if (check->Equals(Heap::undefined_symbol())) { 3502 } else if (check->Equals(Heap::undefined_symbol())) {
3503 __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex); 3503 __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
3504 destination()->true_target()->Branch(equal); 3504 destination()->true_target()->Branch(equal);
3505 3505
3506 __ testl(answer.reg(), Immediate(kSmiTagMask)); 3506 Condition is_smi = masm_->CheckSmi(answer.reg());
3507 destination()->false_target()->Branch(zero); 3507 destination()->false_target()->Branch(is_smi);
3508 3508
3509 // It can be an undetectable object. 3509 // It can be an undetectable object.
3510 __ movq(kScratchRegister, 3510 __ movq(kScratchRegister,
3511 FieldOperand(answer.reg(), HeapObject::kMapOffset)); 3511 FieldOperand(answer.reg(), HeapObject::kMapOffset));
3512 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), 3512 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3513 Immediate(1 << Map::kIsUndetectable)); 3513 Immediate(1 << Map::kIsUndetectable));
3514 answer.Unuse(); 3514 answer.Unuse();
3515 destination()->Split(not_zero); 3515 destination()->Split(not_zero);
3516 3516
3517 } else if (check->Equals(Heap::function_symbol())) { 3517 } else if (check->Equals(Heap::function_symbol())) {
3518 __ testl(answer.reg(), Immediate(kSmiTagMask)); 3518 Condition is_smi = masm_->CheckSmi(answer.reg());
3519 destination()->false_target()->Branch(zero); 3519 destination()->false_target()->Branch(is_smi);
3520 frame_->Spill(answer.reg()); 3520 frame_->Spill(answer.reg());
3521 __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg()); 3521 __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
3522 answer.Unuse(); 3522 answer.Unuse();
3523 destination()->Split(equal); 3523 destination()->Split(equal);
3524 3524
3525 } else if (check->Equals(Heap::object_symbol())) { 3525 } else if (check->Equals(Heap::object_symbol())) {
3526 __ testl(answer.reg(), Immediate(kSmiTagMask)); 3526 Condition is_smi = masm_->CheckSmi(answer.reg());
3527 destination()->false_target()->Branch(zero); 3527 destination()->false_target()->Branch(is_smi);
3528 __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex); 3528 __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
3529 destination()->true_target()->Branch(equal); 3529 destination()->true_target()->Branch(equal);
3530 3530
3531 // It can be an undetectable object. 3531 // It can be an undetectable object.
3532 __ movq(kScratchRegister, 3532 __ movq(kScratchRegister,
3533 FieldOperand(answer.reg(), HeapObject::kMapOffset)); 3533 FieldOperand(answer.reg(), HeapObject::kMapOffset));
3534 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), 3534 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
3535 Immediate(1 << Map::kIsUndetectable)); 3535 Immediate(1 << Map::kIsUndetectable));
3536 destination()->false_target()->Branch(not_zero); 3536 destination()->false_target()->Branch(not_zero);
3537 __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE); 3537 __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
(...skipping 78 matching lines...)
3616 frame_->Push(&result); 3616 frame_->Push(&result);
3617 } 3617 }
3618 3618
3619 3619
3620 void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) { 3620 void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
3621 ASSERT(args->length() == 1); 3621 ASSERT(args->length() == 1);
3622 Load(args->at(0)); 3622 Load(args->at(0));
3623 Result value = frame_->Pop(); 3623 Result value = frame_->Pop();
3624 value.ToRegister(); 3624 value.ToRegister();
3625 ASSERT(value.is_valid()); 3625 ASSERT(value.is_valid());
3626 __ testl(value.reg(), Immediate(kSmiTagMask)); 3626 Condition is_smi = masm_->CheckSmi(value.reg());
3627 destination()->false_target()->Branch(equal); 3627 destination()->false_target()->Branch(is_smi);
3628 // It is a heap object - get map. 3628 // It is a heap object - get map.
3629 // Check if the object is a JS array or not. 3629 // Check if the object is a JS array or not.
3630 __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister); 3630 __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
3631 value.Unuse(); 3631 value.Unuse();
3632 destination()->Split(equal); 3632 destination()->Split(equal);
3633 } 3633 }
3634 3634
3635 3635
3636 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) { 3636 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
3637 ASSERT(args->length() == 0); 3637 ASSERT(args->length() == 0);
(...skipping 82 matching lines...)
3720 frame_->Spill(index.reg()); 3720 frame_->Spill(index.reg());
3721 3721
3722 // We need a single extra temporary register. 3722 // We need a single extra temporary register.
3723 Result temp = allocator()->Allocate(); 3723 Result temp = allocator()->Allocate();
3724 ASSERT(temp.is_valid()); 3724 ASSERT(temp.is_valid());
3725 3725
3726 // There is no virtual frame effect from here up to the final result 3726 // There is no virtual frame effect from here up to the final result
3727 // push. 3727 // push.
3728 3728
3729 // If the receiver is a smi trigger the slow case. 3729 // If the receiver is a smi trigger the slow case.
3730 ASSERT(kSmiTag == 0); 3730 __ JumpIfSmi(object.reg(), &slow_case);
3731 __ testl(object.reg(), Immediate(kSmiTagMask));
3732 __ j(zero, &slow_case);
3733 3731
3734 // If the index is negative or non-smi trigger the slow case. 3732 // If the index is negative or non-smi trigger the slow case.
3735 ASSERT(kSmiTag == 0); 3733 __ JumpIfNotPositiveSmi(index.reg(), &slow_case);
3736 __ testl(index.reg(), 3734
3737 Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
3738 __ j(not_zero, &slow_case);
3739 // Untag the index. 3735 // Untag the index.
3740 __ sarl(index.reg(), Immediate(kSmiTagSize)); 3736 __ SmiToInteger32(index.reg(), index.reg());
3741 3737
3742 __ bind(&try_again_with_new_string); 3738 __ bind(&try_again_with_new_string);
3743 // Fetch the instance type of the receiver into rcx. 3739 // Fetch the instance type of the receiver into rcx.
3744 __ movq(rcx, FieldOperand(object.reg(), HeapObject::kMapOffset)); 3740 __ movq(rcx, FieldOperand(object.reg(), HeapObject::kMapOffset));
3745 __ movzxbl(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset)); 3741 __ movzxbl(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
3746 // If the receiver is not a string trigger the slow case. 3742 // If the receiver is not a string trigger the slow case.
3747 __ testb(rcx, Immediate(kIsNotStringMask)); 3743 __ testb(rcx, Immediate(kIsNotStringMask));
3748 __ j(not_zero, &slow_case); 3744 __ j(not_zero, &slow_case);
3749 3745
3750 // Here we make assumptions about the tag values and the shifts needed. 3746 // Here we make assumptions about the tag values and the shifts needed.
(...skipping 32 matching lines...)
3783 __ jmp(&got_char_code); 3779 __ jmp(&got_char_code);
3784 3780
3785 // ASCII string. 3781 // ASCII string.
3786 __ bind(&ascii_string); 3782 __ bind(&ascii_string);
3787 // Load the byte into the temp register. 3783 // Load the byte into the temp register.
3788 __ movzxbl(temp.reg(), FieldOperand(object.reg(), 3784 __ movzxbl(temp.reg(), FieldOperand(object.reg(),
3789 index.reg(), 3785 index.reg(),
3790 times_1, 3786 times_1,
3791 SeqAsciiString::kHeaderSize)); 3787 SeqAsciiString::kHeaderSize));
3792 __ bind(&got_char_code); 3788 __ bind(&got_char_code);
3793 ASSERT(kSmiTag == 0); 3789 __ Integer32ToSmi(temp.reg(), temp.reg());
3794 __ shl(temp.reg(), Immediate(kSmiTagSize));
3795 __ jmp(&end); 3790 __ jmp(&end);
3796 3791
3797 // Handle non-flat strings. 3792 // Handle non-flat strings.
3798 __ bind(&not_a_flat_string); 3793 __ bind(&not_a_flat_string);
3799 __ and_(temp.reg(), Immediate(kStringRepresentationMask)); 3794 __ and_(temp.reg(), Immediate(kStringRepresentationMask));
3800 __ cmpb(temp.reg(), Immediate(kConsStringTag)); 3795 __ cmpb(temp.reg(), Immediate(kConsStringTag));
3801 __ j(equal, &a_cons_string); 3796 __ j(equal, &a_cons_string);
3802 __ cmpb(temp.reg(), Immediate(kSlicedStringTag)); 3797 __ cmpb(temp.reg(), Immediate(kSlicedStringTag));
3803 __ j(not_equal, &slow_case); 3798 __ j(not_equal, &slow_case);
3804 3799
(...skipping 20 matching lines...)
3825 frame_->Push(&temp); 3820 frame_->Push(&temp);
3826 } 3821 }
3827 3822
3828 3823
3829 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) { 3824 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
3830 ASSERT(args->length() == 1); 3825 ASSERT(args->length() == 1);
3831 Load(args->at(0)); 3826 Load(args->at(0));
3832 Result value = frame_->Pop(); 3827 Result value = frame_->Pop();
3833 value.ToRegister(); 3828 value.ToRegister();
3834 ASSERT(value.is_valid()); 3829 ASSERT(value.is_valid());
3835 __ testl(value.reg(), 3830 Condition positive_smi = masm_->CheckPositiveSmi(value.reg());
3836 Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
3837 value.Unuse(); 3831 value.Unuse();
3838 destination()->Split(zero); 3832 destination()->Split(positive_smi);
3839 } 3833 }
3840 3834
3841 3835
3842 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) { 3836 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
3843 ASSERT(args->length() == 1); 3837 ASSERT(args->length() == 1);
3844 Load(args->at(0)); 3838 Load(args->at(0));
3845 Result value = frame_->Pop(); 3839 Result value = frame_->Pop();
3846 value.ToRegister(); 3840 value.ToRegister();
3847 ASSERT(value.is_valid()); 3841 ASSERT(value.is_valid());
3848 __ testl(value.reg(), Immediate(kSmiTagMask)); 3842 Condition is_smi = masm_->CheckSmi(value.reg());
3849 value.Unuse(); 3843 value.Unuse();
3850 destination()->Split(zero); 3844 destination()->Split(is_smi);
3851 } 3845 }
3852 3846
3853 3847
3854 void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) { 3848 void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
3855 // Conditionally generate a log call. 3849 // Conditionally generate a log call.
3856 // Args: 3850 // Args:
3857 // 0 (literal string): The type of logging (corresponds to the flags). 3851 // 0 (literal string): The type of logging (corresponds to the flags).
3858 // This is used to determine whether or not to generate the log call. 3852 // This is used to determine whether or not to generate the log call.
3859 // 1 (string): Format string. Access the string at argument index 2 3853 // 1 (string): Format string. Access the string at argument index 2
3860 // with '%2s' (see Logger::LogRuntime for all the formats). 3854 // with '%2s' (see Logger::LogRuntime for all the formats).
(...skipping 134 matching lines...)
3995 3989
3996 void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) { 3990 void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
3997 ASSERT(args->length() == 1); 3991 ASSERT(args->length() == 1);
3998 JumpTarget leave, null, function, non_function_constructor; 3992 JumpTarget leave, null, function, non_function_constructor;
3999 Load(args->at(0)); // Load the object. 3993 Load(args->at(0)); // Load the object.
4000 Result obj = frame_->Pop(); 3994 Result obj = frame_->Pop();
4001 obj.ToRegister(); 3995 obj.ToRegister();
4002 frame_->Spill(obj.reg()); 3996 frame_->Spill(obj.reg());
4003 3997
4004 // If the object is a smi, we return null. 3998 // If the object is a smi, we return null.
4005 __ testl(obj.reg(), Immediate(kSmiTagMask)); 3999 Condition is_smi = masm_->CheckSmi(obj.reg());
4006 null.Branch(zero); 4000 null.Branch(is_smi);
4007 4001
4008 // Check that the object is a JS object but take special care of JS 4002 // Check that the object is a JS object but take special care of JS
4009 // functions to make sure they have 'Function' as their class. 4003 // functions to make sure they have 'Function' as their class.
4010 4004
4011 __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg()); 4005 __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
4012 null.Branch(below); 4006 null.Branch(below);
4013 4007
4014 // As long as JS_FUNCTION_TYPE is the last instance type and it is 4008 // As long as JS_FUNCTION_TYPE is the last instance type and it is
4015 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for 4009 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
4016 // LAST_JS_OBJECT_TYPE. 4010 // LAST_JS_OBJECT_TYPE.
(...skipping 40 matching lines...)
4057 ASSERT(args->length() == 2); 4051 ASSERT(args->length() == 2);
4058 JumpTarget leave; 4052 JumpTarget leave;
4059 Load(args->at(0)); // Load the object. 4053 Load(args->at(0)); // Load the object.
4060 Load(args->at(1)); // Load the value. 4054 Load(args->at(1)); // Load the value.
4061 Result value = frame_->Pop(); 4055 Result value = frame_->Pop();
4062 Result object = frame_->Pop(); 4056 Result object = frame_->Pop();
4063 value.ToRegister(); 4057 value.ToRegister();
4064 object.ToRegister(); 4058 object.ToRegister();
4065 4059
4066 // if (object->IsSmi()) return value. 4060 // if (object->IsSmi()) return value.
4067 __ testl(object.reg(), Immediate(kSmiTagMask)); 4061 Condition is_smi = masm_->CheckSmi(object.reg());
4068 leave.Branch(zero, &value); 4062 leave.Branch(is_smi, &value);
4069 4063
4070 // It is a heap object - get its map. 4064 // It is a heap object - get its map.
4071 Result scratch = allocator_->Allocate(); 4065 Result scratch = allocator_->Allocate();
4072 ASSERT(scratch.is_valid()); 4066 ASSERT(scratch.is_valid());
4073 // if (!object->IsJSValue()) return value. 4067 // if (!object->IsJSValue()) return value.
4074 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg()); 4068 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
4075 leave.Branch(not_equal, &value); 4069 leave.Branch(not_equal, &value);
4076 4070
4077 // Store the value. 4071 // Store the value.
4078 __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg()); 4072 __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
(...skipping 19 matching lines...)
4098 4092
4099 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) { 4093 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
4100 ASSERT(args->length() == 1); 4094 ASSERT(args->length() == 1);
4101 JumpTarget leave; 4095 JumpTarget leave;
4102 Load(args->at(0)); // Load the object. 4096 Load(args->at(0)); // Load the object.
4103 frame_->Dup(); 4097 frame_->Dup();
4104 Result object = frame_->Pop(); 4098 Result object = frame_->Pop();
4105 object.ToRegister(); 4099 object.ToRegister();
4106 ASSERT(object.is_valid()); 4100 ASSERT(object.is_valid());
4107 // if (object->IsSmi()) return object. 4101 // if (object->IsSmi()) return object.
4108 __ testl(object.reg(), Immediate(kSmiTagMask)); 4102 Condition is_smi = masm_->CheckSmi(object.reg());
4109 leave.Branch(zero); 4103 leave.Branch(is_smi);
4110 // It is a heap object - get map. 4104 // It is a heap object - get map.
4111 Result temp = allocator()->Allocate(); 4105 Result temp = allocator()->Allocate();
4112 ASSERT(temp.is_valid()); 4106 ASSERT(temp.is_valid());
4113 // if (!object->IsJSValue()) return object. 4107 // if (!object->IsJSValue()) return object.
4114 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg()); 4108 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
4115 leave.Branch(not_equal); 4109 leave.Branch(not_equal);
4116 __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset)); 4110 __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
4117 object.Unuse(); 4111 object.Unuse();
4118 frame_->SetElementAt(0, &temp); 4112 frame_->SetElementAt(0, &temp);
4119 leave.Bind(); 4113 leave.Bind();
(...skipping 147 matching lines...)
4267 4261
4268 // 'true' => true. 4262 // 'true' => true.
4269 __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex); 4263 __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
4270 dest->true_target()->Branch(equal); 4264 dest->true_target()->Branch(equal);
4271 4265
4272 // 'undefined' => false. 4266 // 'undefined' => false.
4273 __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex); 4267 __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
4274 dest->false_target()->Branch(equal); 4268 dest->false_target()->Branch(equal);
4275 4269
4276 // Smi => false iff zero. 4270 // Smi => false iff zero.
4277 ASSERT(kSmiTag == 0); 4271 Condition equals = masm_->CheckSmiEqualsConstant(value.reg(), 0);
4278 __ testl(value.reg(), value.reg()); 4272 dest->false_target()->Branch(equals);
4279 dest->false_target()->Branch(zero); 4273 Condition is_smi = masm_->CheckSmi(value.reg());
4280 __ testl(value.reg(), Immediate(kSmiTagMask)); 4274 dest->true_target()->Branch(is_smi);
4281 dest->true_target()->Branch(zero);
4282 4275
4283 // Call the stub for all other cases. 4276 // Call the stub for all other cases.
4284 frame_->Push(&value); // Undo the Pop() from above. 4277 frame_->Push(&value); // Undo the Pop() from above.
4285 ToBooleanStub stub; 4278 ToBooleanStub stub;
4286 Result temp = frame_->CallStub(&stub, 1); 4279 Result temp = frame_->CallStub(&stub, 1);
4287 // Convert the result to a condition code. 4280 // Convert the result to a condition code.
4288 __ testq(temp.reg(), temp.reg()); 4281 __ testq(temp.reg(), temp.reg());
4289 temp.Unuse(); 4282 temp.Unuse();
4290 dest->Split(not_equal); 4283 dest->Split(not_equal);
4291 } 4284 }
(...skipping 641 matching lines...)
4933 left_side.ToRegister(); 4926 left_side.ToRegister();
4934 4927
4935 // Here we split control flow to the stub call and inlined cases 4928 // Here we split control flow to the stub call and inlined cases
4936 // before finally splitting it to the control destination. We use 4929 // before finally splitting it to the control destination. We use
4937 // a jump target and branching to duplicate the virtual frame at 4930 // a jump target and branching to duplicate the virtual frame at
4938 // the first split. We manually handle the off-frame references 4931 // the first split. We manually handle the off-frame references
4939 // by reconstituting them on the non-fall-through path. 4932 // by reconstituting them on the non-fall-through path.
4940 JumpTarget is_smi; 4933 JumpTarget is_smi;
4941 Register left_reg = left_side.reg(); 4934 Register left_reg = left_side.reg();
4942 Handle<Object> right_val = right_side.handle(); 4935 Handle<Object> right_val = right_side.handle();
4943 __ testl(left_side.reg(), Immediate(kSmiTagMask)); 4936
4944 is_smi.Branch(zero, taken); 4937 Condition left_is_smi = masm_->CheckSmi(left_side.reg());
4938 is_smi.Branch(left_is_smi);
4945 4939
4946 // Setup and call the compare stub. 4940 // Setup and call the compare stub.
4947 CompareStub stub(cc, strict); 4941 CompareStub stub(cc, strict);
4948 Result result = frame_->CallStub(&stub, &left_side, &right_side); 4942 Result result = frame_->CallStub(&stub, &left_side, &right_side);
4949 result.ToRegister(); 4943 result.ToRegister();
4950 __ testq(result.reg(), result.reg()); 4944 __ testq(result.reg(), result.reg());
4951 result.Unuse(); 4945 result.Unuse();
4952 dest->true_target()->Branch(cc); 4946 dest->true_target()->Branch(cc);
4953 dest->false_target()->Jump(); 4947 dest->false_target()->Jump();
4954 4948
(...skipping 20 matching lines...)
4975 __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex); 4969 __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
4976 if (strict) { 4970 if (strict) {
4977 operand.Unuse(); 4971 operand.Unuse();
4978 dest->Split(equal); 4972 dest->Split(equal);
4979 } else { 4973 } else {
4980 // The 'null' value is only equal to 'undefined' if using non-strict 4974 // The 'null' value is only equal to 'undefined' if using non-strict
4981 // comparisons. 4975 // comparisons.
4982 dest->true_target()->Branch(equal); 4976 dest->true_target()->Branch(equal);
4983 __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex); 4977 __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
4984 dest->true_target()->Branch(equal); 4978 dest->true_target()->Branch(equal);
4985 __ testl(operand.reg(), Immediate(kSmiTagMask)); 4979 Condition is_smi = masm_->CheckSmi(operand.reg());
4986 dest->false_target()->Branch(equal); 4980 dest->false_target()->Branch(is_smi);
4987 4981
4988 // It can be an undetectable object. 4982 // It can be an undetectable object.
4989 // Use a scratch register in preference to spilling operand.reg(). 4983 // Use a scratch register in preference to spilling operand.reg().
4990 Result temp = allocator()->Allocate(); 4984 Result temp = allocator()->Allocate();
4991 ASSERT(temp.is_valid()); 4985 ASSERT(temp.is_valid());
4992 __ movq(temp.reg(), 4986 __ movq(temp.reg(),
4993 FieldOperand(operand.reg(), HeapObject::kMapOffset)); 4987 FieldOperand(operand.reg(), HeapObject::kMapOffset));
4994 __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset), 4988 __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
4995 Immediate(1 << Map::kIsUndetectable)); 4989 Immediate(1 << Map::kIsUndetectable));
4996 temp.Unuse(); 4990 temp.Unuse();
(...skipping 19 matching lines...)
5016 } else { 5010 } else {
5017 // Here we split control flow to the stub call and inlined cases 5011 // Here we split control flow to the stub call and inlined cases
5018 // before finally splitting it to the control destination. We use 5012 // before finally splitting it to the control destination. We use
5019 // a jump target and branching to duplicate the virtual frame at 5013 // a jump target and branching to duplicate the virtual frame at
5020 // the first split. We manually handle the off-frame references 5014 // the first split. We manually handle the off-frame references
5021 // by reconstituting them on the non-fall-through path. 5015 // by reconstituting them on the non-fall-through path.
5022 JumpTarget is_smi; 5016 JumpTarget is_smi;
5023 Register left_reg = left_side.reg(); 5017 Register left_reg = left_side.reg();
5024 Register right_reg = right_side.reg(); 5018 Register right_reg = right_side.reg();
5025 5019
5026 __ movq(kScratchRegister, left_reg); 5020 Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
5027 __ or_(kScratchRegister, right_reg); 5021 is_smi.Branch(both_smi);
5028 __ testl(kScratchRegister, Immediate(kSmiTagMask));
5029 is_smi.Branch(zero, taken);
5030 // When non-smi, call out to the compare stub. 5022 // When non-smi, call out to the compare stub.
5031 CompareStub stub(cc, strict); 5023 CompareStub stub(cc, strict);
5032 Result answer = frame_->CallStub(&stub, &left_side, &right_side); 5024 Result answer = frame_->CallStub(&stub, &left_side, &right_side);
5033 __ testl(answer.reg(), answer.reg()); // Sets both zero and sign flags. 5025 __ testl(answer.reg(), answer.reg()); // Sets both zero and sign flags.
5034 answer.Unuse(); 5026 answer.Unuse();
5035 dest->true_target()->Branch(cc); 5027 dest->true_target()->Branch(cc);
5036 dest->false_target()->Jump(); 5028 dest->false_target()->Jump();
5037 5029
5038 is_smi.Bind(); 5030 is_smi.Bind();
5039 left_side = Result(left_reg); 5031 left_side = Result(left_reg);
(...skipping 270 matching lines...)
5310 DeferredCode* deferred = NULL; 5302 DeferredCode* deferred = NULL;
5311 if (reversed) { 5303 if (reversed) {
5312 deferred = new DeferredInlineSmiAddReversed(operand->reg(), 5304 deferred = new DeferredInlineSmiAddReversed(operand->reg(),
5313 smi_value, 5305 smi_value,
5314 overwrite_mode); 5306 overwrite_mode);
5315 } else { 5307 } else {
5316 deferred = new DeferredInlineSmiAdd(operand->reg(), 5308 deferred = new DeferredInlineSmiAdd(operand->reg(),
5317 smi_value, 5309 smi_value,
5318 overwrite_mode); 5310 overwrite_mode);
5319 } 5311 }
5320 __ testl(operand->reg(), Immediate(kSmiTagMask)); 5312 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5321 deferred->Branch(not_zero); 5313 __ SmiAddConstant(operand->reg(),
5322 // A smi currently fits in a 32-bit Immediate. 5314 operand->reg(),
5323 __ addl(operand->reg(), Immediate(smi_value)); 5315 int_value,
5324 Label add_success; 5316 deferred->entry_label());
5325 __ j(no_overflow, &add_success);
5326 __ subl(operand->reg(), Immediate(smi_value));
5327 deferred->Jump();
5328 __ bind(&add_success);
5329 deferred->BindExit(); 5317 deferred->BindExit();
5330 frame_->Push(operand); 5318 frame_->Push(operand);
5331 break; 5319 break;
5332 } 5320 }
5333 5321
5334 case Token::SUB: { 5322 case Token::SUB: {
5335 if (reversed) { 5323 if (reversed) {
5336 Result constant_operand(value); 5324 Result constant_operand(value);
5337 LikelySmiBinaryOperation(op, &constant_operand, operand, 5325 LikelySmiBinaryOperation(op, &constant_operand, operand,
5338 overwrite_mode); 5326 overwrite_mode);
5339 } else { 5327 } else {
5340 operand->ToRegister(); 5328 operand->ToRegister();
5341 frame_->Spill(operand->reg()); 5329 frame_->Spill(operand->reg());
5342 DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(), 5330 DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
5343 smi_value, 5331 smi_value,
5344 overwrite_mode); 5332 overwrite_mode);
5345 __ testl(operand->reg(), Immediate(kSmiTagMask)); 5333 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5346 deferred->Branch(not_zero);
5347 // A smi currently fits in a 32-bit Immediate. 5334 // A smi currently fits in a 32-bit Immediate.
5348 __ subl(operand->reg(), Immediate(smi_value)); 5335 __ SmiSubConstant(operand->reg(),
5349 Label add_success; 5336 operand->reg(),
5350 __ j(no_overflow, &add_success); 5337 int_value,
5351 __ addl(operand->reg(), Immediate(smi_value)); 5338 deferred->entry_label());
5352 deferred->Jump();
5353 __ bind(&add_success);
5354 deferred->BindExit(); 5339 deferred->BindExit();
5355 frame_->Push(operand); 5340 frame_->Push(operand);
5356 } 5341 }
5357 break; 5342 break;
5358 } 5343 }
5359 5344
5360 case Token::SAR: 5345 case Token::SAR:
5361 if (reversed) { 5346 if (reversed) {
5362 Result constant_operand(value); 5347 Result constant_operand(value);
5363 LikelySmiBinaryOperation(op, &constant_operand, operand, 5348 LikelySmiBinaryOperation(op, &constant_operand, operand,
5364 overwrite_mode); 5349 overwrite_mode);
5365 } else { 5350 } else {
5366 // Only the least significant 5 bits of the shift value are used. 5351 // Only the least significant 5 bits of the shift value are used.
5367 // In the slow case, this masking is done inside the runtime call. 5352 // In the slow case, this masking is done inside the runtime call.
5368 int shift_value = int_value & 0x1f; 5353 int shift_value = int_value & 0x1f;
5369 operand->ToRegister(); 5354 operand->ToRegister();
5370 frame_->Spill(operand->reg()); 5355 frame_->Spill(operand->reg());
5371 DeferredInlineSmiOperation* deferred = 5356 DeferredInlineSmiOperation* deferred =
5372 new DeferredInlineSmiOperation(op, 5357 new DeferredInlineSmiOperation(op,
5373 operand->reg(), 5358 operand->reg(),
5374 operand->reg(), 5359 operand->reg(),
5375 smi_value, 5360 smi_value,
5376 overwrite_mode); 5361 overwrite_mode);
5377 __ testl(operand->reg(), Immediate(kSmiTagMask)); 5362 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5378 deferred->Branch(not_zero); 5363 __ SmiShiftArithmeticRightConstant(operand->reg(),
5379 if (shift_value > 0) { 5364 operand->reg(),
5380 __ sarl(operand->reg(), Immediate(shift_value)); 5365 shift_value);
5381 __ and_(operand->reg(), Immediate(~kSmiTagMask));
5382 }
5383 deferred->BindExit(); 5366 deferred->BindExit();
5384 frame_->Push(operand); 5367 frame_->Push(operand);
5385 } 5368 }
5386 break; 5369 break;
5387 5370
5388 case Token::SHR: 5371 case Token::SHR:
5389 if (reversed) { 5372 if (reversed) {
5390 Result constant_operand(value); 5373 Result constant_operand(value);
5391 LikelySmiBinaryOperation(op, &constant_operand, operand, 5374 LikelySmiBinaryOperation(op, &constant_operand, operand,
5392 overwrite_mode); 5375 overwrite_mode);
5393 } else { 5376 } else {
5394 // Only the least significant 5 bits of the shift value are used. 5377 // Only the least significant 5 bits of the shift value are used.
5395 // In the slow case, this masking is done inside the runtime call. 5378 // In the slow case, this masking is done inside the runtime call.
5396 int shift_value = int_value & 0x1f; 5379 int shift_value = int_value & 0x1f;
5397 operand->ToRegister(); 5380 operand->ToRegister();
5398 Result answer = allocator()->Allocate(); 5381 Result answer = allocator()->Allocate();
5399 ASSERT(answer.is_valid()); 5382 ASSERT(answer.is_valid());
5400 DeferredInlineSmiOperation* deferred = 5383 DeferredInlineSmiOperation* deferred =
5401 new DeferredInlineSmiOperation(op, 5384 new DeferredInlineSmiOperation(op,
5402 answer.reg(), 5385 answer.reg(),
5403 operand->reg(), 5386 operand->reg(),
5404 smi_value, 5387 smi_value,
5405 overwrite_mode); 5388 overwrite_mode);
5406 __ testl(operand->reg(), Immediate(kSmiTagMask)); 5389 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5407 deferred->Branch(not_zero); 5390 __ SmiShiftLogicRightConstant(answer.reg(),
William Hesse 2009/09/10 11:13:22 ShiftLogicalRight
Lasse Reichstein 2009/09/10 12:28:11 Will rename LogicRight into LogicalRight.
5408 __ movl(answer.reg(), operand->reg()); 5391 operand->reg(),
5409 __ sarl(answer.reg(), Immediate(kSmiTagSize)); 5392 shift_value,
5410 __ shrl(answer.reg(), Immediate(shift_value)); 5393 deferred->entry_label());
5411 // A negative Smi shifted right two is in the positive Smi range. 5394 deferred->BindExit();
5412 if (shift_value < 2) {
5413 __ testl(answer.reg(), Immediate(0xc0000000));
5414 deferred->Branch(not_zero);
5415 }
5416 operand->Unuse(); 5395 operand->Unuse();
5417 ASSERT(kSmiTag == 0);
5418 ASSERT(kSmiTagSize == 1);
5419 __ addl(answer.reg(), answer.reg());
5420 deferred->BindExit();
5421 frame_->Push(&answer); 5396 frame_->Push(&answer);
5422 } 5397 }
5423 break; 5398 break;
5424 5399
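(For reference on the rename discussed in the thread above: the deleted right-shift sequence in the left column suggests what SmiShiftLogicalRightConstant encapsulates — untag, unsigned shift, bail out when the result cannot be retagged as a positive smi, retag. A rough sketch under those assumptions, with a hypothetical body; the real macro lives in macro-assembler-x64.cc:

  void MacroAssembler::SmiShiftLogicalRightConstant(
      Register dst, Register src, int shift_value, Label* on_not_smi_result) {
    movl(dst, src);
    sarl(dst, Immediate(kSmiTagSize));   // untag
    shrl(dst, Immediate(shift_value));   // unsigned shift of the raw value
    if (shift_value < 2) {
      // A shift by less than two can leave a value outside the positive
      // smi range; fall back to the deferred slow case.
      testl(dst, Immediate(0xc0000000));
      j(not_zero, on_not_smi_result);
    }
    addl(dst, dst);                      // retag: kSmiTag == 0, kSmiTagSize == 1
  }
)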
5425 case Token::SHL: 5400 case Token::SHL:
5426 if (reversed) { 5401 if (reversed) {
5427 Result constant_operand(value); 5402 Result constant_operand(value);
5428 LikelySmiBinaryOperation(op, &constant_operand, operand, 5403 LikelySmiBinaryOperation(op, &constant_operand, operand,
5429 overwrite_mode); 5404 overwrite_mode);
5430 } else { 5405 } else {
5431 // Only the least significant 5 bits of the shift value are used. 5406 // Only the least significant 5 bits of the shift value are used.
5432 // In the slow case, this masking is done inside the runtime call. 5407 // In the slow case, this masking is done inside the runtime call.
5433 int shift_value = int_value & 0x1f; 5408 int shift_value = int_value & 0x1f;
5434 operand->ToRegister(); 5409 operand->ToRegister();
5435 if (shift_value == 0) { 5410 if (shift_value == 0) {
5436 // Spill operand so it can be overwritten in the slow case. 5411 // Spill operand so it can be overwritten in the slow case.
5437 frame_->Spill(operand->reg()); 5412 frame_->Spill(operand->reg());
5438 DeferredInlineSmiOperation* deferred = 5413 DeferredInlineSmiOperation* deferred =
5439 new DeferredInlineSmiOperation(op, 5414 new DeferredInlineSmiOperation(op,
5440 operand->reg(), 5415 operand->reg(),
5441 operand->reg(), 5416 operand->reg(),
5442 smi_value, 5417 smi_value,
5443 overwrite_mode); 5418 overwrite_mode);
5444 __ testl(operand->reg(), Immediate(kSmiTagMask)); 5419 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5445 deferred->Branch(not_zero);
5446 deferred->BindExit(); 5420 deferred->BindExit();
5447 frame_->Push(operand); 5421 frame_->Push(operand);
5448 } else { 5422 } else {
5449 // Use a fresh temporary for nonzero shift values. 5423 // Use a fresh temporary for nonzero shift values.
5450 Result answer = allocator()->Allocate(); 5424 Result answer = allocator()->Allocate();
5451 ASSERT(answer.is_valid()); 5425 ASSERT(answer.is_valid());
5452 DeferredInlineSmiOperation* deferred = 5426 DeferredInlineSmiOperation* deferred =
5453 new DeferredInlineSmiOperation(op, 5427 new DeferredInlineSmiOperation(op,
5454 answer.reg(), 5428 answer.reg(),
5455 operand->reg(), 5429 operand->reg(),
5456 smi_value, 5430 smi_value,
5457 overwrite_mode); 5431 overwrite_mode);
5458 __ testl(operand->reg(), Immediate(kSmiTagMask)); 5432 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5459 deferred->Branch(not_zero); 5433 __ SmiShiftLeftConstant(answer.reg(),
5460 __ movl(answer.reg(), operand->reg()); 5434 operand->reg(),
5461 ASSERT(kSmiTag == 0); // adjust code if not the case 5435 shift_value,
5462 // We do no shifts, only the Smi conversion, if shift_value is 1. 5436 deferred->entry_label());
5463 if (shift_value > 1) {
5464 __ shll(answer.reg(), Immediate(shift_value - 1));
5465 }
5466 // Convert int result to Smi, checking that it is in int range.
5467 ASSERT(kSmiTagSize == 1); // adjust code if not the case
5468 __ addl(answer.reg(), answer.reg());
5469 deferred->Branch(overflow);
5470 deferred->BindExit(); 5437 deferred->BindExit();
5471 operand->Unuse(); 5438 operand->Unuse();
5472 frame_->Push(&answer); 5439 frame_->Push(&answer);
5473 } 5440 }
5474 } 5441 }
5475 break; 5442 break;
5476 5443
5477 case Token::BIT_OR: 5444 case Token::BIT_OR:
5478 case Token::BIT_XOR: 5445 case Token::BIT_XOR:
5479 case Token::BIT_AND: { 5446 case Token::BIT_AND: {
5480 operand->ToRegister(); 5447 operand->ToRegister();
5481 frame_->Spill(operand->reg()); 5448 frame_->Spill(operand->reg());
5482 if (reversed) { 5449 if (reversed) {
5483 // Bit operations with a constant smi are commutative. 5450 // Bit operations with a constant smi are commutative.
5484 // We can swap left and right operands with no problem. 5451 // We can swap left and right operands with no problem.
5485 // Swap left and right overwrite modes. 0->0, 1->2, 2->1. 5452 // Swap left and right overwrite modes. 0->0, 1->2, 2->1.
5486 overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3); 5453 overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
5487 } 5454 }
5488 DeferredCode* deferred = new DeferredInlineSmiOperation(op, 5455 DeferredCode* deferred = new DeferredInlineSmiOperation(op,
5489 operand->reg(), 5456 operand->reg(),
5490 operand->reg(), 5457 operand->reg(),
5491 smi_value, 5458 smi_value,
5492 overwrite_mode); 5459 overwrite_mode);
5493 __ testl(operand->reg(), Immediate(kSmiTagMask)); 5460 __ JumpIfNotSmi(operand->reg(), deferred->entry_label());
5494 deferred->Branch(not_zero);
5495 if (op == Token::BIT_AND) { 5461 if (op == Token::BIT_AND) {
5496 __ and_(operand->reg(), Immediate(smi_value)); 5462 __ SmiAndConstant(operand->reg(), operand->reg(), int_value);
5497 } else if (op == Token::BIT_XOR) { 5463 } else if (op == Token::BIT_XOR) {
5498 if (int_value != 0) { 5464 if (int_value != 0) {
5499 __ xor_(operand->reg(), Immediate(smi_value)); 5465 __ SmiXorConstant(operand->reg(), operand->reg(), int_value);
5500 } 5466 }
5501 } else { 5467 } else {
5502 ASSERT(op == Token::BIT_OR); 5468 ASSERT(op == Token::BIT_OR);
5503 if (int_value != 0) { 5469 if (int_value != 0) {
5504 __ or_(operand->reg(), Immediate(smi_value)); 5470 __ SmiOrConstant(operand->reg(), operand->reg(), int_value);
5505 } 5471 }
5506 } 5472 }
5507 deferred->BindExit(); 5473 deferred->BindExit();
5508 frame_->Push(operand); 5474 frame_->Push(operand);
5509 break; 5475 break;
5510 } 5476 }
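For reference, the overwrite-mode swap above is plain modular arithmetic, assuming the usual ordering NO_OVERWRITE = 0, OVERWRITE_LEFT = 1, OVERWRITE_RIGHT = 2: (2 * 0) % 3 = 0, (2 * 1) % 3 = 2, and (2 * 2) % 3 = 1, so the two overwrite modes exchange while NO_OVERWRITE stays fixed.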
5511 5477
5512 // Generate inline code for mod of powers of 2 and negative powers of 2. 5478 // Generate inline code for mod of powers of 2 and negative powers of 2.
5513 case Token::MOD: 5479 case Token::MOD:
5514 if (!reversed && 5480 if (!reversed &&
5515 int_value != 0 && 5481 int_value != 0 &&
5516 (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) { 5482 (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
5517 operand->ToRegister(); 5483 operand->ToRegister();
5518 frame_->Spill(operand->reg()); 5484 frame_->Spill(operand->reg());
5519 DeferredCode* deferred = new DeferredInlineSmiOperation(op, 5485 DeferredCode* deferred = new DeferredInlineSmiOperation(op,
5520 operand->reg(), 5486 operand->reg(),
5521 operand->reg(), 5487 operand->reg(),
5522 smi_value, 5488 smi_value,
5523 overwrite_mode); 5489 overwrite_mode);
5524 // Check for negative or non-Smi left hand side. 5490 // Check for negative or non-Smi left hand side.
5525 __ testl(operand->reg(), 5491 __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
5526 Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000)));
5527 deferred->Branch(not_zero);
5528 if (int_value < 0) int_value = -int_value; 5492 if (int_value < 0) int_value = -int_value;
5529 if (int_value == 1) { 5493 if (int_value == 1) {
5530 __ movl(operand->reg(), Immediate(Smi::FromInt(0))); 5494 __ movl(operand->reg(), Immediate(Smi::FromInt(0)));
5531 } else { 5495 } else {
5532 __ and_(operand->reg(), Immediate((int_value << kSmiTagSize) - 1)); 5496 __ SmiAndConstant(operand->reg(), operand->reg(), int_value - 1);
5533 } 5497 }
5534 deferred->BindExit(); 5498 deferred->BindExit();
5535 frame_->Push(operand); 5499 frame_->Push(operand);
5536 break; // This break only applies if we generated code for MOD. 5500 break; // This break only applies if we generated code for MOD.
5537 } 5501 }
5538 // Fall through if we did not find a power of 2 on the right hand side! 5502 // Fall through if we did not find a power of 2 on the right hand side!
5539 // The next case must be the default. 5503 // The next case must be the default.
5540 5504
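The power-of-two MOD fast path above relies on x % m == x & (m - 1) for non-negative x and m a power of two, and on masking commuting with smi tagging; SmiAndConstant is presumed to tag the constant the same way. A one-line C++ sketch, assuming the one-bit zero tag:

    int32_t SmiModPowerOfTwoSketch(int32_t tagged_x, int32_t m) {  // x >= 0
      return tagged_x & ((m - 1) << 1);  // SmiTag(x) & SmiTag(m-1) == SmiTag(x % m)
    }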
5541 default: { 5505 default: {
5542 Result constant_operand(value); 5506 Result constant_operand(value);
(...skipping 81 matching lines...)
5624 frame_->Spill(rax); 5588 frame_->Spill(rax);
5625 frame_->Spill(rdx); 5589 frame_->Spill(rdx);
5626 5590
5627 // Check that left and right are smi tagged. 5591 // Check that left and right are smi tagged.
5628 DeferredInlineBinaryOperation* deferred = 5592 DeferredInlineBinaryOperation* deferred =
5629 new DeferredInlineBinaryOperation(op, 5593 new DeferredInlineBinaryOperation(op,
5630 (op == Token::DIV) ? rax : rdx, 5594 (op == Token::DIV) ? rax : rdx,
5631 left->reg(), 5595 left->reg(),
5632 right->reg(), 5596 right->reg(),
5633 overwrite_mode); 5597 overwrite_mode);
5634 if (left->reg().is(right->reg())) { 5598 __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
5635 __ testl(left->reg(), Immediate(kSmiTagMask));
5636 } else {
5637 // Use the quotient register as a scratch for the tag check.
5638 if (!left_is_in_rax) __ movq(rax, left->reg());
5639 left_is_in_rax = false; // About to destroy the value in rax.
5640 __ or_(rax, right->reg());
5641 ASSERT(kSmiTag == 0); // Adjust test if not the case.
5642 __ testl(rax, Immediate(kSmiTagMask));
5643 }
5644 deferred->Branch(not_zero);
5645 5599
5646 // All operations on the smi values are on 32-bit registers, which are
5647 // zero-extended into 64-bits by all 32-bit operations.
5648 if (!left_is_in_rax) __ movl(rax, left->reg());
5649 // Sign extend eax into edx:eax.
5650 __ cdq();
5651 // Check for 0 divisor.
5652 __ testl(right->reg(), right->reg());
5653 deferred->Branch(zero);
5654 // Divide rdx:rax by the right operand.
5655 __ idivl(right->reg());
5656
5657 // Complete the operation.
5658 if (op == Token::DIV) { 5600 if (op == Token::DIV) {
5659 // Check for negative zero result. If the result is zero, and the 5601 __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
5660 // divisor is negative, return a floating point negative zero.
5661 Label non_zero_result;
5662 __ testl(left->reg(), left->reg());
5663 __ j(not_zero, &non_zero_result);
5664 __ testl(right->reg(), right->reg());
5665 deferred->Branch(negative);
5666 // The frame is identical on all paths reaching this label.
5667 __ bind(&non_zero_result);
5668 // Check for the corner case of dividing the most negative smi by
5669 // -1. We cannot use the overflow flag, since it is not set by
5670 // idiv instruction.
5671 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
5672 __ cmpl(rax, Immediate(0x40000000));
5673 deferred->Branch(equal);
5674 // Check that the remainder is zero.
5675 __ testl(rdx, rdx);
5676 deferred->Branch(not_zero);
5677 // Tag the result and store it in the quotient register.
5678 ASSERT(kSmiTagSize == times_2); // adjust code if not the case
5679 __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
5680 deferred->BindExit(); 5602 deferred->BindExit();
5681 left->Unuse(); 5603 left->Unuse();
5682 right->Unuse(); 5604 right->Unuse();
5683 frame_->Push(&quotient); 5605 frame_->Push(&quotient);
5684 } else { 5606 } else {
5685 ASSERT(op == Token::MOD); 5607 ASSERT(op == Token::MOD);
5686 // Check for a negative zero result. If the result is zero, and the 5608 __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
5687 // dividend is negative, return a floating point negative zero.
5688 Label non_zero_result;
5689 __ testl(rdx, rdx);
5690 __ j(not_zero, &non_zero_result);
5691 __ testl(left->reg(), left->reg());
5692 deferred->Branch(negative);
5693 // The frame is identical on all paths reaching this label.
5694 __ bind(&non_zero_result);
5695 deferred->BindExit(); 5609 deferred->BindExit();
5696 left->Unuse(); 5610 left->Unuse();
5697 right->Unuse(); 5611 right->Unuse();
5698 frame_->Push(&remainder); 5612 frame_->Push(&remainder);
5699 } 5613 }
5700 return; 5614 return;
5701 } 5615 }
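SmiDiv and SmiMod bundle the checks the removed lines performed by hand. A sketch of the conditions the division fast path must reject, assuming 31-bit smi values in [-(1 << 30), (1 << 30) - 1]:

    bool SmiDivFastPathSketch(int32_t left, int32_t right, int32_t* quotient) {
      if (right == 0) return false;                         // division by zero
      if (left == 0 && right < 0) return false;             // 0 / -n is -0.0
      if (left == -(1 << 30) && right == -1) return false;  // 1 << 30 is no smi
      if (left % right != 0) return false;                  // non-integer: slow case
      *quotient = left / right;
      return true;
    }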
5702 5616
5703 // Special handling of shift operations because they use fixed 5617 // Special handling of shift operations because they use fixed
5704 // registers. 5618 // registers.
(...skipping 18 matching lines...)
5723 // Check that both operands are smis using the answer register as a 5637 // Check that both operands are smis using the answer register as a
5724 // temporary. 5638 // temporary.
5725 DeferredInlineBinaryOperation* deferred = 5639 DeferredInlineBinaryOperation* deferred =
5726 new DeferredInlineBinaryOperation(op, 5640 new DeferredInlineBinaryOperation(op,
5727 answer.reg(), 5641 answer.reg(),
5728 left->reg(), 5642 left->reg(),
5729 rcx, 5643 rcx,
5730 overwrite_mode); 5644 overwrite_mode);
5731 __ movq(answer.reg(), left->reg()); 5645 __ movq(answer.reg(), left->reg());
5732 __ or_(answer.reg(), rcx); 5646 __ or_(answer.reg(), rcx);
5733 __ testl(answer.reg(), Immediate(kSmiTagMask)); 5647 __ JumpIfNotSmi(answer.reg(), deferred->entry_label());
5734 deferred->Branch(not_zero);
5735 5648
5736 // Untag both operands.
5737 __ movl(answer.reg(), left->reg());
5738 __ sarl(answer.reg(), Immediate(kSmiTagSize));
5739 __ sarl(rcx, Immediate(kSmiTagSize));
5740 // Perform the operation. 5649 // Perform the operation.
5741 switch (op) { 5650 switch (op) {
5742 case Token::SAR: 5651 case Token::SAR:
5743 __ sarl(answer.reg()); 5652 __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
5744 // No checks of result necessary
5745 break; 5653 break;
5746 case Token::SHR: { 5654 case Token::SHR: {
5747 Label result_ok; 5655 __ SmiShiftLogicRight(answer.reg(),
5748 __ shrl(answer.reg()); 5656 left->reg(),
5749 // Check that the *unsigned* result fits in a smi. Neither of 5657 rcx,
5750 // the two high-order bits can be set: 5658 deferred->entry_label());
5751 // * 0x80000000: high bit would be lost when smi tagging.
5752 // * 0x40000000: this number would convert to negative when smi
5753 // tagging.
5754 // These two cases can only happen with shifts by 0 or 1 when
5755 // handed a valid smi. If the answer cannot be represented by a
5756 // smi, restore the left and right arguments, and jump to slow
5757 // case. The low bit of the left argument may be lost, but only
5758 // in a case where it is dropped anyway.
5759 __ testl(answer.reg(), Immediate(0xc0000000));
5760 __ j(zero, &result_ok);
5761 ASSERT(kSmiTag == 0);
5762 __ shl(rcx, Immediate(kSmiTagSize));
5763 deferred->Jump();
5764 __ bind(&result_ok);
5765 break; 5659 break;
5766 } 5660 }
5767 case Token::SHL: { 5661 case Token::SHL: {
5768 Label result_ok; 5662 __ SmiShiftLeft(answer.reg(),
5769 __ shl(answer.reg()); 5663 left->reg(),
5770 // Check that the *signed* result fits in a smi. 5664 rcx,
5771 __ cmpl(answer.reg(), Immediate(0xc0000000)); 5665 deferred->entry_label());
5772 __ j(positive, &result_ok);
5773 ASSERT(kSmiTag == 0);
5774 __ shl(rcx, Immediate(kSmiTagSize));
5775 deferred->Jump();
5776 __ bind(&result_ok);
5777 break; 5666 break;
5778 } 5667 }
5779 default: 5668 default:
5780 UNREACHABLE(); 5669 UNREACHABLE();
5781 } 5670 }
5782 // Smi-tag the result in answer.
5783 ASSERT(kSmiTagSize == 1); // Adjust code if not the case.
5784 __ lea(answer.reg(),
5785 Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
5786 deferred->BindExit(); 5671 deferred->BindExit();
5787 left->Unuse(); 5672 left->Unuse();
5788 right->Unuse(); 5673 right->Unuse();
5789 frame_->Push(&answer); 5674 frame_->Push(&answer);
5790 return; 5675 return;
5791 } 5676 }
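Of the three shifts, only SAR needs no range check, since arithmetic right shift can only shrink a smi's magnitude; SHR and SHL must re-validate, which is why those two macros take the deferred entry label. A sketch of the SAR path, assuming a one-bit tag and the JS rule that shift counts are taken mod 32:

    int32_t SmiSarSketch(int32_t tagged, int32_t count) {
      int32_t untagged = tagged >> 1;            // untag, keeping the sign
      return (untagged >> (count & 0x1f)) << 1;  // shift mod 32, re-tag
    }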
5792 5677
5793 // Handle the other binary operations. 5678 // Handle the other binary operations.
5794 left->ToRegister(); 5679 left->ToRegister();
5795 right->ToRegister(); 5680 right->ToRegister();
5796 // A newly allocated register answer is used to hold the answer. The 5681 // A newly allocated register answer is used to hold the answer. The
5797 // registers containing left and right are not modified so they don't 5682 // registers containing left and right are not modified so they don't
5798 // need to be spilled in the fast case. 5683 // need to be spilled in the fast case.
5799 Result answer = allocator_->Allocate(); 5684 Result answer = allocator_->Allocate();
5800 ASSERT(answer.is_valid()); 5685 ASSERT(answer.is_valid());
5801 5686
5802 // Perform the smi tag check. 5687 // Perform the smi tag check.
5803 DeferredInlineBinaryOperation* deferred = 5688 DeferredInlineBinaryOperation* deferred =
5804 new DeferredInlineBinaryOperation(op, 5689 new DeferredInlineBinaryOperation(op,
5805 answer.reg(), 5690 answer.reg(),
5806 left->reg(), 5691 left->reg(),
5807 right->reg(), 5692 right->reg(),
5808 overwrite_mode); 5693 overwrite_mode);
5809 if (left->reg().is(right->reg())) { 5694 __ JumpIfNotBothSmi(left->reg(), right->reg(), deferred->entry_label());
5810 __ testl(left->reg(), Immediate(kSmiTagMask)); 5695
5811 } else {
5812 __ movq(answer.reg(), left->reg());
5813 __ or_(answer.reg(), right->reg());
5814 ASSERT(kSmiTag == 0); // Adjust test if not the case.
5815 __ testl(answer.reg(), Immediate(kSmiTagMask));
5816 }
5817 deferred->Branch(not_zero);
5818 __ movq(answer.reg(), left->reg());
5819 switch (op) { 5696 switch (op) {
5820 case Token::ADD: 5697 case Token::ADD:
5821 __ addl(answer.reg(), right->reg()); 5698 __ SmiAdd(answer.reg(),
5822 deferred->Branch(overflow); 5699 left->reg(),
5700 right->reg(),
5701 deferred->entry_label());
5823 break; 5702 break;
5824 5703
5825 case Token::SUB: 5704 case Token::SUB:
5826 __ subl(answer.reg(), right->reg()); 5705 __ SmiSub(answer.reg(),
5827 deferred->Branch(overflow); 5706 left->reg(),
5707 right->reg(),
5708 deferred->entry_label());
5828 break; 5709 break;
5829 5710
5830 case Token::MUL: { 5711 case Token::MUL: {
5831 // If the smi tag is 0 we can just leave the tag on one operand. 5712 __ SmiMul(answer.reg(),
5832 ASSERT(kSmiTag == 0); // Adjust code below if not the case. 5713 left->reg(),
5833 // Remove smi tag from the left operand (but keep sign). 5714 right->reg(),
5834 // Left-hand operand has been copied into answer. 5715 deferred->entry_label());
5835 __ sarl(answer.reg(), Immediate(kSmiTagSize));
5836 // Do multiplication of smis, leaving result in answer.
5837 __ imull(answer.reg(), right->reg());
5838 // Go slow on overflows.
5839 deferred->Branch(overflow);
5840 // Check for negative zero result. If product is zero, and one
5841 // argument is negative, go to slow case. The frame is unchanged
5842 // in this block, so local control flow can use a Label rather
5843 // than a JumpTarget.
5844 Label non_zero_result;
5845 __ testl(answer.reg(), answer.reg());
5846 __ j(not_zero, &non_zero_result);
5847 __ movq(answer.reg(), left->reg());
5848 __ or_(answer.reg(), right->reg());
5849 deferred->Branch(negative);
5850 __ xor_(answer.reg(), answer.reg()); // Positive 0 is correct.
5851 __ bind(&non_zero_result);
5852 break; 5716 break;
5853 } 5717 }
5854 5718
5855 case Token::BIT_OR: 5719 case Token::BIT_OR:
5856 __ or_(answer.reg(), right->reg()); 5720 __ SmiOr(answer.reg(), left->reg(), right->reg());
5857 break; 5721 break;
5858 5722
5859 case Token::BIT_AND: 5723 case Token::BIT_AND:
5860 __ and_(answer.reg(), right->reg()); 5724 __ SmiAnd(answer.reg(), left->reg(), right->reg());
5861 break; 5725 break;
5862 5726
5863 case Token::BIT_XOR: 5727 case Token::BIT_XOR:
5864 ASSERT(kSmiTag == 0); // Adjust code below if not the case. 5728 __ SmiXor(answer.reg(), left->reg(), right->reg());
5865 __ xor_(answer.reg(), right->reg());
5866 break; 5729 break;
5867 5730
5868 default: 5731 default:
5869 UNREACHABLE(); 5732 UNREACHABLE();
5870 break; 5733 break;
5871 } 5734 }
5872 deferred->BindExit(); 5735 deferred->BindExit();
5873 left->Unuse(); 5736 left->Unuse();
5874 right->Unuse(); 5737 right->Unuse();
5875 frame_->Push(&answer); 5738 frame_->Push(&answer);
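The additive macros exploit that tagging distributes over addition: (a << 1) + (b << 1) == (a + b) << 1, so tagged values are added directly and 32-bit signed overflow signals a result outside smi range. A sketch, with the same smi assumptions as above:

    bool SmiAddFastPathSketch(int32_t a_tagged, int32_t b_tagged, int32_t* out) {
      int64_t sum = (int64_t)a_tagged + b_tagged;  // widen to detect overflow
      if (sum != (int32_t)sum) return false;       // leave to the deferred code
      *out = (int32_t)sum;
      return true;
    }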
(...skipping 90 matching lines...)
5966 value = temp; 5829 value = temp;
5967 cgen_->frame()->Spill(value.reg()); // r12 may have been shared. 5830 cgen_->frame()->Spill(value.reg()); // r12 may have been shared.
5968 } 5831 }
5969 5832
5970 DeferredReferenceGetNamedValue* deferred = 5833 DeferredReferenceGetNamedValue* deferred =
5971 new DeferredReferenceGetNamedValue(value.reg(), 5834 new DeferredReferenceGetNamedValue(value.reg(),
5972 receiver.reg(), 5835 receiver.reg(),
5973 GetName()); 5836 GetName());
5974 5837
5975 // Check that the receiver is a heap object. 5838 // Check that the receiver is a heap object.
5976 __ testl(receiver.reg(), Immediate(kSmiTagMask)); 5839 __ JumpIfSmi(receiver.reg(), deferred->entry_label());
5977 deferred->Branch(zero);
5978 5840
5979 __ bind(deferred->patch_site()); 5841 __ bind(deferred->patch_site());
5980 // This is the map check instruction that will be patched (so we can't 5842 // This is the map check instruction that will be patched (so we can't
5981 // use the double underscore macro that may insert instructions). 5843 // use the double underscore macro that may insert instructions).
5982 // Initially use an invalid map to force a failure. 5844 // Initially use an invalid map to force a failure.
5983 masm->Move(kScratchRegister, Factory::null_value()); 5845 masm->Move(kScratchRegister, Factory::null_value());
5984 masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset), 5846 masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
5985 kScratchRegister); 5847 kScratchRegister);
5986 // This branch is always a forwards branch so it's always a fixed 5848 // This branch is always a forwards branch so it's always a fixed
5987 // size which allows the assert below to succeed and patching to work. 5849 // size which allows the assert below to succeed and patching to work.
(...skipping 51 matching lines...)
6039 DeferredReferenceGetKeyedValue* deferred = 5901 DeferredReferenceGetKeyedValue* deferred =
6040 new DeferredReferenceGetKeyedValue(index.reg(), 5902 new DeferredReferenceGetKeyedValue(index.reg(),
6041 receiver.reg(), 5903 receiver.reg(),
6042 key.reg(), 5904 key.reg(),
6043 is_global); 5905 is_global);
6044 5906
6045 // Check that the receiver is not a smi (only needed if this 5907 // Check that the receiver is not a smi (only needed if this
6046 // is not a load from the global context) and that it has the 5908 // is not a load from the global context) and that it has the
6047 // expected map. 5909 // expected map.
6048 if (!is_global) { 5910 if (!is_global) {
6049 __ testl(receiver.reg(), Immediate(kSmiTagMask)); 5911 __ JumpIfSmi(receiver.reg(), deferred->entry_label());
6050 deferred->Branch(zero);
6051 } 5912 }
6052 5913
6053 // Initially, use an invalid map. The map is patched in the IC 5914 // Initially, use an invalid map. The map is patched in the IC
6054 // initialization code. 5915 // initialization code.
6055 __ bind(deferred->patch_site()); 5916 __ bind(deferred->patch_site());
6056 // Use masm-> here instead of the double underscore macro since extra 5917 // Use masm-> here instead of the double underscore macro since extra
6057 // coverage code can interfere with the patching. 5918 // coverage code can interfere with the patching.
6058 masm->movq(kScratchRegister, Factory::null_value(), 5919 masm->movq(kScratchRegister, Factory::null_value(),
6059 RelocInfo::EMBEDDED_OBJECT); 5920 RelocInfo::EMBEDDED_OBJECT);
6060 masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset), 5921 masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
6061 kScratchRegister); 5922 kScratchRegister);
6062 deferred->Branch(not_equal); 5923 deferred->Branch(not_equal);
6063 5924
6064 // Check that the key is a non-negative smi. 5925 // Check that the key is a non-negative smi.
6065 __ testl(key.reg(), 5926 __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
6066 Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000u)));
6067 deferred->Branch(not_zero);
6068 5927
6069 // Get the elements array from the receiver and check that it 5928 // Get the elements array from the receiver and check that it
6070 // is not a dictionary. 5929 // is not a dictionary.
6071 __ movq(elements.reg(), 5930 __ movq(elements.reg(),
6072 FieldOperand(receiver.reg(), JSObject::kElementsOffset)); 5931 FieldOperand(receiver.reg(), JSObject::kElementsOffset));
6073 __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset), 5932 __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
6074 Factory::fixed_array_map()); 5933 Factory::fixed_array_map());
6075 deferred->Branch(not_equal); 5934 deferred->Branch(not_equal);
6076 5935
6077 // Shift the key to get the actual index value and check that 5936 // Shift the key to get the actual index value and check that
6078 // it is within bounds. 5937 // it is within bounds.
6079 __ movl(index.reg(), key.reg()); 5938 __ SmiToInteger32(index.reg(), key.reg());
6080 __ shrl(index.reg(), Immediate(kSmiTagSize));
6081 __ cmpl(index.reg(), 5939 __ cmpl(index.reg(),
6082 FieldOperand(elements.reg(), FixedArray::kLengthOffset)); 5940 FieldOperand(elements.reg(), FixedArray::kLengthOffset));
6083 deferred->Branch(above_equal); 5941 deferred->Branch(above_equal);
6084 5942
6085 // The index register holds the un-smi-tagged key. It has been 5943 // The index register holds the un-smi-tagged key. It has been
6086 // zero-extended to 64-bits, so it can be used directly as index in the 5944 // zero-extended to 64-bits, so it can be used directly as index in the
6087 // operand below. 5945 // operand below.
6088 // Load and check that the result is not the hole. We could 5946 // Load and check that the result is not the hole. We could
6089 // reuse the index or elements register for the value. 5947 // reuse the index or elements register for the value.
6090 // 5948 //
(...skipping 130 matching lines...)
6221 receiver.ToRegister(); 6079 receiver.ToRegister();
6222 6080
6223 DeferredReferenceSetKeyedValue* deferred = 6081 DeferredReferenceSetKeyedValue* deferred =
6224 new DeferredReferenceSetKeyedValue(value.reg(), 6082 new DeferredReferenceSetKeyedValue(value.reg(),
6225 key.reg(), 6083 key.reg(),
6226 receiver.reg()); 6084 receiver.reg());
6227 6085
6228 // Check that the value is a smi if it is not a constant. 6086 // Check that the value is a smi if it is not a constant.
6229 // We can skip the write barrier for smis and constants. 6087 // We can skip the write barrier for smis and constants.
6230 if (!value_is_constant) { 6088 if (!value_is_constant) {
6231 __ testl(value.reg(), Immediate(kSmiTagMask)); 6089 __ JumpIfNotSmi(value.reg(), deferred->entry_label());
6232 deferred->Branch(not_zero);
6233 } 6090 }
6234 6091
6235 // Check that the key is a non-negative smi. 6092 // Check that the key is a non-negative smi.
6236 __ testl(key.reg(), 6093 __ JumpIfNotPositiveSmi(key.reg(), deferred->entry_label());
6237 Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
6238 deferred->Branch(not_zero);
6239 // Ensure that the smi is zero-extended. This is not guaranteed. 6094 // Ensure that the smi is zero-extended. This is not guaranteed.
6240 __ movl(key.reg(), key.reg()); 6095 __ movl(key.reg(), key.reg());
6241 6096
6242 // Check that the receiver is not a smi. 6097 // Check that the receiver is not a smi.
6243 __ testl(receiver.reg(), Immediate(kSmiTagMask)); 6098 __ JumpIfSmi(receiver.reg(), deferred->entry_label());
6244 deferred->Branch(zero);
6245 6099
6246 // Check that the receiver is a JSArray. 6100 // Check that the receiver is a JSArray.
6247 __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister); 6101 __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
6248 deferred->Branch(not_equal); 6102 deferred->Branch(not_equal);
6249 6103
6250 // Check that the key is within bounds. Both the key and the 6104 // Check that the key is within bounds. Both the key and the
6251 // length of the JSArray are smis, so compare only low 32 bits. 6105 // length of the JSArray are smis, so compare only low 32 bits.
6252 __ cmpl(key.reg(), 6106 __ cmpl(key.reg(),
6253 FieldOperand(receiver.reg(), JSArray::kLengthOffset)); 6107 FieldOperand(receiver.reg(), JSArray::kLengthOffset));
6254 deferred->Branch(greater_equal); 6108 deferred->Branch(greater_equal);
(...skipping 12 matching lines...)
6267 // to the map address is always the same. 6121 // to the map address is always the same.
6268 masm->movq(kScratchRegister, Factory::fixed_array_map(), 6122 masm->movq(kScratchRegister, Factory::fixed_array_map(),
6269 RelocInfo::EMBEDDED_OBJECT); 6123 RelocInfo::EMBEDDED_OBJECT);
6270 __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset), 6124 __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
6271 kScratchRegister); 6125 kScratchRegister);
6272 deferred->Branch(not_equal); 6126 deferred->Branch(not_equal);
6273 6127
6274 // Store the value. 6128 // Store the value.
6275 ASSERT_EQ(1, kSmiTagSize); 6129 ASSERT_EQ(1, kSmiTagSize);
6276 ASSERT_EQ(0, kSmiTag); 6130 ASSERT_EQ(0, kSmiTag);
6131 // TODO(lrn): Find a way to abstract indexing by a smi.
6277 __ movq(Operand(tmp.reg(), 6132 __ movq(Operand(tmp.reg(),
6278 key.reg(), 6133 key.reg(),
6279 times_half_pointer_size, 6134 times_half_pointer_size,
6280 FixedArray::kHeaderSize - kHeapObjectTag), 6135 FixedArray::kHeaderSize - kHeapObjectTag),
6281 value.reg()); 6136 value.reg());
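The scale factor works because key.reg() still holds a tagged smi: with key == index << 1, times_half_pointer_size (scale 4) produces index * 8, one 8-byte element on x64. A sketch of the address arithmetic, with the header size left symbolic:

    uintptr_t ElementAddressSketch(uintptr_t elements, int32_t tagged_key,
                                   int header_size) {  // FixedArray::kHeaderSize
      const int kHeapObjectTag = 1;  // heap pointers carry a 1 tag
      return elements + (uintptr_t)tagged_key * 4 + header_size - kHeapObjectTag;
    }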
6282 __ IncrementCounter(&Counters::keyed_store_inline, 1); 6137 __ IncrementCounter(&Counters::keyed_store_inline, 1);
6283 6138
6284 deferred->BindExit(); 6139 deferred->BindExit();
6285 6140
6286 cgen_->frame()->Push(&receiver); 6141 cgen_->frame()->Push(&receiver);
(...skipping 163 matching lines...)
6450 6305
6451 6306
6452 // End of CodeGenerator implementation. 6307 // End of CodeGenerator implementation.
6453 6308
6454 void UnarySubStub::Generate(MacroAssembler* masm) { 6309 void UnarySubStub::Generate(MacroAssembler* masm) {
6455 Label slow; 6310 Label slow;
6456 Label done; 6311 Label done;
6457 Label try_float; 6312 Label try_float;
6458 Label special; 6313 Label special;
6459 // Check whether the value is a smi. 6314 // Check whether the value is a smi.
6460 __ testl(rax, Immediate(kSmiTagMask)); 6315 __ JumpIfNotSmi(rax, &try_float);
6461 __ j(not_zero, &try_float);
6462 6316
6463 // Enter runtime system if the value of the smi is zero 6317 // Enter runtime system if the value of the smi is zero
6464 // to make sure that we switch between 0 and -0. 6318 // to make sure that we switch between 0 and -0.
6465 // Also enter it if the value of the smi is Smi::kMinValue 6319 // Also enter it if the value of the smi is Smi::kMinValue
6466 __ testl(rax, Immediate(0x7FFFFFFE)); 6320 __ testl(rax, Immediate(0x7FFFFFFE));
6467 __ j(zero, &special); 6321 __ j(zero, &special);
6468 __ neg(rax); 6322 __ neg(rax);
6469 __ jmp(&done); 6323 __ jmp(&done);
6470 6324
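The 0x7FFFFFFE mask catches both special smis in one test: tagged 0 is 0x00000000 (negation must produce -0.0) and tagged Smi::kMinValue is 0x80000000 (negation overflows), and these are the only smis with no bits set in the mask. As a C++ predicate, under the same encoding:

    bool NeedsSlowNegationSketch(int32_t tagged_smi) {
      return (tagged_smi & 0x7FFFFFFE) == 0;  // true only for 0 and kMinValue
    }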
6471 __ bind(&special); 6325 __ bind(&special);
(...skipping 88 matching lines...)
6560 6414
6561 // If we're doing a strict equality comparison, we don't have to do 6415 // If we're doing a strict equality comparison, we don't have to do
6562 // type conversion, so we generate code to do fast comparison for objects 6416 // type conversion, so we generate code to do fast comparison for objects
6563 // and oddballs. Non-smi numbers and strings still go through the usual 6417 // and oddballs. Non-smi numbers and strings still go through the usual
6564 // slow-case code. 6418 // slow-case code.
6565 if (strict_) { 6419 if (strict_) {
6566 // If either is a Smi (we know that not both are), then they can only 6420 // If either is a Smi (we know that not both are), then they can only
6567 // be equal if the other is a HeapNumber. If so, use the slow case. 6421 // be equal if the other is a HeapNumber. If so, use the slow case.
6568 { 6422 {
6569 Label not_smis; 6423 Label not_smis;
6570 ASSERT_EQ(0, kSmiTag); 6424 __ SelectNonSmi(rbx, rax, rdx, &not_smis);
6571 ASSERT_EQ(0, Smi::FromInt(0));
6572 __ movq(rcx, Immediate(kSmiTagMask));
6573 __ and_(rcx, rax);
6574 __ testq(rcx, rdx);
6575 __ j(not_zero, &not_smis);
6576 // One operand is a smi.
6577
6578 // Check whether the non-smi is a heap number.
6579 ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
6580 // rcx still holds rax & kSmiTag, which is either zero or one.
6581 __ decq(rcx); // If rax is a smi, all 1s, else all 0s.
6582 __ movq(rbx, rdx);
6583 __ xor_(rbx, rax);
6584 __ and_(rbx, rcx); // rbx holds either 0 or rax ^ rdx.
6585 __ xor_(rbx, rax);
6586 // if rax was smi, rbx is now rdx, else rax.
6587 6425
6588 // Check if the non-smi operand is a heap number. 6426 // Check if the non-smi operand is a heap number.
6589 __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset), 6427 __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
6590 Factory::heap_number_map()); 6428 Factory::heap_number_map());
6591 // If heap number, handle it in the slow case. 6429 // If heap number, handle it in the slow case.
6592 __ j(equal, &slow); 6430 __ j(equal, &slow);
6593 // Return non-equal. ebx (the lower half of rbx) is not zero. 6431 // Return non-equal. ebx (the lower half of rbx) is not zero.
6594 __ movq(rax, rbx); 6432 __ movq(rax, rbx);
6595 __ ret(0); 6433 __ ret(0);
6596 6434
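SelectNonSmi replaces the removed branch-free select: build an all-ones mask when rax is the smi and an all-zeros mask otherwise, then compute rax ^ ((rax ^ rdx) & mask). A sketch of the trick, assuming exactly one operand is a smi (tag bit 0):

    uint64_t SelectNonSmiSketch(uint64_t a, uint64_t b) {
      uint64_t mask = (a & 1) - 1;  // a is a smi -> all ones, else all zeros
      return a ^ ((a ^ b) & mask);  // mask ? b : a
    }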
(...skipping 108 matching lines...)
6705 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) 6543 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
6706 // tagged as a small integer. 6544 // tagged as a small integer.
6707 __ InvokeBuiltin(builtin, JUMP_FUNCTION); 6545 __ InvokeBuiltin(builtin, JUMP_FUNCTION);
6708 } 6546 }
6709 6547
6710 6548
6711 void CompareStub::BranchIfNonSymbol(MacroAssembler* masm, 6549 void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
6712 Label* label, 6550 Label* label,
6713 Register object, 6551 Register object,
6714 Register scratch) { 6552 Register scratch) {
6715 __ testl(object, Immediate(kSmiTagMask)); 6553 __ JumpIfSmi(object, label);
6716 __ j(zero, label);
6717 __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset)); 6554 __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
6718 __ movzxbq(scratch, 6555 __ movzxbq(scratch,
6719 FieldOperand(scratch, Map::kInstanceTypeOffset)); 6556 FieldOperand(scratch, Map::kInstanceTypeOffset));
6720 __ and_(scratch, Immediate(kIsSymbolMask | kIsNotStringMask)); 6557 __ and_(scratch, Immediate(kIsSymbolMask | kIsNotStringMask));
6721 __ cmpb(scratch, Immediate(kSymbolTag | kStringTag)); 6558 __ cmpb(scratch, Immediate(kSymbolTag | kStringTag));
6722 __ j(not_equal, label); 6559 __ j(not_equal, label);
6723 } 6560 }
6724 6561
6725 6562
6726 // Call the function just below TOS on the stack with the given 6563 // Call the function just below TOS on the stack with the given
(...skipping 23 matching lines...)
6750 void InstanceofStub::Generate(MacroAssembler* masm) { 6587 void InstanceofStub::Generate(MacroAssembler* masm) {
6751 // Implements "value instanceof function" operator. 6588 // Implements "value instanceof function" operator.
6752 // Expected input state: 6589 // Expected input state:
6753 // rsp[0] : return address 6590 // rsp[0] : return address
6754 // rsp[1] : function pointer 6591 // rsp[1] : function pointer
6755 // rsp[2] : value 6592 // rsp[2] : value
6756 6593
6757 // Get the object - go slow case if it's a smi. 6594 // Get the object - go slow case if it's a smi.
6758 Label slow; 6595 Label slow;
6759 __ movq(rax, Operand(rsp, 2 * kPointerSize)); 6596 __ movq(rax, Operand(rsp, 2 * kPointerSize));
6760 __ testl(rax, Immediate(kSmiTagMask)); 6597 __ JumpIfSmi(rax, &slow);
6761 __ j(zero, &slow);
6762 6598
6763 // Check that the left hand is a JS object. Leave its map in rax. 6599 // Check that the left hand is a JS object. Leave its map in rax.
6764 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax); 6600 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
6765 __ j(below, &slow); 6601 __ j(below, &slow);
6766 __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE); 6602 __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
6767 __ j(above, &slow); 6603 __ j(above, &slow);
6768 6604
6769 // Get the prototype of the function. 6605 // Get the prototype of the function.
6770 __ movq(rdx, Operand(rsp, 1 * kPointerSize)); 6606 __ movq(rdx, Operand(rsp, 1 * kPointerSize));
6771 __ TryGetFunctionPrototype(rdx, rbx, &slow); 6607 __ TryGetFunctionPrototype(rdx, rbx, &slow);
6772 6608
6773 // Check that the function prototype is a JS object. 6609 // Check that the function prototype is a JS object.
6774 __ testl(rbx, Immediate(kSmiTagMask)); 6610 __ JumpIfSmi(rbx, &slow);
6775 __ j(zero, &slow);
6776 __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister); 6611 __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
6777 __ j(below, &slow); 6612 __ j(below, &slow);
6778 __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE); 6613 __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
6779 __ j(above, &slow); 6614 __ j(above, &slow);
6780 6615
6781 // Register mapping: rax is object map and rbx is function prototype. 6616 // Register mapping: rax is object map and rbx is function prototype.
6782 __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset)); 6617 __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
6783 6618
6784 // Loop through the prototype chain looking for the function prototype. 6619 // Loop through the prototype chain looking for the function prototype.
6785 Label loop, is_instance, is_not_instance; 6620 Label loop, is_instance, is_not_instance;
(...skipping 51 matching lines...)
6837 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { 6672 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
6838 // The key is in rdx and the parameter count is in rax. 6673 // The key is in rdx and the parameter count is in rax.
6839 6674
6840 // The displacement is used for skipping the frame pointer on the 6675 // The displacement is used for skipping the frame pointer on the
6841 // stack. It is the offset of the last parameter (if any) relative 6676 // stack. It is the offset of the last parameter (if any) relative
6842 // to the frame pointer. 6677 // to the frame pointer.
6843 static const int kDisplacement = 1 * kPointerSize; 6678 static const int kDisplacement = 1 * kPointerSize;
6844 6679
6845 // Check that the key is a smi. 6680 // Check that the key is a smi.
6846 Label slow; 6681 Label slow;
6847 __ testl(rdx, Immediate(kSmiTagMask)); 6682 __ JumpIfNotSmi(rdx, &slow);
6848 __ j(not_zero, &slow);
6849 6683
6850 // Check if the calling frame is an arguments adaptor frame. 6684 // Check if the calling frame is an arguments adaptor frame.
6851 Label adaptor; 6685 Label adaptor;
6852 __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); 6686 __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
6853 __ movq(rcx, Operand(rbx, StandardFrameConstants::kContextOffset)); 6687 __ movq(rcx, Operand(rbx, StandardFrameConstants::kContextOffset));
6854 __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 6688 __ cmpq(rcx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
6855 __ j(equal, &adaptor); 6689 __ j(equal, &adaptor);
6856 6690
6857 // Check index against formal parameters count limit passed in 6691 // Check index against formal parameters count limit passed in
6858 // through register rax. Use unsigned comparison to get negative 6692 // through register rax. Use unsigned comparison to get negative
6859 // check for free. 6693 // check for free.
6860 __ cmpq(rdx, rax); 6694 __ cmpq(rdx, rax);
6861 __ j(above_equal, &slow); 6695 __ j(above_equal, &slow);
6862 6696
6863 // Read the argument from the stack and return it. 6697 // Read the argument from the stack and return it.
6864 // Shifting code depends on SmiEncoding being equivalent to left shift: 6698 // Shifting code depends on SmiEncoding being equivalent to left shift:
6865 // we multiply by four to get pointer alignment. 6699 // we multiply by four to get pointer alignment.
6700 // TODO(smi): Find a way to abstract indexing by a smi.
6866 ASSERT(kSmiTagSize == 1 && kSmiTag == 0); 6701 ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
6867 __ lea(rbx, Operand(rbp, rax, times_4, 0)); 6702 __ lea(rbx, Operand(rbp, rax, times_4, 0));
6868 __ neg(rdx); 6703 __ neg(rdx); // TODO(smi): Abstract negative indexing too.
6869 __ movq(rax, Operand(rbx, rdx, times_4, kDisplacement)); 6704 __ movq(rax, Operand(rbx, rdx, times_4, kDisplacement));
6870 __ Ret(); 6705 __ Ret();
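As with keyed loads, the times_4 scale relies on rax and rdx holding tagged smis: n << 1 scaled by 4 is n * 8, one pointer-sized stack slot per argument. The computed address, as a sketch:

    uintptr_t ArgumentAddressSketch(uintptr_t rbp, int32_t tagged_count,
                                    int32_t tagged_key, int displacement) {
      // lea rbx, [rbp + count*4]; neg rdx; mov rax, [rbx + (-key)*4 + disp]
      return rbp + (uintptr_t)tagged_count * 4
                 - (uintptr_t)tagged_key * 4 + displacement;
    }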
6871 6706
6872 // Arguments adaptor case: Check index against actual arguments 6707 // Arguments adaptor case: Check index against actual arguments
6873 // limit found in the arguments adaptor frame. Use unsigned 6708 // limit found in the arguments adaptor frame. Use unsigned
6874 // comparison to get negative check for free. 6709 // comparison to get negative check for free.
6875 __ bind(&adaptor); 6710 __ bind(&adaptor);
6876 __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset)); 6711 __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
6877 __ cmpq(rdx, rcx); 6712 __ cmpq(rdx, rcx);
6878 __ j(above_equal, &slow); 6713 __ j(above_equal, &slow);
6879 6714
6880 // Read the argument from the stack and return it. 6715 // Read the argument from the stack and return it.
6881 // Shifting code depends on SmiEncoding being equivalent to left shift: 6716 // Shifting code depends on SmiEncoding being equivalent to left shift:
6882 // we multiply by four to get pointer alignment. 6717 // we multiply by four to get pointer alignment.
6718 // TODO(smi): Find a way to abstract indexing by a smi.
6883 ASSERT(kSmiTagSize == 1 && kSmiTag == 0); 6719 ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
6884 __ lea(rbx, Operand(rbx, rcx, times_4, 0)); 6720 __ lea(rbx, Operand(rbx, rcx, times_4, 0));
6885 __ neg(rdx); 6721 __ neg(rdx);
6886 __ movq(rax, Operand(rbx, rdx, times_4, kDisplacement)); 6722 __ movq(rax, Operand(rbx, rdx, times_4, kDisplacement));
6887 __ Ret(); 6723 __ Ret();
6888 6724
6889 // Slow-case: Handle non-smi or out-of-bounds access to arguments 6725 // Slow-case: Handle non-smi or out-of-bounds access to arguments
6890 // by calling the runtime system. 6726 // by calling the runtime system.
6891 __ bind(&slow); 6727 __ bind(&slow);
6892 __ pop(rbx); // Return address. 6728 __ pop(rbx); // Return address.
(...skipping 239 matching lines...)
7132 6968
7133 6969
7134 void CallFunctionStub::Generate(MacroAssembler* masm) { 6970 void CallFunctionStub::Generate(MacroAssembler* masm) {
7135 Label slow; 6971 Label slow;
7136 6972
7137 // Get the function to call from the stack. 6973 // Get the function to call from the stack.
7138 // +2 ~ receiver, return address 6974 // +2 ~ receiver, return address
7139 __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize)); 6975 __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
7140 6976
7141 // Check that the function really is a JavaScript function. 6977 // Check that the function really is a JavaScript function.
7142 __ testl(rdi, Immediate(kSmiTagMask)); 6978 __ JumpIfSmi(rdi, &slow);
7143 __ j(zero, &slow);
7144 // Go to the slow case if we do not have a function. 6979
7145 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); 6980 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
7146 __ j(not_equal, &slow); 6981 __ j(not_equal, &slow);
7147 6982
7148 // Fast-case: Just invoke the function. 6983 // Fast-case: Just invoke the function.
7149 ParameterCount actual(argc_); 6984 ParameterCount actual(argc_);
7150 __ InvokeFunction(rdi, actual, JUMP_FUNCTION); 6985 __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
7151 6986
7152 // Slow-case: Non-function called. 6987 // Slow-case: Non-function called.
7153 __ bind(&slow); 6988 __ bind(&slow);
(...skipping 229 matching lines...)
7383 // Set the map and tag the result. 7218 // Set the map and tag the result.
7384 __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex); 7219 __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
7385 __ movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister); 7220 __ movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
7386 } 7221 }
7387 7222
7388 7223
7389 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, 7224 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
7390 Register number) { 7225 Register number) {
7391 Label load_smi, done; 7226 Label load_smi, done;
7392 7227
7393 __ testl(number, Immediate(kSmiTagMask)); 7228 __ JumpIfSmi(number, &load_smi);
7394 __ j(zero, &load_smi);
7395 __ fld_d(FieldOperand(number, HeapNumber::kValueOffset)); 7229 __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
7396 __ jmp(&done); 7230 __ jmp(&done);
7397 7231
7398 __ bind(&load_smi); 7232 __ bind(&load_smi);
7399 __ sarl(number, Immediate(kSmiTagSize)); 7233 __ SmiToInteger32(number, number);
7400 __ push(number); 7234 __ push(number);
7401 __ fild_s(Operand(rsp, 0)); 7235 __ fild_s(Operand(rsp, 0));
7402 __ pop(number); 7236 __ pop(number);
7403 7237
7404 __ bind(&done); 7238 __ bind(&done);
7405 } 7239 }
7406 7240
7407 7241
7408 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm, 7242 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
7409 Register src, 7243 Register src,
7410 XMMRegister dst) { 7244 XMMRegister dst) {
7411 Label load_smi, done; 7245 Label load_smi, done;
7412 7246
7413 __ testl(src, Immediate(kSmiTagMask)); 7247 __ JumpIfSmi(src, &load_smi);
7414 __ j(zero, &load_smi);
7415 __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset)); 7248 __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
7416 __ jmp(&done); 7249 __ jmp(&done);
7417 7250
7418 __ bind(&load_smi); 7251 __ bind(&load_smi);
7419 __ sarl(src, Immediate(kSmiTagSize)); 7252 __ SmiToInteger32(src, src);
7420 __ cvtlsi2sd(dst, src); 7253 __ cvtlsi2sd(dst, src);
7421 7254
7422 __ bind(&done); 7255 __ bind(&done);
7423 } 7256 }
7424 7257
7425 7258
7426 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, 7259 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
7427 XMMRegister dst1, 7260 XMMRegister dst1,
7428 XMMRegister dst2) { 7261 XMMRegister dst2) {
7429 __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize)); 7262 __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
7430 LoadFloatOperand(masm, kScratchRegister, dst1); 7263 LoadFloatOperand(masm, kScratchRegister, dst1);
7431 __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize)); 7264 __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
7432 LoadFloatOperand(masm, kScratchRegister, dst2); 7265 LoadFloatOperand(masm, kScratchRegister, dst2);
7433 } 7266 }
7434 7267
7435 7268
7436 void FloatingPointHelper::LoadInt32Operand(MacroAssembler* masm, 7269 void FloatingPointHelper::LoadInt32Operand(MacroAssembler* masm,
7437 const Operand& src, 7270 const Operand& src,
7438 Register dst) { 7271 Register dst) {
7439 // TODO(X64): Convert number operands to int32 values. 7272 // TODO(X64): Convert number operands to int32 values.
7440 // Don't convert a Smi to a double first. 7273 // Don't convert a Smi to a double first.
7441 UNIMPLEMENTED(); 7274 UNIMPLEMENTED();
7442 } 7275 }
7443 7276
7444 7277
7445 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) { 7278 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
7446 Label load_smi_1, load_smi_2, done_load_1, done; 7279 Label load_smi_1, load_smi_2, done_load_1, done;
7447 __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize)); 7280 __ movq(kScratchRegister, Operand(rsp, 2 * kPointerSize));
7448 __ testl(kScratchRegister, Immediate(kSmiTagMask)); 7281 __ JumpIfSmi(kScratchRegister, &load_smi_1);
7449 __ j(zero, &load_smi_1);
7450 __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset)); 7282 __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
7451 __ bind(&done_load_1); 7283 __ bind(&done_load_1);
7452 7284
7453 __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize)); 7285 __ movq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
7454 __ testl(kScratchRegister, Immediate(kSmiTagMask)); 7286 __ JumpIfSmi(kScratchRegister, &load_smi_2);
7455 __ j(zero, &load_smi_2);
7456 __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset)); 7287 __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
7457 __ jmp(&done); 7288 __ jmp(&done);
7458 7289
7459 __ bind(&load_smi_1); 7290 __ bind(&load_smi_1);
7460 __ sarl(kScratchRegister, Immediate(kSmiTagSize)); 7291 __ SmiToInteger32(kScratchRegister, kScratchRegister);
7461 __ push(kScratchRegister); 7292 __ push(kScratchRegister);
7462 __ fild_s(Operand(rsp, 0)); 7293 __ fild_s(Operand(rsp, 0));
7463 __ pop(kScratchRegister); 7294 __ pop(kScratchRegister);
7464 __ jmp(&done_load_1); 7295 __ jmp(&done_load_1);
7465 7296
7466 __ bind(&load_smi_2); 7297 __ bind(&load_smi_2);
7467 __ sarl(kScratchRegister, Immediate(kSmiTagSize)); 7298 __ SmiToInteger32(kScratchRegister, kScratchRegister);
7468 __ push(kScratchRegister); 7299 __ push(kScratchRegister);
7469 __ fild_s(Operand(rsp, 0)); 7300 __ fild_s(Operand(rsp, 0));
7470 __ pop(kScratchRegister); 7301 __ pop(kScratchRegister);
7471 7302
7472 __ bind(&done); 7303 __ bind(&done);
7473 } 7304 }
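The push/fild dance exists because x87 can only load integers from memory; semantically the smi path is just untag-and-convert. The equivalent in C++, assuming the one-bit tag:

    double LoadSmiAsDoubleSketch(int32_t tagged_smi) {
      return (double)(tagged_smi >> 1);  // untag, then int -> double
    }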
7474 7305
7475 7306
7476 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, 7307 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
7477 Register lhs, 7308 Register lhs,
7478 Register rhs) { 7309 Register rhs) {
7479 Label load_smi_lhs, load_smi_rhs, done_load_lhs, done; 7310 Label load_smi_lhs, load_smi_rhs, done_load_lhs, done;
7480 __ testl(lhs, Immediate(kSmiTagMask)); 7311 __ JumpIfSmi(lhs, &load_smi_lhs);
7481 __ j(zero, &load_smi_lhs);
7482 __ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset)); 7312 __ fld_d(FieldOperand(lhs, HeapNumber::kValueOffset));
7483 __ bind(&done_load_lhs); 7313 __ bind(&done_load_lhs);
7484 7314
7485 __ testl(rhs, Immediate(kSmiTagMask)); 7315 __ JumpIfSmi(rhs, &load_smi_rhs);
7486 __ j(zero, &load_smi_rhs);
7487 __ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset)); 7316 __ fld_d(FieldOperand(rhs, HeapNumber::kValueOffset));
7488 __ jmp(&done); 7317 __ jmp(&done);
7489 7318
7490 __ bind(&load_smi_lhs); 7319 __ bind(&load_smi_lhs);
7491 ASSERT(kSmiTagSize == 1); 7320 ASSERT(kSmiTagSize == 1);
7492 ASSERT(kSmiTag == 0); 7321 ASSERT(kSmiTag == 0);
7493 __ movsxlq(kScratchRegister, lhs); 7322 __ SmiToInteger64(kScratchRegister, lhs);
7494 __ sar(kScratchRegister, Immediate(kSmiTagSize));
7495 __ push(kScratchRegister); 7323 __ push(kScratchRegister);
7496 __ fild_d(Operand(rsp, 0)); 7324 __ fild_d(Operand(rsp, 0));
7497 __ pop(kScratchRegister); 7325 __ pop(kScratchRegister);
7498 __ jmp(&done_load_lhs); 7326 __ jmp(&done_load_lhs);
7499 7327
7500 __ bind(&load_smi_rhs); 7328 __ bind(&load_smi_rhs);
7501 __ movsxlq(kScratchRegister, rhs); 7329 __ SmiToInteger64(kScratchRegister, rhs);
7502 __ sar(kScratchRegister, Immediate(kSmiTagSize));
7503 __ push(kScratchRegister); 7330 __ push(kScratchRegister);
7504 __ fild_d(Operand(rsp, 0)); 7331 __ fild_d(Operand(rsp, 0));
7505 __ pop(kScratchRegister); 7332 __ pop(kScratchRegister);
7506 7333
7507 __ bind(&done); 7334 __ bind(&done);
7508 } 7335 }
7509 7336
7510 7337
7511 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, 7338 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
7512 Label* non_float) { 7339 Label* non_float) {
7513 Label test_other, done; 7340 Label test_other, done;
7514 // Test if both operands are numbers (heap_numbers or smis). 7341 // Test if both operands are numbers (heap_numbers or smis).
7515 // If not, jump to label non_float. 7342 // If not, jump to label non_float.
7516 __ testl(rdx, Immediate(kSmiTagMask)); 7343 __ JumpIfSmi(rdx, &test_other); // argument in rdx is OK
7517 __ j(zero, &test_other); // argument in rdx is OK
7518 __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map()); 7344 __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
7519 __ j(not_equal, non_float); // The argument in rdx is not a number. 7345 __ j(not_equal, non_float); // The argument in rdx is not a number.
7520 7346
7521 __ bind(&test_other); 7347 __ bind(&test_other);
7522 __ testl(rax, Immediate(kSmiTagMask)); 7348 __ JumpIfSmi(rax, &done); // argument in rax is OK
7523 __ j(zero, &done); // argument in rax is OK
7524 __ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map()); 7349 __ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map());
7525 __ j(not_equal, non_float); // The argument in rax is not a number. 7350 __ j(not_equal, non_float); // The argument in rax is not a number.
7526 7351
7527 // Fall-through: Both operands are numbers. 7352 // Fall-through: Both operands are numbers.
7528 __ bind(&done); 7353 __ bind(&done);
7529 } 7354 }
7530 7355
7531 7356
7532 const char* GenericBinaryOpStub::GetName() { 7357 const char* GenericBinaryOpStub::GetName() {
7533 switch (op_) { 7358 switch (op_) {
(...skipping 10 matching lines...)
7544 default: return "GenericBinaryOpStub"; 7369 default: return "GenericBinaryOpStub";
7545 } 7370 }
7546 } 7371 }
7547 7372
7548 7373
7549 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { 7374 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
7550 // Perform fast-case smi code for the operation (rax <op> rbx) and 7375 // Perform fast-case smi code for the operation (rax <op> rbx) and
7551 // leave result in register rax. 7376 // leave result in register rax.
7552 7377
7553 // Smi check both operands. 7378 // Smi check both operands.
7554 __ movq(rcx, rbx); 7379 __ JumpIfNotBothSmi(rax, rbx, slow);
7555 __ or_(rcx, rax); // The value in ecx is used for negative zero test later.
7556 __ testl(rcx, Immediate(kSmiTagMask));
7557 __ j(not_zero, slow);
7558 7380
7559 switch (op_) { 7381 switch (op_) {
7560 case Token::ADD: { 7382 case Token::ADD: {
7561 __ addl(rax, rbx); 7383 __ SmiAdd(rax, rax, rbx, slow);
7562 __ j(overflow, slow); // The slow case rereads operands from the stack.
7563 break; 7384 break;
7564 } 7385 }
7565 7386
7566 case Token::SUB: { 7387 case Token::SUB: {
7567 __ subl(rax, rbx); 7388 __ SmiSub(rax, rax, rbx, slow);
7568 __ j(overflow, slow); // The slow case rereads operands from the stack.
7569 break; 7389 break;
7570 } 7390 }
7571 7391
7572 case Token::MUL: 7392 case Token::MUL:
7573 // If the smi tag is 0 we can just leave the tag on one operand. 7393 __ SmiMul(rax, rax, rbx, slow);
7574 ASSERT(kSmiTag == 0); // adjust code below if not the case
7575 // Remove tag from one of the operands (but keep sign).
7576 __ sarl(rax, Immediate(kSmiTagSize));
7577 // Do multiplication.
7578 __ imull(rax, rbx); // multiplication of smis; result in eax
7579 // Go slow on overflows.
7580 __ j(overflow, slow);
7581 // Check for negative zero result.
7582 __ NegativeZeroTest(rax, rcx, slow); // ecx (not rcx) holds x | y.
7583 break; 7394 break;
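SmiMul keeps the removed logic: untag one operand (a * (b << 1) == (a * b) << 1), go slow on overflow, and preserve -0, since a zero product with a negative factor must be the double -0.0. A sketch on untagged values:

    bool SmiMulFastPathSketch(int32_t a, int32_t b, int32_t* out) {
      int64_t product = (int64_t)a * b;
      if (product == 0 && (a | b) < 0) return false;  // -0.0 is not a smi
      if (product < -(1 << 30) || product >= (1 << 30)) return false;
      *out = (int32_t)product;
      return true;
    }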
7584 7395
7585 case Token::DIV: 7396 case Token::DIV:
7586 // Sign extend eax into edx:eax. 7397 __ SmiDiv(rax, rax, rbx, slow);
7587 __ cdq();
7588 // Check for 0 divisor.
7589 __ testl(rbx, rbx);
7590 __ j(zero, slow);
7591 // Divide edx:eax by ebx (where edx:eax is equivalent to the smi in eax).
7592 __ idivl(rbx);
7593 // Check that the remainder is zero.
7594 __ testl(rdx, rdx);
7595 __ j(not_zero, slow);
7596 // Check for the corner case of dividing the most negative smi
7597 // by -1. We cannot use the overflow flag, since it is not set
7598 // by idiv instruction.
7599 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
7600 // TODO(X64): TODO(Smi): Smi-implementation-dependent constant.
7601 // Value is Smi::FromInt(-(1 << 31)) / Smi::FromInt(-1).
7602 __ cmpl(rax, Immediate(0x40000000));
7603 __ j(equal, slow);
7604 // Check for negative zero result.
7605 __ NegativeZeroTest(rax, rcx, slow); // ecx (not rcx) holds x | y.
7606 // Tag the result and store it in register rax.
7607 ASSERT(kSmiTagSize == times_2); // adjust code if not the case
7608 __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
7609 break; 7398 break;
7610 7399
7611 case Token::MOD: 7400 case Token::MOD:
7612 // Sign extend eax into edx:eax 7401 __ SmiMod(rax, rax, rbx, slow);
7613 __ cdq();
7614 // Check for 0 divisor.
7615 __ testl(rbx, rbx);
7616 __ j(zero, slow);
7617 // Divide edx:eax by ebx.
7618 __ idivl(rbx);
7619 // Check for negative zero result.
7620 __ NegativeZeroTest(rdx, rcx, slow); // ecx (not rcx) holds x | y.
7621 // Move remainder to register rax.
7622 __ movl(rax, rdx);
7623 break; 7402 break;
7624 7403
7625 case Token::BIT_OR: 7404 case Token::BIT_OR:
7626 __ or_(rax, rbx); 7405 __ SmiOr(rax, rax, rbx);
7627 break; 7406 break;
7628 7407
7629 case Token::BIT_AND: 7408 case Token::BIT_AND:
7630 __ and_(rax, rbx); 7409 __ SmiAnd(rax, rax, rbx);
7631 break; 7410 break;
7632 7411
7633 case Token::BIT_XOR: 7412 case Token::BIT_XOR:
7634 ASSERT_EQ(0, kSmiTag); 7413 __ SmiXor(rax, rax, rbx);
7635 __ xor_(rax, rbx);
7636 break; 7414 break;
7637 7415
7638 case Token::SHL: 7416 case Token::SHL:
7639 case Token::SHR: 7417 case Token::SHR:
7640 case Token::SAR: 7418 case Token::SAR:
7641 // Move the second operand into register ecx. 7419 // Move the second operand into register ecx.
7642 __ movl(rcx, rbx); 7420 __ movl(rcx, rbx);
7643 // Remove tags from operands (but keep sign).
7644 __ sarl(rax, Immediate(kSmiTagSize));
7645 __ sarl(rcx, Immediate(kSmiTagSize));
7646 // Perform the operation. 7421 // Perform the operation.
7647 switch (op_) { 7422 switch (op_) {
7648 case Token::SAR: 7423 case Token::SAR:
7649 __ sarl(rax); 7424 __ SmiShiftArithmeticRight(rax, rax, rbx);
7650 // No checks of result necessary
7651 break; 7425 break;
7652 case Token::SHR: 7426 case Token::SHR:
7653 __ shrl(rax); // rcx is implicit shift register 7427 __ SmiShiftLogicRight(rax, rax, rbx, slow);
7654 // Check that the *unsigned* result fits in a smi.
7655 // Neither of the two high-order bits can be set:
7656 // - 0x80000000: high bit would be lost when smi tagging.
7657 // - 0x40000000: this number would convert to negative when
7658 // Smi tagging these two cases can only happen with shifts
7659 // by 0 or 1 when handed a valid smi.
7660 __ testl(rax, Immediate(0xc0000000));
7661 __ j(not_zero, slow);
7662 break; 7428 break;
7663 case Token::SHL: 7429 case Token::SHL:
7664 __ shll(rax); 7430 __ SmiShiftLeft(rax, rax, rbx, slow);
7665 // Check that the *signed* result fits in a smi.
7666 // It does, if the 30th and 31st bits are equal, since then
7667 // shifting the SmiTag in at the bottom doesn't change the sign.
7668 ASSERT(kSmiTagSize == 1);
7669 __ cmpl(rax, Immediate(0xc0000000));
7670 __ j(sign, slow);
7671 break; 7431 break;
7672 default: 7432 default:
7673 UNREACHABLE(); 7433 UNREACHABLE();
7674 } 7434 }
7675 // Tag the result and store it in register eax.
7676 ASSERT(kSmiTagSize == times_2); // adjust code if not the case
7677 __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
7678 break; 7435 break;
7679 7436
7680 default: 7437 default:
7681 UNREACHABLE(); 7438 UNREACHABLE();
7682 break; 7439 break;
7683 } 7440 }
7684 } 7441 }
7685 7442
7686 7443
7687 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { 7444 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
(...skipping 27 matching lines...)
7715 // Fast-case: Both operands are numbers. 7472 // Fast-case: Both operands are numbers.
7716 // Allocate a heap number, if needed. 7473 // Allocate a heap number, if needed.
7717 Label skip_allocation; 7474 Label skip_allocation;
7718 switch (mode_) { 7475 switch (mode_) {
7719 case OVERWRITE_LEFT: 7476 case OVERWRITE_LEFT:
7720 __ movq(rax, rdx); 7477 __ movq(rax, rdx);
7721 // Fall through! 7478 // Fall through!
7722 case OVERWRITE_RIGHT: 7479 case OVERWRITE_RIGHT:
7723 // If the argument in rax is already an object, we skip the 7480 // If the argument in rax is already an object, we skip the
7724 // allocation of a heap number. 7481 // allocation of a heap number.
7725 __ testl(rax, Immediate(kSmiTagMask)); 7482 __ JumpIfNotSmi(rax, &skip_allocation);
7726 __ j(not_zero, &skip_allocation);
7727 // Fall through! 7483 // Fall through!
7728 case NO_OVERWRITE: 7484 case NO_OVERWRITE:
7729 FloatingPointHelper::AllocateHeapNumber(masm, 7485 FloatingPointHelper::AllocateHeapNumber(masm,
7730 &call_runtime, 7486 &call_runtime,
7731 rcx, 7487 rcx,
7732 rax); 7488 rax);
7733 __ bind(&skip_allocation); 7489 __ bind(&skip_allocation);
7734 break; 7490 break;
7735 default: UNREACHABLE(); 7491 default: UNREACHABLE();
7736 } 7492 }
(...skipping 85 matching lines...)
7822 if (op_ == Token::SHR) { 7578 if (op_ == Token::SHR) {
7823 // Check if result is non-negative and fits in a smi. 7579 // Check if result is non-negative and fits in a smi.
7824 __ testl(rax, Immediate(0xc0000000)); 7580 __ testl(rax, Immediate(0xc0000000));
7825 __ j(not_zero, &non_smi_result); 7581 __ j(not_zero, &non_smi_result);
7826 } else { 7582 } else {
7827 // Check if result fits in a smi. 7583 // Check if result fits in a smi.
7828 __ cmpl(rax, Immediate(0xc0000000)); 7584 __ cmpl(rax, Immediate(0xc0000000));
7829 __ j(negative, &non_smi_result); 7585 __ j(negative, &non_smi_result);
7830 } 7586 }
7831 // Tag smi result and return. 7587 // Tag smi result and return.
7832 ASSERT(kSmiTagSize == 1); // adjust code if not the case 7588 __ Integer32ToSmi(rax, rax);
7833 __ lea(rax, Operand(rax, rax, times_1, kSmiTag));
7834 __ ret(2 * kPointerSize); 7589 __ ret(2 * kPointerSize);
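The two masks above encode the smi range check, assuming 31-bit payloads: a signed int32 fits iff it lies in [-(1 << 30), (1 << 30) - 1] (bits 30 and 31 agree), while the unsigned SHR result must have neither bit 31 (lost when tagging) nor bit 30 (turns negative when tagged) set. As predicates:

    bool SignedFitsInSmiSketch(int32_t v) {
      return v >= -(1 << 30) && v < (1 << 30);  // bits 30 and 31 agree
    }
    bool UnsignedFitsInSmiSketch(uint32_t v) {
      return (v & 0xC0000000u) == 0;            // top two bits clear
    }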
7835 7590
7836 // All ops except SHR return a signed int32 that we load in a HeapNumber. 7591 // All ops except SHR return a signed int32 that we load in a HeapNumber.
7837 if (op_ != Token::SHR) { 7592 if (op_ != Token::SHR) {
7838 __ bind(&non_smi_result); 7593 __ bind(&non_smi_result);
7839 // Allocate a heap number if needed. 7594 // Allocate a heap number if needed.
7840 __ movsxlq(rbx, rax); // rbx: sign extended 32-bit result 7595 __ movsxlq(rbx, rax); // rbx: sign extended 32-bit result
7841 switch (mode_) { 7596 switch (mode_) {
7842 case OVERWRITE_LEFT: 7597 case OVERWRITE_LEFT:
7843 case OVERWRITE_RIGHT: 7598 case OVERWRITE_RIGHT:
7844 // If the operand was an object, we skip the 7599 // If the operand was an object, we skip the
7845 // allocation of a heap number. 7600 // allocation of a heap number.
7846 __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ? 7601 __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
7847 1 * kPointerSize : 2 * kPointerSize)); 7602 1 * kPointerSize : 2 * kPointerSize));
7848 __ testl(rax, Immediate(kSmiTagMask)); 7603 __ JumpIfNotSmi(rax, &skip_allocation);
7849 __ j(not_zero, &skip_allocation);
7850 // Fall through! 7604 // Fall through!
7851 case NO_OVERWRITE: 7605 case NO_OVERWRITE:
7852 FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime, 7606 FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
7853 rcx, rax); 7607 rcx, rax);
7854 __ bind(&skip_allocation); 7608 __ bind(&skip_allocation);
7855 break; 7609 break;
7856 default: UNREACHABLE(); 7610 default: UNREACHABLE();
7857 } 7611 }
7858 // Store the result in the HeapNumber and return. 7612 // Store the result in the HeapNumber and return.
7859 __ movq(Operand(rsp, 1 * kPointerSize), rbx); 7613 __ movq(Operand(rsp, 1 * kPointerSize), rbx);
(...skipping 75 matching lines...)
7935 int CompareStub::MinorKey() { 7689 int CompareStub::MinorKey() {
7936 // Encode the two parameters in a unique 16 bit value. 7690 // Encode the two parameters in a unique 16 bit value.
7937 ASSERT(static_cast<unsigned>(cc_) < (1 << 15)); 7691 ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
7938 return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0); 7692 return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
7939 } 7693 }
7940 7694
7941 7695
7942 #undef __ 7696 #undef __
7943 7697
7944 } } // namespace v8::internal 7698 } } // namespace v8::internal