Chromium Code Reviews

Diff: src/arm/codegen-arm.cc

Issue 6529032: Merge 6168:6800 from bleeding_edge to experimental/gc branch. (Closed)
Base URL: http://v8.googlecode.com/svn/branches/experimental/gc/
Patch Set: Created 9 years, 10 months ago
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 582 matching lines...)

void CodeGenerator::StoreArgumentsObject(bool initial) {
  ArgumentsAllocationMode mode = ArgumentsMode();
  ASSERT(mode != NO_ARGUMENTS_ALLOCATION);

  Comment cmnt(masm_, "[ store arguments object");
  if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
    // When using lazy arguments allocation, we store the hole value
    // as a sentinel indicating that the arguments object hasn't been
    // allocated yet.
-   frame_->EmitPushRoot(Heap::kTheHoleValueRootIndex);
+   frame_->EmitPushRoot(Heap::kArgumentsMarkerRootIndex);
  } else {
    frame_->SpillAll();
    ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
    __ ldr(r2, frame_->Function());
    // The receiver is below the arguments, the return address, and the
    // frame pointer on the stack.
    const int kReceiverDisplacement = 2 + scope()->num_parameters();
    __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
    __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
    frame_->Adjust(3);
    __ Push(r2, r1, r0);
    frame_->CallStub(&stub, 3);
    frame_->EmitPush(r0);
  }

  Variable* arguments = scope()->arguments();
  Variable* shadow = scope()->arguments_shadow();
  ASSERT(arguments != NULL && arguments->AsSlot() != NULL);
  ASSERT(shadow != NULL && shadow->AsSlot() != NULL);
  JumpTarget done;
  if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
    // We have to skip storing into the arguments slot if it has
    // already been written to. This can happen if a function
    // has a local variable named 'arguments'.
    LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF);
    Register arguments = frame_->PopToRegister();
-   __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+   __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
    __ cmp(arguments, ip);
    done.Branch(ne);
  }
  StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT);
  if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
  StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT);
}

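Note: switching from kTheHoleValueRootIndex to kArgumentsMarkerRootIndex gives lazy arguments allocation its own dedicated sentinel, so a hole value that reaches the arguments slot for any other reason can no longer be mistaken for "arguments object not allocated yet". A minimal sketch of the sentinel pattern, in plain C++ with hypothetical names (not V8 code):

    #include <cstdio>

    // A unique object whose address serves as the "not yet allocated"
    // marker, mirroring the dedicated arguments marker root used above.
    struct Value { int payload; };
    static Value kArgumentsMarker;
    static Value* arguments_slot = &kArgumentsMarker;

    Value* GetArguments() {
      if (arguments_slot == &kArgumentsMarker) {
        arguments_slot = new Value{42};  // allocate on first real use
      }
      return arguments_slot;
    }

    int main() {
      std::printf("%d\n", GetArguments()->payload);  // prints 42
      return 0;
    }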

void CodeGenerator::LoadTypeofExpression(Expression* expr) {
(...skipping 466 matching lines...)

  Register heap_number_map = r7;
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  __ ldr(r3, FieldMemOperand(tos_register_, HeapNumber::kMapOffset));
  __ cmp(r3, heap_number_map);
  // Not a number, fall back to the GenericBinaryOpStub.
  __ b(ne, entry_label());

  Register int32 = r2;
  // Not a 32-bit signed int, fall back to the GenericBinaryOpStub.
- __ ConvertToInt32(tos_register_, int32, r4, r5, entry_label());
+ __ ConvertToInt32(tos_register_, int32, r4, r5, d0, entry_label());

  // tos_register_ (r0 or r1): Original heap number.
  // int32: signed 32-bit int.

  Label result_not_a_smi;
  int shift_value = value_ & 0x1f;
  switch (op_) {
    case Token::BIT_OR: __ orr(int32, int32, Operand(value_)); break;
    case Token::BIT_XOR: __ eor(int32, int32, Operand(value_)); break;
    case Token::BIT_AND: __ and_(int32, int32, Operand(value_)); break;
(...skipping 458 matching lines...)
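Note: the new d0 argument presumably hands ConvertToInt32 a VFP double scratch register for the conversion. The masking value_ & 0x1f just above implements ECMAScript's rule that shift counts use only their low five bits. A small self-contained check of that semantics (plain C++, illustrative only):

    #include <cassert>
    #include <cstdint>

    // ECMAScript shift operators take the count modulo 32, so
    // 1 << 33 === 2 in JavaScript.
    int32_t JsShl(int32_t lhs, int32_t count) {
      return static_cast<int32_t>(
          static_cast<uint32_t>(lhs) << (count & 0x1f));
    }

    int main() {
      assert(JsShl(1, 33) == 2);   // 33 & 31 == 1
      assert(JsShl(1, 32) == 1);   // 32 & 31 == 0: value unchanged
      assert(JsShl(3, 4) == 48);
      return 0;
    }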
      break;
    }

    default:
      UNREACHABLE();
      break;
  }
}


-void CodeGenerator::Comparison(Condition cc,
+void CodeGenerator::Comparison(Condition cond,
                                Expression* left,
                                Expression* right,
                                bool strict) {
  VirtualFrame::RegisterAllocationScope scope(this);

  if (left != NULL) Load(left);
  if (right != NULL) Load(right);

  // sp[0] : y
  // sp[1] : x
  // result : cc register

  // Strict only makes sense for equality comparisons.
- ASSERT(!strict || cc == eq);
+ ASSERT(!strict || cond == eq);

  Register lhs;
  Register rhs;

  bool lhs_is_smi;
  bool rhs_is_smi;

  // We load the top two stack positions into registers chosen by the virtual
  // frame. This should keep the register shuffling to a minimum.
  // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
- if (cc == gt || cc == le) {
-   cc = ReverseCondition(cc);
+ if (cond == gt || cond == le) {
+   cond = ReverseCondition(cond);
    lhs_is_smi = frame_->KnownSmiAt(0);
    rhs_is_smi = frame_->KnownSmiAt(1);
    lhs = frame_->PopToRegister();
    rhs = frame_->PopToRegister(lhs);  // Don't pop to the same register again!
  } else {
    rhs_is_smi = frame_->KnownSmiAt(0);
    lhs_is_smi = frame_->KnownSmiAt(1);
    rhs = frame_->PopToRegister();
    lhs = frame_->PopToRegister(rhs);  // Don't pop to the same register again!
  }
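Note: '>' and '<=' are lowered to their reversed conditions with the operand pop order swapped, so both operands are still evaluated and converted left-to-right as ECMA-262 requires, while the back end only ever needs 'lt'/'ge' style comparisons. A sketch of the equivalence (plain C++, hypothetical types, not V8 code):

    #include <cassert>

    enum Condition { lt, gt, le, ge };

    Condition Reverse(Condition c) {
      switch (c) {
        case gt: return lt;   // x > y  <=>  y < x
        case le: return ge;   // x <= y <=>  y >= x
        default: return c;
      }
    }

    bool Compare(Condition c, int lhs, int rhs) {
      if (c == gt || c == le) {      // reverse and swap, as the codegen does
        c = Reverse(c);
        int tmp = lhs; lhs = rhs; rhs = tmp;
      }
      return c == lt ? lhs < rhs : lhs >= rhs;
    }

    int main() {
      assert(Compare(gt, 3, 2) == (3 > 2));
      assert(Compare(le, 3, 2) == (3 <= 2));
      return 0;
    }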
(...skipping 19 matching lines...)
      __ orr(scratch, lhs, Operand(rhs));
      smi_test_reg = scratch;
    }
    __ tst(smi_test_reg, Operand(kSmiTagMask));
    JumpTarget smi;
    smi.Branch(eq);

    // Perform non-smi comparison by stub.
    // CompareStub takes arguments in r0 and r1, returns <0, >0 or 0 in r0.
    // We call with 0 args because there are 0 on the stack.
-   CompareStub stub(cc, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
+   CompareStub stub(cond, strict, NO_SMI_COMPARE_IN_STUB, lhs, rhs);
    frame_->CallStub(&stub, 0);
    __ cmp(r0, Operand(0, RelocInfo::NONE));
    exit.Jump();

    smi.Bind();
  }

  // Do smi comparisons by pointer comparison.
  __ cmp(lhs, Operand(rhs));

  exit.Bind();
- cc_reg_ = cc;
+ cc_reg_ = cond;
}

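Note: the orr/tst pair above tests both operands' smi tags with a single instruction: smis carry tag bit 0, so the OR of two words has a zero low bit exactly when both are smis, and two smis then compare correctly as raw machine words. Illustrative sketch (not V8 code):

    #include <cassert>
    #include <cstdint>

    // Smi tagging scheme assumed above: tag bit 0, payload shifted left.
    const intptr_t kSmiTagMask = 1;

    intptr_t ToSmi(intptr_t value) { return value << 1; }  // tag bit is 0

    bool BothSmi(intptr_t a, intptr_t b) {
      return ((a | b) & kSmiTagMask) == 0;  // one tst for two registers
    }

    int main() {
      intptr_t x = ToSmi(7), y = ToSmi(9);
      assert(BothSmi(x, y));
      assert(x < y);                    // raw comparison matches 7 < 9
      intptr_t heap_ptr = 0x1001;       // pretend heap pointer: low bit set
      assert(!BothSmi(x, heap_ptr));
      return 0;
    }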

// Call the function on the stack with the given arguments.
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
                                      CallFunctionFlags flags,
                                      int position) {
  // Push the arguments ("left-to-right") on the stack.
  int arg_count = args->length();
  for (int i = 0; i < arg_count; i++) {
(...skipping 60 matching lines...)
  // sp[1]: receiver
  // sp[2]: applicand.apply
  // sp[3]: applicand.

  // Check if the arguments object has been lazily allocated
  // already. If so, just use that instead of copying the arguments
  // from the stack. This also deals with cases where a local variable
  // named 'arguments' has been introduced.
  JumpTarget slow;
  Label done;
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
  __ cmp(ip, arguments_reg);
  slow.Branch(ne);

  Label build_args;
  // Get rid of the arguments object probe.
  frame_->Drop();
  // Stack now has 3 elements on it.
  // Contents of stack at this point:
  // sp[0]: receiver - in the receiver_reg register.
  // sp[1]: applicand.apply
  // sp[2]: applicand.

  // Check that the receiver really is a JavaScript object.
- __ BranchOnSmi(receiver_reg, &build_args);
+ __ JumpIfSmi(receiver_reg, &build_args);
  // We allow all JSObjects including JSFunctions. As long as
  // JS_FUNCTION_TYPE is the last instance type and it is right
  // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
  // bound.
  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
  STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
  __ CompareObjectType(receiver_reg, r2, r3, FIRST_JS_OBJECT_TYPE);
  __ b(lt, &build_args);

  // Check that applicand.apply is Function.prototype.apply.
  __ ldr(r0, MemOperand(sp, kPointerSize));
- __ BranchOnSmi(r0, &build_args);
+ __ JumpIfSmi(r0, &build_args);
  __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
  __ b(ne, &build_args);
  Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
  __ ldr(r1, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));
  __ sub(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ cmp(r1, Operand(apply_code));
  __ b(ne, &build_args);

  // Check that applicand is a function.
  __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
- __ BranchOnSmi(r1, &build_args);
+ __ JumpIfSmi(r1, &build_args);
  __ CompareObjectType(r1, r2, r3, JS_FUNCTION_TYPE);
  __ b(ne, &build_args);

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  Label invoke, adapted;
  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(eq, &adapted);
(...skipping 79 matching lines...)
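Note: the two STATIC_ASSERTs above pin the instance-type layout that makes a single lower-bound compare sufficient: with JS_FUNCTION_TYPE as the very last type, immediately after LAST_JS_OBJECT_TYPE, the test "type >= FIRST_JS_OBJECT_TYPE" accepts every JSObject and JSFunction with no upper-bound check. A sketch with a hypothetical enum (not V8's real type list):

    #include <cassert>

    enum InstanceType {
      HEAP_NUMBER_TYPE,
      STRING_TYPE,
      FIRST_JS_OBJECT_TYPE,
      JS_ARRAY_TYPE,
      LAST_JS_OBJECT_TYPE = JS_ARRAY_TYPE,
      JS_FUNCTION_TYPE,             // == LAST_JS_OBJECT_TYPE + 1, last type
      LAST_TYPE = JS_FUNCTION_TYPE
    };

    bool IsAcceptableReceiver(InstanceType t) {
      return t >= FIRST_JS_OBJECT_TYPE;  // lower bound only, as in the codegen
    }

    int main() {
      static_assert(LAST_TYPE == JS_FUNCTION_TYPE, "layout assumption");
      static_assert(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1, "layout");
      assert(IsAcceptableReceiver(JS_FUNCTION_TYPE));
      assert(IsAcceptableReceiver(JS_ARRAY_TYPE));
      assert(!IsAcceptableReceiver(STRING_TYPE));
      return 0;
    }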
  // sp[0]: result
  __ bind(&done);

  // Restore the context register after a call.
  __ ldr(cp, frame_->Context());
}


void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
  ASSERT(has_cc());
- Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
- target->Branch(cc);
+ Condition cond = if_true ? cc_reg_ : NegateCondition(cc_reg_);
+ target->Branch(cond);
  cc_reg_ = al;
}


void CodeGenerator::CheckStack() {
  frame_->SpillAll();
  Comment cmnt(masm_, "[ check stack");
  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
  masm_->cmp(sp, Operand(ip));
  StackCheckStub stub;
(...skipping 285 matching lines...)
    frame_->Exit();

    // Here we use masm_-> instead of the __ macro to prevent the code
    // coverage tool from instrumenting as we rely on the code size here.
    int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
    masm_->add(sp, sp, Operand(sp_delta));
    masm_->Jump(lr);
    DeleteFrame();

#ifdef DEBUG
-   // Check that the size of the code used for returning matches what is
-   // expected by the debugger. If the sp_delta above cannot be encoded in
-   // the add instruction the add will generate two instructions.
-   int return_sequence_length =
-       masm_->InstructionsGeneratedSince(&check_exit_codesize);
-   CHECK(return_sequence_length ==
-             Assembler::kJSReturnSequenceInstructions ||
-         return_sequence_length ==
-             Assembler::kJSReturnSequenceInstructions + 1);
+   // Check that the size of the code used for returning is large enough
+   // for the debugger's requirements.
+   ASSERT(Assembler::kJSReturnSequenceInstructions <=
+          masm_->InstructionsGeneratedSince(&check_exit_codesize));
#endif
  }
}

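Note: the removed CHECK allowed the return sequence to be one instruction longer than kJSReturnSequenceInstructions because a large sp_delta may not fit ARM's data-processing immediate encoding (an 8-bit value rotated right by an even amount), forcing the assembler to emit two instructions; the new ASSERT only requires the sequence to be at least as long as the debugger needs for patching. A sketch of that encoding constraint (plain C++, illustrative only):

    #include <cassert>
    #include <cstdint>

    // ARM immediates are an 8-bit value rotated right by an even amount;
    // values that don't fit cost an extra instruction.
    bool FitsArmImmediate(uint32_t v) {
      for (uint32_t rot = 0; rot < 32; rot += 2) {
        // Rotating v left by rot undoes a rotate-right by rot.
        uint32_t undone = rot == 0 ? v : (v << rot) | (v >> (32 - rot));
        if (undone <= 0xFF) return true;
      }
      return false;
    }

    int main() {
      assert(FitsArmImmediate(2 * 4));        // small sp_delta: one add
      assert(FitsArmImmediate(0xFF000000u));  // 0xFF rotated right by 8
      assert(!FitsArmImmediate(0x101u));      // would need two instructions
      return 0;
    }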

void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
#ifdef DEBUG
  int original_height = frame_->height();
#endif
  Comment cmnt(masm_, "[ WithEnterStatement");
(...skipping 1034 matching lines...)
  // ... or if the slot isn't a non-parameter arguments slot.
  if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;

  // Load the loaded value from the stack into a register but leave it on the
  // stack.
  Register tos = frame_->Peek();

  // If the loaded value is the sentinel that indicates that we
  // haven't loaded the arguments object yet, we need to do it now.
  JumpTarget exit;
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(ip, Heap::kArgumentsMarkerRootIndex);
  __ cmp(tos, ip);
  exit.Branch(ne);
  frame_->Drop();
  StoreArgumentsObject(false);
  exit.Bind();
}


void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
  ASSERT(slot != NULL);
(...skipping 905 matching lines...)
    frame_->EmitPush(r0);
    if (arg_count > 0) {
      __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
      frame_->EmitPush(r1);
    } else {
      frame_->EmitPush(r2);
    }
    __ ldr(r1, frame_->Receiver());
    frame_->EmitPush(r1);

-   frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 3);
+   // Push the strict mode flag.
+   frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
+
+   frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4);

    done.Jump();
    slow.Bind();
  }

  // Prepare the stack for the call to ResolvePossiblyDirectEval by
  // pushing the loaded function, the first argument to the eval
  // call and the receiver.
  __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
  frame_->EmitPush(r1);
  if (arg_count > 0) {
    __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
    frame_->EmitPush(r1);
  } else {
    frame_->EmitPush(r2);
  }
  __ ldr(r1, frame_->Receiver());
  frame_->EmitPush(r1);

+ // Push the strict mode flag.
+ frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
+
  // Resolve the call.
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);

  // If we generated fast-case code bind the jump-target where fast
  // and slow case merge.
  if (done.is_linked()) done.Bind();

  // Touch up stack with the right values for the function and the receiver.
  __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
  __ str(r1, MemOperand(sp, arg_count * kPointerSize));

  // Call the function.
(...skipping 401 matching lines...)
  // Get base and exponent to registers.
  Register exponent = frame_->PopToRegister();
  Register base = frame_->PopToRegister(exponent);
  Register heap_number_map = no_reg;

  // Set the frame for the runtime jump target. The code below jumps to the
  // jump target label so the frame needs to be established before that.
  ASSERT(runtime.entry_frame() == NULL);
  runtime.set_entry_frame(frame_);

- __ BranchOnNotSmi(exponent, &exponent_nonsmi);
- __ BranchOnNotSmi(base, &base_nonsmi);
+ __ JumpIfNotSmi(exponent, &exponent_nonsmi);
+ __ JumpIfNotSmi(base, &base_nonsmi);

  heap_number_map = r6;
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

  // Exponent is a smi and base is a smi. Get the smi value into vfp register
  // d1.
  __ SmiToDoubleVFPRegister(base, d1, scratch1, s0);
  __ b(&powi);

  __ bind(&base_nonsmi);
(...skipping 27 matching lines...)
  // If exponent is positive we are done.
  __ cmp(exponent, Operand(0, RelocInfo::NONE));
  __ b(ge, &allocate_return);

  // If exponent is negative result is 1/result (d2 already holds 1.0 in that
  // case). However if d0 has reached infinity this will not provide the
  // correct result, so call runtime if that is the case.
  __ mov(scratch2, Operand(0x7FF00000));
  __ mov(scratch1, Operand(0, RelocInfo::NONE));
  __ vmov(d1, scratch1, scratch2);  // Load infinity into d1.
- __ vcmp(d0, d1);
- __ vmrs(pc);
+ __ VFPCompareAndSetFlags(d0, d1);
  runtime.Branch(eq);  // d0 reached infinity.
  __ vdiv(d0, d2, d0);
  __ b(&allocate_return);

  __ bind(&exponent_nonsmi);
  // Special handling of raising to the power of -0.5 and 0.5. First check
  // that the value is a heap number and check the lower bits (which for
  // both values are zero).
  heap_number_map = r6;
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
(...skipping 10 matching lines...)
  // Compare exponent with -0.5.
  __ cmp(scratch1, Operand(0xbfe00000));
  __ b(ne, &not_minus_half);

  // Get the double value from the base into vfp register d0.
  __ ObjectToDoubleVFPRegister(base, d0,
                               scratch1, scratch2, heap_number_map, s0,
                               runtime.entry_label(),
                               AVOID_NANS_AND_INFINITIES);

+ // Convert -0 into +0 by adding +0.
+ __ vmov(d2, 0.0);
+ __ vadd(d0, d2, d0);
  // Load 1.0 into d2.
  __ vmov(d2, 1.0);

- // Calculate the reciprocal of the square root. 1/sqrt(x) = sqrt(1/x).
- __ vdiv(d0, d2, d0);
- __ vsqrt(d0, d0);
+ // Calculate the reciprocal of the square root.
+ __ vsqrt(d0, d0);
+ __ vdiv(d0, d2, d0);

  __ b(&allocate_return);

  __ bind(&not_minus_half);
  // Compare exponent with 0.5.
  __ cmp(scratch1, Operand(0x3fe00000));
  runtime.Branch(ne);

  // Get the double value from the base into vfp register d0.
  __ ObjectToDoubleVFPRegister(base, d0,
                               scratch1, scratch2, heap_number_map, s0,
                               runtime.entry_label(),
                               AVOID_NANS_AND_INFINITIES);
+ // Convert -0 into +0 by adding +0.
+ __ vmov(d2, 0.0);
+ __ vadd(d0, d2, d0);
  __ vsqrt(d0, d0);

  __ bind(&allocate_return);
  Register scratch3 = r5;
  __ AllocateHeapNumberWithValue(scratch3, d0, scratch1, scratch2,
                                 heap_number_map, runtime.entry_label());
  __ mov(base, scratch3);
  done.Jump();

  runtime.Bind();
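Note: the two inserted "-0 into +0" sequences and the reordered reciprocal square root fix Math.pow for a base of -0: IEEE-754 defines sqrt(-0) = -0, but ECMAScript requires Math.pow(-0, 0.5) = +0 and Math.pow(-0, -0.5) = +Infinity. Adding +0 canonicalizes -0 to +0 (under the default rounding mode, -0 + +0 is +0), after which sqrt and 1/sqrt produce the required results. (The VFPCompareAndSetFlags change earlier in this hunk just folds the old vcmp/vmrs pair into one macro call.) A self-contained check of the identities relied on (plain C++, not V8 code):

    #include <cassert>
    #include <cmath>
    #include <cstdio>

    int main() {
      double neg_zero = -0.0;
      assert(std::signbit(neg_zero));
      double canonical = 0.0 + neg_zero;     // -0 + +0 == +0
      assert(!std::signbit(canonical));

      // Math.pow(-0, 0.5) must be +0: sqrt(-0) alone would return -0.
      assert(!std::signbit(std::sqrt(canonical)));

      // Math.pow(-0, -0.5) must be +Infinity.
      double r = 1.0 / std::sqrt(canonical);
      assert(std::isinf(r) && r > 0);
      std::printf("ok\n");
      return 0;
    }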
(...skipping 835 matching lines...)
  DeferredSwapElements* deferred =
      new DeferredSwapElements(object, index1, index2);

  // Fetch the map and check if array is in fast case.
  // Check that object doesn't require security checks and
  // has no indexed interceptor.
  __ CompareObjectType(object, tmp1, tmp2, FIRST_JS_OBJECT_TYPE);
  deferred->Branch(lt);
  __ ldrb(tmp2, FieldMemOperand(tmp1, Map::kBitFieldOffset));
  __ tst(tmp2, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
- deferred->Branch(nz);
+ deferred->Branch(ne);

  // Check the object's elements are in fast case and writable.
  __ ldr(tmp1, FieldMemOperand(object, JSObject::kElementsOffset));
  __ ldr(tmp2, FieldMemOperand(tmp1, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
  __ cmp(tmp2, ip);
  deferred->Branch(ne);

  // Smi-tagging is equivalent to multiplying by 2.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);

  // Check that both indices are smis.
  __ mov(tmp2, index1);
  __ orr(tmp2, tmp2, index2);
  __ tst(tmp2, Operand(kSmiTagMask));
- deferred->Branch(nz);
+ deferred->Branch(ne);

  // Check that both indices are valid.
  __ ldr(tmp2, FieldMemOperand(object, JSArray::kLengthOffset));
  __ cmp(tmp2, index1);
  __ cmp(tmp2, index2, hi);
  deferred->Branch(ls);

  // Bring the offsets into the fixed array in tmp1 into index1 and
  // index2.
  __ mov(tmp2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
(...skipping 229 matching lines...)
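Note: the nz to ne renames are cosmetic (both names denote the ARM "Z flag clear" condition). The index-validity check above is a conditional-compare chain: the second cmp only executes while the first left the flags in the 'hi' state, so the single 'ls' branch fires if either index is out of range; since lengths and indices are both smis, unsigned comparison works directly on the tagged values. Equivalent logic in plain C++ (illustrative only):

    #include <cassert>
    #include <cstdint>

    //   cmp length, index1
    //   cmp length, index2, hi   ; executes only while "length > index1"
    //   b ls, deferred           ; taken if either index is out of range
    bool BothIndicesValid(uint32_t length, uint32_t idx1, uint32_t idx2) {
      bool hi = length > idx1;     // first cmp: do the flags say "hi"?
      if (hi) hi = length > idx2;  // second cmp runs only under "hi"
      return hi;                   // "ls" (not hi) goes to the slow path
    }

    int main() {
      assert(BothIndicesValid(10, 3, 9));
      assert(!BothIndicesValid(10, 3, 10));  // second index out of range
      assert(!BothIndicesValid(10, 12, 3));  // first index out of range
      return 0;
    }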
    // LoadCondition may (and usually does) leave a test and branch to
    // be emitted by the caller. In that case, negate the condition.
    if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);

  } else if (op == Token::DELETE) {
    Property* property = node->expression()->AsProperty();
    Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
    if (property != NULL) {
      Load(property->obj());
      Load(property->key());
-     frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
+     frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
+     frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3);
      frame_->EmitPush(r0);

    } else if (variable != NULL) {
+     // Delete of an unqualified identifier is disallowed in strict mode
+     // so this code can only be reached in non-strict mode.
+     ASSERT(strict_mode_flag() == kNonStrictMode);
      Slot* slot = variable->AsSlot();
      if (variable->is_global()) {
        LoadGlobal();
        frame_->EmitPush(Operand(variable->name()));
-       frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
+       frame_->EmitPush(Operand(Smi::FromInt(kNonStrictMode)));
+       frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 3);
        frame_->EmitPush(r0);

      } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
-       // lookup the context holding the named variable
+       // Delete from the context holding the named variable.
        frame_->EmitPush(cp);
        frame_->EmitPush(Operand(variable->name()));
-       frame_->CallRuntime(Runtime::kLookupContext, 2);
-       // r0: context
-       frame_->EmitPush(r0);
-       frame_->EmitPush(Operand(variable->name()));
-       frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
+       frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
        frame_->EmitPush(r0);

      } else {
        // Default: Result of deleting non-global, not dynamically
        // introduced variables is false.
        frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
      }

    } else {
      // Default: Result of deleting expressions is true.
(...skipping 1046 matching lines...)
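Note: Builtins::DELETE now receives the language mode as a third argument because delete semantics differ between modes: deleting a non-configurable property merely returns false in non-strict code but must throw a TypeError in strict code, while "delete identifier" is a SyntaxError in strict mode (hence the new ASSERT of kNonStrictMode on the variable path). A sketch of the mode-dependent behaviour (illustrative C++, not the actual builtin):

    #include <cstdio>
    #include <stdexcept>

    enum StrictModeFlag { kNonStrictMode, kStrictMode };

    bool DeleteProperty(bool configurable, StrictModeFlag mode) {
      if (configurable) return true;  // property removed
      if (mode == kStrictMode)
        throw std::runtime_error("TypeError: cannot delete property");
      return false;                   // non-strict mode: just report failure
    }

    int main() {
      std::printf("%d\n", DeleteProperty(false, kNonStrictMode));  // 0
      try {
        DeleteProperty(false, kStrictMode);
      } catch (const std::exception& e) {
        std::puts(e.what());          // TypeError surfaces as an exception
      }
      return 0;
    }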
}


void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
#ifdef DEBUG
  int expected_height = frame()->height() - (is_contextual ? 1 : 2);
#endif

  Result result;
  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
-   frame()->CallStoreIC(name, is_contextual);
+   frame()->CallStoreIC(name, is_contextual, strict_mode_flag());
  } else {
    // Inline the in-object property case.
    JumpTarget slow, done;

    // Get the value and receiver from the stack.
    frame()->PopToR0();
    Register value = r0;
    frame()->PopToR1();
    Register receiver = r1;

(...skipping 432 matching lines...)
         BinaryOpIC::GetName(runtime_operands_type_));
  return name_;
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM
