OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1700 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1711 __ li(a0, Operand(Smi::FromInt(ncr))); | 1711 __ li(a0, Operand(Smi::FromInt(ncr))); |
1712 __ push(a0); | 1712 __ push(a0); |
1713 } | 1713 } |
1714 | 1714 |
1715 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) | 1715 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) |
1716 // tagged as a small integer. | 1716 // tagged as a small integer. |
1717 __ InvokeBuiltin(native, JUMP_FUNCTION); | 1717 __ InvokeBuiltin(native, JUMP_FUNCTION); |
1718 } | 1718 } |
1719 | 1719 |
1720 | 1720 |
1721 // This stub does not handle the inlined cases (Smis, Booleans, undefined). | |
1722 // The stub returns zero for false, and a non-zero value for true. | 1721 // The stub returns zero for false, and a non-zero value for true. |
1723 void ToBooleanStub::Generate(MacroAssembler* masm) { | 1722 void ToBooleanStub::Generate(MacroAssembler* masm) { |
1724 // This stub uses FPU instructions. | 1723 // This stub uses FPU instructions. |
1725 CpuFeatures::Scope scope(FPU); | 1724 CpuFeatures::Scope scope(FPU); |
1726 | 1725 |
1727 Label false_result; | 1726 Label false_result; |
1728 Label not_heap_number; | 1727 Label not_heap_number; |
1729 Register scratch0 = t5.is(tos_) ? t3 : t5; | 1728 Register scratch0 = t5.is(tos_) ? t3 : t5; |
1730 | 1729 |
1731 // undefined -> false | 1730 // undefined -> false |
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1775 __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable)); | 1774 __ And(scratch0, scratch0, Operand(1 << Map::kIsUndetectable)); |
1776 __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable)); | 1775 __ Branch(&false_result, eq, scratch0, Operand(1 << Map::kIsUndetectable)); |
1777 | 1776 |
1778 // JavaScript object => true. | 1777 // JavaScript object => true. |
1779 __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); | 1778 __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); |
1780 __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset)); | 1779 __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset)); |
1781 | 1780 |
1782 // "tos_" is a register and contains a non-zero value. | 1781 // "tos_" is a register and contains a non-zero value. |
1783 // Hence we implicitly return true if the greater than | 1782 // Hence we implicitly return true if the greater than |
1784 // condition is satisfied. | 1783 // condition is satisfied. |
1785 __ Ret(gt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); | 1784 __ Ret(ge, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); |
1786 | 1785 |
1787 // Check for string. | 1786 // Check for string. |
1788 __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); | 1787 __ lw(scratch0, FieldMemOperand(tos_, HeapObject::kMapOffset)); |
1789 __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset)); | 1788 __ lbu(scratch0, FieldMemOperand(scratch0, Map::kInstanceTypeOffset)); |
1790 // "tos_" is a register and contains a non-zero value. | 1789 // "tos_" is a register and contains a non-zero value. |
1791 // Hence we implicitly return true if the greater than | 1790 // Hence we implicitly return true if the greater than |
1792 // condition is satisfied. | 1791 // condition is satisfied. |
1793 __ Ret(gt, scratch0, Operand(FIRST_NONSTRING_TYPE)); | 1792 __ Ret(ge, scratch0, Operand(FIRST_NONSTRING_TYPE)); |
1794 | 1793 |
1795 // String value => false iff empty, i.e., length is zero. | 1794 // String value => false iff empty, i.e., length is zero. |
1796 __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset)); | 1795 __ lw(tos_, FieldMemOperand(tos_, String::kLengthOffset)); |
1797 // If length is zero, "tos_" contains zero ==> false. | 1796 // If length is zero, "tos_" contains zero ==> false. |
1798 // If length is not zero, "tos_" contains a non-zero value ==> true. | 1797 // If length is not zero, "tos_" contains a non-zero value ==> true. |
1799 __ Ret(); | 1798 __ Ret(); |
1800 | 1799 |
1801 // Return 0 in "tos_" for false. | 1800 // Return 0 in "tos_" for false. |
1802 __ bind(&false_result); | 1801 __ bind(&false_result); |
1803 __ mov(tos_, zero_reg); | 1802 __ mov(tos_, zero_reg); |
(...skipping 978 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2782 __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg)); | 2781 __ Branch(&return_heap_number, ne, scratch2, Operand(zero_reg)); |
2783 __ bind(&not_zero); | 2782 __ bind(&not_zero); |
2784 | 2783 |
2785 // Tag the result and return. | 2784 // Tag the result and return. |
2786 __ SmiTag(v0, scratch1); | 2785 __ SmiTag(v0, scratch1); |
2787 __ Ret(); | 2786 __ Ret(); |
2788 } else { | 2787 } else { |
2789 // DIV just falls through to allocating a heap number. | 2788 // DIV just falls through to allocating a heap number. |
2790 } | 2789 } |
2791 | 2790 |
| 2791 __ bind(&return_heap_number); |
| 2792 // Return a heap number, or fall through to type transition or runtime |
| 2793 // call if we can't. |
2792 if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER | 2794 if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER |
2793 : BinaryOpIC::INT32)) { | 2795 : BinaryOpIC::INT32)) { |
2794 __ bind(&return_heap_number); | |
2795 // We are using FPU registers so s0 is available. | 2796 // We are using FPU registers so s0 is available. |
2796 heap_number_result = s0; | 2797 heap_number_result = s0; |
2797 GenerateHeapResultAllocation(masm, | 2798 GenerateHeapResultAllocation(masm, |
2798 heap_number_result, | 2799 heap_number_result, |
2799 heap_number_map, | 2800 heap_number_map, |
2800 scratch1, | 2801 scratch1, |
2801 scratch2, | 2802 scratch2, |
2802 &call_runtime); | 2803 &call_runtime); |
2803 __ mov(v0, heap_number_result); | 2804 __ mov(v0, heap_number_result); |
2804 __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset)); | 2805 __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset)); |
(...skipping 158 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2963 __ TailCallStub(&stub); | 2964 __ TailCallStub(&stub); |
2964 } | 2965 } |
2965 | 2966 |
2966 break; | 2967 break; |
2967 } | 2968 } |
2968 | 2969 |
2969 default: | 2970 default: |
2970 UNREACHABLE(); | 2971 UNREACHABLE(); |
2971 } | 2972 } |
2972 | 2973 |
2973 if (transition.is_linked()) { | 2974 // We never expect DIV to yield an integer result, so we always generate |
| 2975 // type transition code for DIV operations expecting an integer result: the |
| 2976 // code will fall through to this type transition. |
| 2977 if (transition.is_linked() || |
| 2978 ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) { |
2974 __ bind(&transition); | 2979 __ bind(&transition); |
2975 GenerateTypeTransition(masm); | 2980 GenerateTypeTransition(masm); |
2976 } | 2981 } |
2977 | 2982 |
2978 __ bind(&call_runtime); | 2983 __ bind(&call_runtime); |
2979 GenerateCallRuntime(masm); | 2984 GenerateCallRuntime(masm); |
2980 } | 2985 } |
2981 | 2986 |
2982 | 2987 |
2983 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { | 2988 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { |
(...skipping 551 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3535 __ mov(a0, s0); | 3540 __ mov(a0, s0); |
3536 __ mov(a1, s1); | 3541 __ mov(a1, s1); |
3537 | 3542 |
3538 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We | 3543 // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We |
3539 // also need to reserve the 4 argument slots on the stack. | 3544 // also need to reserve the 4 argument slots on the stack. |
3540 | 3545 |
3541 __ AssertStackIsAligned(); | 3546 __ AssertStackIsAligned(); |
3542 | 3547 |
3543 __ li(a2, Operand(ExternalReference::isolate_address())); | 3548 __ li(a2, Operand(ExternalReference::isolate_address())); |
3544 | 3549 |
3545 // From arm version of this function: | 3550 // To let the GC traverse the return address of the exit frames, we need to |
3546 // TODO(1242173): To let the GC traverse the return address of the exit | 3551 // know where the return address is. The CEntryStub is unmovable, so |
3547 // frames, we need to know where the return address is. Right now, | 3552 // we can store the address on the stack to be able to find it again and |
3548 // we push it on the stack to be able to find it again, but we never | 3553 // we never have to restore it, because it will not change. |
3549 // restore from it in case of changes, which makes it impossible to | |
3550 // support moving the C entry code stub. This should be fixed, but currently | |
3551 // this is OK because the CEntryStub gets generated so early in the V8 boot | |
3552 // sequence that it is not moving ever. | |
3553 | |
3554 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm); | 3554 { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm); |
3555 // This branch-and-link sequence is needed to find the current PC on mips, | 3555 // This branch-and-link sequence is needed to find the current PC on mips, |
3556 // saved to the ra register. | 3556 // saved to the ra register. |
3557 // Use masm-> here instead of the double-underscore macro since extra | 3557 // Use masm-> here instead of the double-underscore macro since extra |
3558 // coverage code can interfere with the proper calculation of ra. | 3558 // coverage code can interfere with the proper calculation of ra. |
3559 Label find_ra; | 3559 Label find_ra; |
3560 masm->bal(&find_ra); // bal exposes branch delay slot. | 3560 masm->bal(&find_ra); // bal exposes branch delay slot. |
3561 masm->nop(); // Branch delay slot nop. | 3561 masm->nop(); // Branch delay slot nop. |
3562 masm->bind(&find_ra); | 3562 masm->bind(&find_ra); |
3563 | 3563 |
(...skipping 504 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4068 __ Ret(); | 4068 __ Ret(); |
4069 | 4069 |
4070 // Slow-case: Handle non-smi or out-of-bounds access to arguments | 4070 // Slow-case: Handle non-smi or out-of-bounds access to arguments |
4071 // by calling the runtime system. | 4071 // by calling the runtime system. |
4072 __ bind(&slow); | 4072 __ bind(&slow); |
4073 __ push(a1); | 4073 __ push(a1); |
4074 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); | 4074 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); |
4075 } | 4075 } |
4076 | 4076 |
4077 | 4077 |
4078 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { | 4078 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) { |
4079 // sp[0] : number of parameters | 4079 // sp[0] : number of parameters |
4080 // sp[4] : receiver displacement | 4080 // sp[4] : receiver displacement |
4081 // sp[8] : function | 4081 // sp[8] : function |
4082 | 4082 // Check if the calling frame is an arguments adaptor frame. |
| 4083 Label runtime; |
| 4084 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| 4085 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset)); |
| 4086 __ Branch(&runtime, ne, |
| 4087 a2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
| 4088 |
| 4089 // Patch the arguments.length and the parameters pointer in the current frame. |
| 4090 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| 4091 __ sw(a2, MemOperand(sp, 0 * kPointerSize)); |
| 4092 __ sll(t3, a2, 1); |
| 4093 __ Addu(a3, a3, Operand(t3)); |
| 4094 __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset); |
| 4095 __ sw(a3, MemOperand(sp, 1 * kPointerSize)); |
| 4096 |
| 4097 __ bind(&runtime); |
| 4098 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); |
| 4099 } |
| 4100 |
| 4101 |
| 4102 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { |
| 4103 // Stack layout: |
| 4104 // sp[0] : number of parameters (tagged) |
| 4105 // sp[4] : address of receiver argument |
| 4106 // sp[8] : function |
| 4107 // Registers used over whole function: |
| 4108 // t2 : allocated object (tagged) |
| 4109 // t5 : mapped parameter count (tagged) |
| 4110 |
| 4111 __ lw(a1, MemOperand(sp, 0 * kPointerSize)); |
| 4112 // a1 = parameter count (tagged) |
| 4113 |
| 4114 // Check if the calling frame is an arguments adaptor frame. |
| 4115 Label runtime; |
| 4116 Label adaptor_frame, try_allocate; |
| 4117 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| 4118 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset)); |
| 4119 __ Branch(&adaptor_frame, eq, a2, |
| 4120 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
| 4121 |
| 4122 // No adaptor, parameter count = argument count. |
| 4123 __ mov(a2, a1); |
| 4124 __ b(&try_allocate); |
| 4125 __ nop(); // Branch delay slot nop. |
| 4126 |
| 4127 // We have an adaptor frame. Patch the parameters pointer. |
| 4128 __ bind(&adaptor_frame); |
| 4129 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| 4130 __ sll(t6, a2, 1); |
| 4131 __ Addu(a3, a3, Operand(t6)); |
| 4132 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset)); |
| 4133 __ sw(a3, MemOperand(sp, 1 * kPointerSize)); |
| 4134 |
| 4135 // a1 = parameter count (tagged) |
| 4136 // a2 = argument count (tagged) |
| 4137 // Compute the mapped parameter count = min(a1, a2) in a1. |
| 4138 Label skip_min; |
| 4139 __ Branch(&skip_min, lt, a1, Operand(a2)); |
| 4140 __ mov(a1, a2); |
| 4141 __ bind(&skip_min); |
| 4142 |
| 4143 __ bind(&try_allocate); |
| 4144 |
| 4145 // Compute the sizes of backing store, parameter map, and arguments object. |
| 4146 // 1. Parameter map, has 2 extra words containing context and backing store. |
| 4147 const int kParameterMapHeaderSize = |
| 4148 FixedArray::kHeaderSize + 2 * kPointerSize; |
| 4149 // If there are no mapped parameters, we do not need the parameter_map. |
| 4150 Label param_map_size; |
| 4151 ASSERT_EQ(0, Smi::FromInt(0)); |
| 4152 __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg)); |
| 4153 __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0. |
| 4154 __ sll(t5, a1, 1); |
| 4155 __ addiu(t5, t5, kParameterMapHeaderSize); |
| 4156 __ bind(&param_map_size); |
| 4157 |
| 4158 // 2. Backing store. |
| 4159 __ sll(t6, a2, 1); |
| 4160 __ Addu(t5, t5, Operand(t6)); |
| 4161 __ Addu(t5, t5, Operand(FixedArray::kHeaderSize)); |
| 4162 |
| 4163 // 3. Arguments object. |
| 4164 __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize)); |
| 4165 |
| 4166 // Do the allocation of all three objects in one go. |
| 4167 __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT); |
| 4168 |
| 4169 // v0 = address of new object(s) (tagged) |
| 4170 // a2 = argument count (tagged) |
| 4171 // Get the arguments boilerplate from the current (global) context into t0. |
| 4172 const int kNormalOffset = |
| 4173 Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); |
| 4174 const int kAliasedOffset = |
| 4175 Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX); |
| 4176 |
| 4177 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); |
| 4178 __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset)); |
| 4179 Label skip2_ne, skip2_eq; |
| 4180 __ Branch(&skip2_ne, ne, a1, Operand(zero_reg)); |
| 4181 __ lw(t0, MemOperand(t0, kNormalOffset)); |
| 4182 __ bind(&skip2_ne); |
| 4183 |
| 4184 __ Branch(&skip2_eq, eq, a1, Operand(zero_reg)); |
| 4185 __ lw(t0, MemOperand(t0, kAliasedOffset)); |
| 4186 __ bind(&skip2_eq); |
| 4187 |
| 4188 // v0 = address of new object (tagged) |
| 4189 // a1 = mapped parameter count (tagged) |
| 4190 // a2 = argument count (tagged) |
| 4191 // t0 = address of boilerplate object (tagged) |
| 4192 // Copy the JS object part. |
| 4193 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { |
| 4194 __ lw(a3, FieldMemOperand(t0, i)); |
| 4195 __ sw(a3, FieldMemOperand(v0, i)); |
| 4196 } |
| 4197 |
| 4198 // Setup the callee in-object property. |
| 4199 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); |
| 4200 __ lw(a3, MemOperand(sp, 2 * kPointerSize)); |
| 4201 const int kCalleeOffset = JSObject::kHeaderSize + |
| 4202 Heap::kArgumentsCalleeIndex * kPointerSize; |
| 4203 __ sw(a3, FieldMemOperand(v0, kCalleeOffset)); |
| 4204 |
| 4205 // Use the length (smi tagged) and set that as an in-object property too. |
| 4206 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); |
| 4207 const int kLengthOffset = JSObject::kHeaderSize + |
| 4208 Heap::kArgumentsLengthIndex * kPointerSize; |
| 4209 __ sw(a2, FieldMemOperand(v0, kLengthOffset)); |
| 4210 |
| 4211 // Setup the elements pointer in the allocated arguments object. |
| 4212 // If we allocated a parameter map, t0 will point there, otherwise |
| 4213 // it will point to the backing store. |
| 4214 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize)); |
| 4215 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset)); |
| 4216 |
| 4217 // v0 = address of new object (tagged) |
| 4218 // a1 = mapped parameter count (tagged) |
| 4219 // a2 = argument count (tagged) |
| 4220 // t0 = address of parameter map or backing store (tagged) |
| 4221 // Initialize parameter map. If there are no mapped arguments, we're done. |
| 4222 Label skip_parameter_map; |
| 4223 Label skip3; |
| 4224 __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0))); |
| 4225 // Move backing store address to a3, because it is |
| 4226 // expected there when filling in the unmapped arguments. |
| 4227 __ mov(a3, t0); |
| 4228 __ bind(&skip3); |
| 4229 |
| 4230 __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0))); |
| 4231 |
| 4232 __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex); |
| 4233 __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset)); |
| 4234 __ Addu(t2, a1, Operand(Smi::FromInt(2))); |
| 4235 __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset)); |
| 4236 __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize)); |
| 4237 __ sll(t6, a1, 1); |
| 4238 __ Addu(t2, t0, Operand(t6)); |
| 4239 __ Addu(t2, t2, Operand(kParameterMapHeaderSize)); |
| 4240 __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize)); |
| 4241 |
| 4242 // Copy the parameter slots and the holes in the arguments. |
| 4243 // We need to fill in mapped_parameter_count slots. They index the context, |
| 4244 // where parameters are stored in reverse order, at |
| 4245 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 |
| 4246 // The mapped parameter thus need to get indices |
| 4247 // MIN_CONTEXT_SLOTS+parameter_count-1 .. |
| 4248 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count |
| 4249 // We loop from right to left. |
| 4250 Label parameters_loop, parameters_test; |
| 4251 __ mov(t2, a1); |
| 4252 __ lw(t5, MemOperand(sp, 0 * kPointerSize)); |
| 4253 __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); |
| 4254 __ Subu(t5, t5, Operand(a1)); |
| 4255 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex); |
| 4256 __ sll(t6, t2, 1); |
| 4257 __ Addu(a3, t0, Operand(t6)); |
| 4258 __ Addu(a3, a3, Operand(kParameterMapHeaderSize)); |
| 4259 |
| 4260 // t2 = loop variable (tagged) |
| 4261 // a1 = mapping index (tagged) |
| 4262 // a3 = address of backing store (tagged) |
| 4263 // t0 = address of parameter map (tagged) |
| 4264 // t1 = temporary scratch (a.o., for address calculation) |
| 4265 // t3 = the hole value |
| 4266 __ jmp(&parameters_test); |
| 4267 |
| 4268 __ bind(&parameters_loop); |
| 4269 __ Subu(t2, t2, Operand(Smi::FromInt(1))); |
| 4270 __ sll(t1, t2, 1); |
| 4271 __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag)); |
| 4272 __ Addu(t6, t0, t1); |
| 4273 __ sw(t5, MemOperand(t6)); |
| 4274 __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize)); |
| 4275 __ Addu(t6, a3, t1); |
| 4276 __ sw(t3, MemOperand(t6)); |
| 4277 __ Addu(t5, t5, Operand(Smi::FromInt(1))); |
| 4278 __ bind(&parameters_test); |
| 4279 __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0))); |
| 4280 |
| 4281 __ bind(&skip_parameter_map); |
| 4282 // a2 = argument count (tagged) |
| 4283 // a3 = address of backing store (tagged) |
| 4284 // t1 = scratch |
| 4285 // Copy arguments header and remaining slots (if there are any). |
| 4286 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex); |
| 4287 __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset)); |
| 4288 __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset)); |
| 4289 |
| 4290 Label arguments_loop, arguments_test; |
| 4291 __ mov(t5, a1); |
| 4292 __ lw(t0, MemOperand(sp, 1 * kPointerSize)); |
| 4293 __ sll(t6, t5, 1); |
| 4294 __ Subu(t0, t0, Operand(t6)); |
| 4295 __ jmp(&arguments_test); |
| 4296 |
| 4297 __ bind(&arguments_loop); |
| 4298 __ Subu(t0, t0, Operand(kPointerSize)); |
| 4299 __ lw(t2, MemOperand(t0, 0)); |
| 4300 __ sll(t6, t5, 1); |
| 4301 __ Addu(t1, a3, Operand(t6)); |
| 4302 __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize)); |
| 4303 __ Addu(t5, t5, Operand(Smi::FromInt(1))); |
| 4304 |
| 4305 __ bind(&arguments_test); |
| 4306 __ Branch(&arguments_loop, lt, t5, Operand(a2)); |
| 4307 |
| 4308 // Return and remove the on-stack parameters. |
| 4309 __ Addu(sp, sp, Operand(3 * kPointerSize)); |
| 4310 __ Ret(); |
| 4311 |
| 4312 // Do the runtime call to allocate the arguments object. |
| 4313 // a2 = argument count (tagged) |
| 4314 __ bind(&runtime); |
| 4315 __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count. |
| 4316 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); |
| 4317 } |
| 4318 |
| 4319 |
| 4320 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { |
| 4321 // sp[0] : number of parameters |
| 4322 // sp[4] : receiver displacement |
| 4323 // sp[8] : function |
4083 // Check if the calling frame is an arguments adaptor frame. | 4324 // Check if the calling frame is an arguments adaptor frame. |
4084 Label adaptor_frame, try_allocate, runtime; | 4325 Label adaptor_frame, try_allocate, runtime; |
4085 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 4326 __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
4086 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset)); | 4327 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset)); |
4087 __ Branch(&adaptor_frame, | 4328 __ Branch(&adaptor_frame, |
4088 eq, | 4329 eq, |
4089 a3, | 4330 a3, |
4090 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 4331 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
4091 | 4332 |
4092 // Get the length from the frame. | 4333 // Get the length from the frame. |
(...skipping 12 matching lines...) Expand all Loading... |
4105 | 4346 |
4106 // Try the new space allocation. Start out with computing the size | 4347 // Try the new space allocation. Start out with computing the size |
4107 // of the arguments object and the elements array in words. | 4348 // of the arguments object and the elements array in words. |
4108 Label add_arguments_object; | 4349 Label add_arguments_object; |
4109 __ bind(&try_allocate); | 4350 __ bind(&try_allocate); |
4110 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg)); | 4351 __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg)); |
4111 __ srl(a1, a1, kSmiTagSize); | 4352 __ srl(a1, a1, kSmiTagSize); |
4112 | 4353 |
4113 __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize)); | 4354 __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize)); |
4114 __ bind(&add_arguments_object); | 4355 __ bind(&add_arguments_object); |
4115 __ Addu(a1, a1, Operand(GetArgumentsObjectSize() / kPointerSize)); | 4356 __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize)); |
4116 | 4357 |
4117 // Do the allocation of both objects in one go. | 4358 // Do the allocation of both objects in one go. |
4118 __ AllocateInNewSpace( | 4359 __ AllocateInNewSpace(a1, |
4119 a1, | 4360 v0, |
4120 v0, | 4361 a2, |
4121 a2, | 4362 a3, |
4122 a3, | 4363 &runtime, |
4123 &runtime, | 4364 static_cast<AllocationFlags>(TAG_OBJECT | |
4124 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); | 4365 SIZE_IN_WORDS)); |
4125 | 4366 |
4126 // Get the arguments boilerplate from the current (global) context. | 4367 // Get the arguments boilerplate from the current (global) context. |
4127 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); | 4368 __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); |
4128 __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset)); | 4369 __ lw(t0, FieldMemOperand(t0, GlobalObject::kGlobalContextOffset)); |
4129 __ lw(t0, MemOperand(t0, | 4370 __ lw(t0, MemOperand(t0, Context::SlotOffset( |
4130 Context::SlotOffset(GetArgumentsBoilerplateIndex()))); | 4371 Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX))); |
4131 | 4372 |
4132 // Copy the JS object part. | 4373 // Copy the JS object part. |
4133 __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize); | 4374 __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize); |
4134 | 4375 |
4135 if (type_ == NEW_NON_STRICT) { | |
4136 // Setup the callee in-object property. | |
4137 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); | |
4138 __ lw(a3, MemOperand(sp, 2 * kPointerSize)); | |
4139 const int kCalleeOffset = JSObject::kHeaderSize + | |
4140 Heap::kArgumentsCalleeIndex * kPointerSize; | |
4141 __ sw(a3, FieldMemOperand(v0, kCalleeOffset)); | |
4142 } | |
4143 | |
4144 // Get the length (smi tagged) and set that as an in-object property too. | 4376 // Get the length (smi tagged) and set that as an in-object property too. |
4145 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); | 4377 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); |
4146 __ lw(a1, MemOperand(sp, 0 * kPointerSize)); | 4378 __ lw(a1, MemOperand(sp, 0 * kPointerSize)); |
4147 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize + | 4379 __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize + |
4148 Heap::kArgumentsLengthIndex * kPointerSize)); | 4380 Heap::kArgumentsLengthIndex * kPointerSize)); |
4149 | 4381 |
4150 Label done; | 4382 Label done; |
4151 __ Branch(&done, eq, a1, Operand(zero_reg)); | 4383 __ Branch(&done, eq, a1, Operand(zero_reg)); |
4152 | 4384 |
4153 // Get the parameters pointer from the stack. | 4385 // Get the parameters pointer from the stack. |
4154 __ lw(a2, MemOperand(sp, 1 * kPointerSize)); | 4386 __ lw(a2, MemOperand(sp, 1 * kPointerSize)); |
4155 | 4387 |
4156 // Setup the elements pointer in the allocated arguments object and | 4388 // Setup the elements pointer in the allocated arguments object and |
4157 // initialize the header in the elements fixed array. | 4389 // initialize the header in the elements fixed array. |
4158 __ Addu(t0, v0, Operand(GetArgumentsObjectSize())); | 4390 __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict)); |
4159 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset)); | 4391 __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset)); |
4160 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex); | 4392 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex); |
4161 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset)); | 4393 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset)); |
4162 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset)); | 4394 __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset)); |
4163 __ srl(a1, a1, kSmiTagSize); // Untag the length for the loop. | 4395 // Untag the length for the loop. |
| 4396 __ srl(a1, a1, kSmiTagSize); |
4164 | 4397 |
4165 // Copy the fixed array slots. | 4398 // Copy the fixed array slots. |
4166 Label loop; | 4399 Label loop; |
4167 // Setup t0 to point to the first array slot. | 4400 // Setup t0 to point to the first array slot. |
4168 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 4401 __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
4169 __ bind(&loop); | 4402 __ bind(&loop); |
4170 // Pre-decrement a2 with kPointerSize on each iteration. | 4403 // Pre-decrement a2 with kPointerSize on each iteration. |
4171 // Pre-decrement in order to skip receiver. | 4404 // Pre-decrement in order to skip receiver. |
4172 __ Addu(a2, a2, Operand(-kPointerSize)); | 4405 __ Addu(a2, a2, Operand(-kPointerSize)); |
4173 __ lw(a3, MemOperand(a2)); | 4406 __ lw(a3, MemOperand(a2)); |
4174 // Post-increment t0 with kPointerSize on each iteration. | 4407 // Post-increment t0 with kPointerSize on each iteration. |
4175 __ sw(a3, MemOperand(t0)); | 4408 __ sw(a3, MemOperand(t0)); |
4176 __ Addu(t0, t0, Operand(kPointerSize)); | 4409 __ Addu(t0, t0, Operand(kPointerSize)); |
4177 __ Subu(a1, a1, Operand(1)); | 4410 __ Subu(a1, a1, Operand(1)); |
4178 __ Branch(&loop, ne, a1, Operand(zero_reg)); | 4411 __ Branch(&loop, ne, a1, Operand(zero_reg)); |
4179 | 4412 |
4180 // Return and remove the on-stack parameters. | 4413 // Return and remove the on-stack parameters. |
4181 __ bind(&done); | 4414 __ bind(&done); |
4182 __ Addu(sp, sp, Operand(3 * kPointerSize)); | 4415 __ Addu(sp, sp, Operand(3 * kPointerSize)); |
4183 __ Ret(); | 4416 __ Ret(); |
4184 | 4417 |
4185 // Do the runtime call to allocate the arguments object. | 4418 // Do the runtime call to allocate the arguments object. |
4186 __ bind(&runtime); | 4419 __ bind(&runtime); |
4187 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); | 4420 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1); |
4188 } | 4421 } |
4189 | 4422 |
4190 | 4423 |
4191 void RegExpExecStub::Generate(MacroAssembler* masm) { | 4424 void RegExpExecStub::Generate(MacroAssembler* masm) { |
4192 // Just jump directly to runtime if native RegExp is not selected at compile | 4425 // Just jump directly to runtime if native RegExp is not selected at compile |
4193 // time or if regexp entry in generated code is turned off runtime switch or | 4426 // time or if regexp entry in generated code is turned off runtime switch or |
4194 // at compilation. | 4427 // at compilation. |
4195 #ifdef V8_INTERPRETED_REGEXP | 4428 #ifdef V8_INTERPRETED_REGEXP |
4196 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); | 4429 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); |
4197 #else // V8_INTERPRETED_REGEXP | 4430 #else // V8_INTERPRETED_REGEXP |
(...skipping 2456 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
6654 __ mov(result, zero_reg); | 6887 __ mov(result, zero_reg); |
6655 __ Ret(); | 6888 __ Ret(); |
6656 } | 6889 } |
6657 | 6890 |
6658 | 6891 |
6659 #undef __ | 6892 #undef __ |
6660 | 6893 |
6661 } } // namespace v8::internal | 6894 } } // namespace v8::internal |
6662 | 6895 |
6663 #endif // V8_TARGET_ARCH_MIPS | 6896 #endif // V8_TARGET_ARCH_MIPS |
OLD | NEW |