| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 | 5 |
| 6 | 6 |
| 7 #include "src/v8.h" | 7 #include "src/v8.h" |
| 8 | 8 |
| 9 #if V8_TARGET_ARCH_MIPS | 9 #if V8_TARGET_ARCH_MIPS64 |
| 10 | 10 |
| 11 #include "src/codegen.h" | 11 #include "src/codegen.h" |
| 12 #include "src/debug.h" | 12 #include "src/debug.h" |
| 13 #include "src/deoptimizer.h" | 13 #include "src/deoptimizer.h" |
| 14 #include "src/full-codegen.h" | 14 #include "src/full-codegen.h" |
| 15 #include "src/runtime.h" | 15 #include "src/runtime.h" |
| 16 #include "src/stub-cache.h" | 16 #include "src/stub-cache.h" |
| 17 | 17 |
| 18 namespace v8 { | 18 namespace v8 { |
| 19 namespace internal { | 19 namespace internal { |
| 20 | 20 |
| 21 | 21 |
| 22 #define __ ACCESS_MASM(masm) | 22 #define __ ACCESS_MASM(masm) |
| 23 | 23 |
| 24 | 24 |
| 25 void Builtins::Generate_Adaptor(MacroAssembler* masm, | 25 void Builtins::Generate_Adaptor(MacroAssembler* masm, |
| 26 CFunctionId id, | 26 CFunctionId id, |
| 27 BuiltinExtraArguments extra_args) { | 27 BuiltinExtraArguments extra_args) { |
| 28 // ----------- S t a t e ------------- | 28 // ----------- S t a t e ------------- |
| 29 // -- a0 : number of arguments excluding receiver | 29 // -- a0 : number of arguments excluding receiver |
| 30 // -- a1 : called function (only guaranteed when | 30 // -- a1 : called function (only guaranteed when |
| 31 // -- extra_args requires it) | 31 // -- extra_args requires it) |
| 32 // -- cp : context | 32 // -- cp : context |
| 33 // -- sp[0] : last argument | 33 // -- sp[0] : last argument |
| 34 // -- ... | 34 // -- ... |
| 35 // -- sp[4 * (argc - 1)] : first argument | 35 // -- sp[8 * (argc - 1)] : first argument |
| 36 // -- sp[4 * argc] : receiver | 36 // -- sp[8 * argc] : receiver |
| 37 // ----------------------------------- | 37 // ----------------------------------- |
| 38 | 38 |
| 39 // Insert extra arguments. | 39 // Insert extra arguments. |
| 40 int num_extra_args = 0; | 40 int num_extra_args = 0; |
| 41 if (extra_args == NEEDS_CALLED_FUNCTION) { | 41 if (extra_args == NEEDS_CALLED_FUNCTION) { |
| 42 num_extra_args = 1; | 42 num_extra_args = 1; |
| 43 __ push(a1); | 43 __ push(a1); |
| 44 } else { | 44 } else { |
| 45 ASSERT(extra_args == NO_EXTRA_ARGUMENTS); | 45 ASSERT(extra_args == NO_EXTRA_ARGUMENTS); |
| 46 } | 46 } |
| 47 | 47 |
| 48 // JumpToExternalReference expects s0 to contain the number of arguments | 48 // JumpToExternalReference expects s0 to contain the number of arguments |
| 49 // including the receiver and the extra arguments. | 49 // including the receiver and the extra arguments. |
| 50 __ Addu(s0, a0, num_extra_args + 1); | 50 __ Daddu(s0, a0, num_extra_args + 1); |
| 51 __ sll(s1, s0, kPointerSizeLog2); | 51 __ dsll(s1, s0, kPointerSizeLog2); |
| 52 __ Subu(s1, s1, kPointerSize); | 52 __ Dsubu(s1, s1, kPointerSize); |
| 53 __ JumpToExternalReference(ExternalReference(id, masm->isolate())); | 53 __ JumpToExternalReference(ExternalReference(id, masm->isolate())); |
| 54 } | 54 } |
| 55 | 55 |
| 56 | 56 |
| 57 // Load the built-in InternalArray function from the current context. | 57 // Load the built-in InternalArray function from the current context. |
| 58 static void GenerateLoadInternalArrayFunction(MacroAssembler* masm, | 58 static void GenerateLoadInternalArrayFunction(MacroAssembler* masm, |
| 59 Register result) { | 59 Register result) { |
| 60 // Load the native context. | 60 // Load the native context. |
| 61 | 61 |
| 62 __ lw(result, | 62 __ ld(result, |
| 63 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 63 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
| 64 __ lw(result, | 64 __ ld(result, |
| 65 FieldMemOperand(result, GlobalObject::kNativeContextOffset)); | 65 FieldMemOperand(result, GlobalObject::kNativeContextOffset)); |
| 66 // Load the InternalArray function from the native context. | 66 // Load the InternalArray function from the native context. |
| 67 __ lw(result, | 67 __ ld(result, |
| 68 MemOperand(result, | 68 MemOperand(result, |
| 69 Context::SlotOffset( | 69 Context::SlotOffset( |
| 70 Context::INTERNAL_ARRAY_FUNCTION_INDEX))); | 70 Context::INTERNAL_ARRAY_FUNCTION_INDEX))); |
| 71 } | 71 } |
| 72 | 72 |
| 73 | 73 |
| 74 // Load the built-in Array function from the current context. | 74 // Load the built-in Array function from the current context. |
| 75 static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) { | 75 static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) { |
| 76 // Load the native context. | 76 // Load the native context. |
| 77 | 77 |
| 78 __ lw(result, | 78 __ ld(result, |
| 79 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 79 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
| 80 __ lw(result, | 80 __ ld(result, |
| 81 FieldMemOperand(result, GlobalObject::kNativeContextOffset)); | 81 FieldMemOperand(result, GlobalObject::kNativeContextOffset)); |
| 82 // Load the Array function from the native context. | 82 // Load the Array function from the native context. |
| 83 __ lw(result, | 83 __ ld(result, |
| 84 MemOperand(result, | 84 MemOperand(result, |
| 85 Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX))); | 85 Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX))); |
| 86 } | 86 } |
| 87 | 87 |
| 88 | 88 |
| 89 void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) { | 89 void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) { |
| 90 // ----------- S t a t e ------------- | 90 // ----------- S t a t e ------------- |
| 91 // -- a0 : number of arguments | 91 // -- a0 : number of arguments |
| 92 // -- ra : return address | 92 // -- ra : return address |
| 93 // -- sp[...]: constructor arguments | 93 // -- sp[...]: constructor arguments |
| 94 // ----------------------------------- | 94 // ----------------------------------- |
| 95 Label generic_array_code, one_or_more_arguments, two_or_more_arguments; | 95 Label generic_array_code, one_or_more_arguments, two_or_more_arguments; |
| 96 | 96 |
| 97 // Get the InternalArray function. | 97 // Get the InternalArray function. |
| 98 GenerateLoadInternalArrayFunction(masm, a1); | 98 GenerateLoadInternalArrayFunction(masm, a1); |
| 99 | 99 |
| 100 if (FLAG_debug_code) { | 100 if (FLAG_debug_code) { |
| 101 // Initial map for the builtin InternalArray functions should be maps. | 101 // Initial map for the builtin InternalArray functions should be maps. |
| 102 __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); | 102 __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); |
| 103 __ SmiTst(a2, t0); | 103 __ SmiTst(a2, a4); |
| 104 __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, | 104 __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, |
| 105 t0, Operand(zero_reg)); | 105 a4, Operand(zero_reg)); |
| 106 __ GetObjectType(a2, a3, t0); | 106 __ GetObjectType(a2, a3, a4); |
| 107 __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction, | 107 __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction, |
| 108 t0, Operand(MAP_TYPE)); | 108 a4, Operand(MAP_TYPE)); |
| 109 } | 109 } |
| 110 | 110 |
| 111 // Run the native code for the InternalArray function called as a normal | 111 // Run the native code for the InternalArray function called as a normal |
| 112 // function. | 112 // function. |
| 113 // Tail call a stub. | 113 // Tail call a stub. |
| 114 InternalArrayConstructorStub stub(masm->isolate()); | 114 InternalArrayConstructorStub stub(masm->isolate()); |
| 115 __ TailCallStub(&stub); | 115 __ TailCallStub(&stub); |
| 116 } | 116 } |
| 117 | 117 |
| 118 | 118 |
| 119 void Builtins::Generate_ArrayCode(MacroAssembler* masm) { | 119 void Builtins::Generate_ArrayCode(MacroAssembler* masm) { |
| 120 // ----------- S t a t e ------------- | 120 // ----------- S t a t e ------------- |
| 121 // -- a0 : number of arguments | 121 // -- a0 : number of arguments |
| 122 // -- ra : return address | 122 // -- ra : return address |
| 123 // -- sp[...]: constructor arguments | 123 // -- sp[...]: constructor arguments |
| 124 // ----------------------------------- | 124 // ----------------------------------- |
| 125 Label generic_array_code; | 125 Label generic_array_code; |
| 126 | 126 |
| 127 // Get the Array function. | 127 // Get the Array function. |
| 128 GenerateLoadArrayFunction(masm, a1); | 128 GenerateLoadArrayFunction(masm, a1); |
| 129 | 129 |
| 130 if (FLAG_debug_code) { | 130 if (FLAG_debug_code) { |
| 131 // Initial map for the builtin Array functions should be maps. | 131 // Initial map for the builtin Array functions should be maps. |
| 132 __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); | 132 __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); |
| 133 __ SmiTst(a2, t0); | 133 __ SmiTst(a2, a4); |
| 134 __ Assert(ne, kUnexpectedInitialMapForArrayFunction1, | 134 __ Assert(ne, kUnexpectedInitialMapForArrayFunction1, |
| 135 t0, Operand(zero_reg)); | 135 a4, Operand(zero_reg)); |
| 136 __ GetObjectType(a2, a3, t0); | 136 __ GetObjectType(a2, a3, a4); |
| 137 __ Assert(eq, kUnexpectedInitialMapForArrayFunction2, | 137 __ Assert(eq, kUnexpectedInitialMapForArrayFunction2, |
| 138 t0, Operand(MAP_TYPE)); | 138 a4, Operand(MAP_TYPE)); |
| 139 } | 139 } |
| 140 | 140 |
| 141 // Run the native code for the Array function called as a normal function. | 141 // Run the native code for the Array function called as a normal function. |
| 142 // Tail call a stub. | 142 // Tail call a stub. |
| 143 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); | 143 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); |
| 144 ArrayConstructorStub stub(masm->isolate()); | 144 ArrayConstructorStub stub(masm->isolate()); |
| 145 __ TailCallStub(&stub); | 145 __ TailCallStub(&stub); |
| 146 } | 146 } |
| 147 | 147 |
| 148 | 148 |
| 149 void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { | 149 void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { |
| 150 // ----------- S t a t e ------------- | 150 // ----------- S t a t e ------------- |
| 151 // -- a0 : number of arguments | 151 // -- a0 : number of arguments |
| 152 // -- a1 : constructor function | 152 // -- a1 : constructor function |
| 153 // -- ra : return address | 153 // -- ra : return address |
| 154 // -- sp[(argc - n - 1) * 4] : arg[n] (zero based) | 154 // -- sp[(argc - n - 1) * 8] : arg[n] (zero based) |
| 155 // -- sp[argc * 4] : receiver | 155 // -- sp[argc * 8] : receiver |
| 156 // ----------------------------------- | 156 // ----------------------------------- |
| 157 Counters* counters = masm->isolate()->counters(); | 157 Counters* counters = masm->isolate()->counters(); |
| 158 __ IncrementCounter(counters->string_ctor_calls(), 1, a2, a3); | 158 __ IncrementCounter(counters->string_ctor_calls(), 1, a2, a3); |
| 159 | 159 |
| 160 Register function = a1; | 160 Register function = a1; |
| 161 if (FLAG_debug_code) { | 161 if (FLAG_debug_code) { |
| 162 __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, a2); | 162 __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, a2); |
| 163 __ Assert(eq, kUnexpectedStringFunction, function, Operand(a2)); | 163 __ Assert(eq, kUnexpectedStringFunction, function, Operand(a2)); |
| 164 } | 164 } |
| 165 | 165 |
| 166 // Load the first argument into a0 and get rid of the rest. | 166 // Load the first argument into a0 and get rid of the rest. |
| 167 Label no_arguments; | 167 Label no_arguments; |
| 168 __ Branch(&no_arguments, eq, a0, Operand(zero_reg)); | 168 __ Branch(&no_arguments, eq, a0, Operand(zero_reg)); |
| 169 // First arg = sp[(argc - 1) * 4]. | 169 // First arg = sp[(argc - 1) * 8]. |
| 170 __ Subu(a0, a0, Operand(1)); | 170 __ Dsubu(a0, a0, Operand(1)); |
| 171 __ sll(a0, a0, kPointerSizeLog2); | 171 __ dsll(a0, a0, kPointerSizeLog2); |
| 172 __ Addu(sp, a0, sp); | 172 __ Daddu(sp, a0, sp); |
| 173 __ lw(a0, MemOperand(sp)); | 173 __ ld(a0, MemOperand(sp)); |
| 174 // sp now points to args[0], drop args[0] + receiver. | 174 // sp now points to args[0], drop args[0] + receiver. |
| 175 __ Drop(2); | 175 __ Drop(2); |
| 176 | 176 |
| 177 Register argument = a2; | 177 Register argument = a2; |
| 178 Label not_cached, argument_is_string; | 178 Label not_cached, argument_is_string; |
| 179 __ LookupNumberStringCache(a0, // Input. | 179 __ LookupNumberStringCache(a0, // Input. |
| 180 argument, // Result. | 180 argument, // Result. |
| 181 a3, // Scratch. | 181 a3, // Scratch. |
| 182 t0, // Scratch. | 182 a4, // Scratch. |
| 183 t1, // Scratch. | 183 a5, // Scratch. |
| 184 &not_cached); | 184 &not_cached); |
| 185 __ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, t0); | 185 __ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, a4); |
| 186 __ bind(&argument_is_string); | 186 __ bind(&argument_is_string); |
| 187 | 187 |
| 188 // ----------- S t a t e ------------- | 188 // ----------- S t a t e ------------- |
| 189 // -- a2 : argument converted to string | 189 // -- a2 : argument converted to string |
| 190 // -- a1 : constructor function | 190 // -- a1 : constructor function |
| 191 // -- ra : return address | 191 // -- ra : return address |
| 192 // ----------------------------------- | 192 // ----------------------------------- |
| 193 | 193 |
| 194 Label gc_required; | 194 Label gc_required; |
| 195 __ Allocate(JSValue::kSize, | 195 __ Allocate(JSValue::kSize, |
| 196 v0, // Result. | 196 v0, // Result. |
| 197 a3, // Scratch. | 197 a3, // Scratch. |
| 198 t0, // Scratch. | 198 a4, // Scratch. |
| 199 &gc_required, | 199 &gc_required, |
| 200 TAG_OBJECT); | 200 TAG_OBJECT); |
| 201 | 201 |
| 202 // Initialising the String Object. | 202 // Initialising the String Object. |
| 203 Register map = a3; | 203 Register map = a3; |
| 204 __ LoadGlobalFunctionInitialMap(function, map, t0); | 204 __ LoadGlobalFunctionInitialMap(function, map, a4); |
| 205 if (FLAG_debug_code) { | 205 if (FLAG_debug_code) { |
| 206 __ lbu(t0, FieldMemOperand(map, Map::kInstanceSizeOffset)); | 206 __ lbu(a4, FieldMemOperand(map, Map::kInstanceSizeOffset)); |
| 207 __ Assert(eq, kUnexpectedStringWrapperInstanceSize, | 207 __ Assert(eq, kUnexpectedStringWrapperInstanceSize, |
| 208 t0, Operand(JSValue::kSize >> kPointerSizeLog2)); | 208 a4, Operand(JSValue::kSize >> kPointerSizeLog2)); |
| 209 __ lbu(t0, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset)); | 209 __ lbu(a4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset)); |
| 210 __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper, | 210 __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper, |
| 211 t0, Operand(zero_reg)); | 211 a4, Operand(zero_reg)); |
| 212 } | 212 } |
| 213 __ sw(map, FieldMemOperand(v0, HeapObject::kMapOffset)); | 213 __ sd(map, FieldMemOperand(v0, HeapObject::kMapOffset)); |
| 214 | 214 |
| 215 __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex); | 215 __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex); |
| 216 __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset)); | 216 __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset)); |
| 217 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset)); | 217 __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset)); |
| 218 | 218 |
| 219 __ sw(argument, FieldMemOperand(v0, JSValue::kValueOffset)); | 219 __ sd(argument, FieldMemOperand(v0, JSValue::kValueOffset)); |
| 220 | 220 |
| 221 // Ensure the object is fully initialized. | 221 // Ensure the object is fully initialized. |
| 222 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize); | 222 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize); |
| 223 | 223 |
| 224 __ Ret(); | 224 __ Ret(); |
| 225 | 225 |
| 226 // The argument was not found in the number to string cache. Check | 226 // The argument was not found in the number to string cache. Check |
| 227 // if it's a string already before calling the conversion builtin. | 227 // if it's a string already before calling the conversion builtin. |
| 228 Label convert_argument; | 228 Label convert_argument; |
| 229 __ bind(&not_cached); | 229 __ bind(&not_cached); |
| 230 __ JumpIfSmi(a0, &convert_argument); | 230 __ JumpIfSmi(a0, &convert_argument); |
| 231 | 231 |
| 232 // Is it a String? | 232 // Is it a String? |
| 233 __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset)); | 233 __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset)); |
| 234 __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset)); | 234 __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset)); |
| 235 STATIC_ASSERT(kNotStringTag != 0); | 235 STATIC_ASSERT(kNotStringTag != 0); |
| 236 __ And(t0, a3, Operand(kIsNotStringMask)); | 236 __ And(a4, a3, Operand(kIsNotStringMask)); |
| 237 __ Branch(&convert_argument, ne, t0, Operand(zero_reg)); | 237 __ Branch(&convert_argument, ne, a4, Operand(zero_reg)); |
| 238 __ mov(argument, a0); | 238 __ mov(argument, a0); |
| 239 __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0); | 239 __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, a4); |
| 240 __ Branch(&argument_is_string); | 240 __ Branch(&argument_is_string); |
| 241 | 241 |
| 242 // Invoke the conversion builtin and put the result into a2. | 242 // Invoke the conversion builtin and put the result into a2. |
| 243 __ bind(&convert_argument); | 243 __ bind(&convert_argument); |
| 244 __ push(function); // Preserve the function. | 244 __ push(function); // Preserve the function. |
| 245 __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0); | 245 __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, a4); |
| 246 { | 246 { |
| 247 FrameScope scope(masm, StackFrame::INTERNAL); | 247 FrameScope scope(masm, StackFrame::INTERNAL); |
| 248 __ push(a0); | 248 __ push(a0); |
| 249 __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); | 249 __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); |
| 250 } | 250 } |
| 251 __ pop(function); | 251 __ pop(function); |
| 252 __ mov(argument, v0); | 252 __ mov(argument, v0); |
| 253 __ Branch(&argument_is_string); | 253 __ Branch(&argument_is_string); |
| 254 | 254 |
| 255 // Load the empty string into a2, remove the receiver from the | 255 // Load the empty string into a2, remove the receiver from the |
| 256 // stack, and jump back to the case where the argument is a string. | 256 // stack, and jump back to the case where the argument is a string. |
| 257 __ bind(&no_arguments); | 257 __ bind(&no_arguments); |
| 258 __ LoadRoot(argument, Heap::kempty_stringRootIndex); | 258 __ LoadRoot(argument, Heap::kempty_stringRootIndex); |
| 259 __ Drop(1); | 259 __ Drop(1); |
| 260 __ Branch(&argument_is_string); | 260 __ Branch(&argument_is_string); |
| 261 | 261 |
| 262 // At this point the argument is already a string. Call runtime to | 262 // At this point the argument is already a string. Call runtime to |
| 263 // create a string wrapper. | 263 // create a string wrapper. |
| 264 __ bind(&gc_required); | 264 __ bind(&gc_required); |
| 265 __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, t0); | 265 __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, a4); |
| 266 { | 266 { |
| 267 FrameScope scope(masm, StackFrame::INTERNAL); | 267 FrameScope scope(masm, StackFrame::INTERNAL); |
| 268 __ push(argument); | 268 __ push(argument); |
| 269 __ CallRuntime(Runtime::kNewStringWrapper, 1); | 269 __ CallRuntime(Runtime::kNewStringWrapper, 1); |
| 270 } | 270 } |
| 271 __ Ret(); | 271 __ Ret(); |
| 272 } | 272 } |
| 273 | 273 |
| 274 | 274 |
| 275 static void CallRuntimePassFunction( | 275 static void CallRuntimePassFunction( |
| 276 MacroAssembler* masm, Runtime::FunctionId function_id) { | 276 MacroAssembler* masm, Runtime::FunctionId function_id) { |
| 277 FrameScope scope(masm, StackFrame::INTERNAL); | 277 FrameScope scope(masm, StackFrame::INTERNAL); |
| 278 // Push a copy of the function onto the stack. | 278 // Push a copy of the function onto the stack. |
| 279 // Push call kind information and function as parameter to the runtime call. | 279 // Push call kind information and function as parameter to the runtime call. |
| 280 __ Push(a1, a1); | 280 __ Push(a1, a1); |
| 281 | 281 |
| 282 __ CallRuntime(function_id, 1); | 282 __ CallRuntime(function_id, 1); |
| 283 // Restore call kind information and receiver. | 283 // Restore call kind information and receiver. |
| 284 __ Pop(a1); | 284 __ Pop(a1); |
| 285 } | 285 } |
| 286 | 286 |
| 287 | 287 |
| 288 static void GenerateTailCallToSharedCode(MacroAssembler* masm) { | 288 static void GenerateTailCallToSharedCode(MacroAssembler* masm) { |
| 289 __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); | 289 __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); |
| 290 __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset)); | 290 __ ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset)); |
| 291 __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag)); | 291 __ Daddu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 292 __ Jump(at); | 292 __ Jump(at); |
| 293 } | 293 } |
| 294 | 294 |
| 295 | 295 |
| 296 static void GenerateTailCallToReturnedCode(MacroAssembler* masm) { | 296 static void GenerateTailCallToReturnedCode(MacroAssembler* masm) { |
| 297 __ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag)); | 297 __ Daddu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 298 __ Jump(at); | 298 __ Jump(at); |
| 299 } | 299 } |
| 300 | 300 |
| 301 | 301 |
| 302 void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) { | 302 void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) { |
| 303 // Checking whether the queued function is ready for install is optional, | 303 // Checking whether the queued function is ready for install is optional, |
| 304 // since we come across interrupts and stack checks elsewhere. However, | 304 // since we come across interrupts and stack checks elsewhere. However, |
| 305 // not checking may delay installing ready functions, and always checking | 305 // not checking may delay installing ready functions, and always checking |
| 306 // would be quite expensive. A good compromise is to first check against | 306 // would be quite expensive. A good compromise is to first check against |
| 307 // stack limit as a cue for an interrupt signal. | 307 // stack limit as a cue for an interrupt signal. |
| 308 Label ok; | 308 Label ok; |
| 309 __ LoadRoot(t0, Heap::kStackLimitRootIndex); | 309 __ LoadRoot(a4, Heap::kStackLimitRootIndex); |
| 310 __ Branch(&ok, hs, sp, Operand(t0)); | 310 __ Branch(&ok, hs, sp, Operand(a4)); |
| 311 | 311 |
| 312 CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode); | 312 CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode); |
| 313 GenerateTailCallToReturnedCode(masm); | 313 GenerateTailCallToReturnedCode(masm); |
| 314 | 314 |
| 315 __ bind(&ok); | 315 __ bind(&ok); |
| 316 GenerateTailCallToSharedCode(masm); | 316 GenerateTailCallToSharedCode(masm); |
| 317 } | 317 } |
| 318 | 318 |
| 319 | 319 |
| 320 static void Generate_JSConstructStubHelper(MacroAssembler* masm, | 320 static void Generate_JSConstructStubHelper(MacroAssembler* masm, |
| (...skipping 22 matching lines...) |
| 343 // Enter a construct frame. | 343 // Enter a construct frame. |
| 344 { | 344 { |
| 345 FrameScope scope(masm, StackFrame::CONSTRUCT); | 345 FrameScope scope(masm, StackFrame::CONSTRUCT); |
| 346 | 346 |
| 347 if (create_memento) { | 347 if (create_memento) { |
| 348 __ AssertUndefinedOrAllocationSite(a2, a3); | 348 __ AssertUndefinedOrAllocationSite(a2, a3); |
| 349 __ push(a2); | 349 __ push(a2); |
| 350 } | 350 } |
| 351 | 351 |
| 352 // Preserve the two incoming parameters on the stack. | 352 // Preserve the two incoming parameters on the stack. |
| 353 __ sll(a0, a0, kSmiTagSize); // Tag arguments count. | 353 // Tag arguments count. |
| | 354 __ dsll32(a0, a0, 0); |
| 354 __ MultiPushReversed(a0.bit() | a1.bit()); | 355 __ MultiPushReversed(a0.bit() | a1.bit()); |
| 355 | 356 |
| 356 Label rt_call, allocated; | 357 Label rt_call, allocated; |
| 357 // Try to allocate the object without transitioning into C code. If any of | 358 // Try to allocate the object without transitioning into C code. If any of |
| 358 // the preconditions is not met, the code bails out to the runtime call. | 359 // the preconditions is not met, the code bails out to the runtime call. |
| 359 if (FLAG_inline_new) { | 360 if (FLAG_inline_new) { |
| 360 Label undo_allocation; | 361 Label undo_allocation; |
| 361 ExternalReference debug_step_in_fp = | 362 ExternalReference debug_step_in_fp = |
| 362 ExternalReference::debug_step_in_fp_address(isolate); | 363 ExternalReference::debug_step_in_fp_address(isolate); |
| 363 __ li(a2, Operand(debug_step_in_fp)); | 364 __ li(a2, Operand(debug_step_in_fp)); |
| 364 __ lw(a2, MemOperand(a2)); | 365 __ ld(a2, MemOperand(a2)); |
| 365 __ Branch(&rt_call, ne, a2, Operand(zero_reg)); | 366 __ Branch(&rt_call, ne, a2, Operand(zero_reg)); |
| 366 | 367 |
| 367 // Load the initial map and verify that it is in fact a map. | 368 // Load the initial map and verify that it is in fact a map. |
| 368 // a1: constructor function | 369 // a1: constructor function |
| 369 __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); | 370 __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); |
| 370 __ JumpIfSmi(a2, &rt_call); | 371 __ JumpIfSmi(a2, &rt_call); |
| 371 __ GetObjectType(a2, a3, t4); | 372 __ GetObjectType(a2, a3, t0); |
| 372 __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE)); | 373 __ Branch(&rt_call, ne, t0, Operand(MAP_TYPE)); |
| 373 | 374 |
| 374 // Check that the constructor is not constructing a JSFunction (see | 375 // Check that the constructor is not constructing a JSFunction (see |
| 375 // comments in Runtime_NewObject in runtime.cc). In which case the | 376 // comments in Runtime_NewObject in runtime.cc). In which case the |
| 376 // initial map's instance type would be JS_FUNCTION_TYPE. | 377 // initial map's instance type would be JS_FUNCTION_TYPE. |
| 377 // a1: constructor function | 378 // a1: constructor function |
| 378 // a2: initial map | 379 // a2: initial map |
| 379 __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset)); | 380 __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset)); |
| 380 __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE)); | 381 __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE)); |
| 381 | 382 |
| 382 if (!is_api_function) { | 383 if (!is_api_function) { |
| 383 Label allocate; | 384 Label allocate; |
| 384 MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset); | 385 MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset); |
| 385 // Check if slack tracking is enabled. | 386 // Check if slack tracking is enabled. |
| 386 __ lw(t0, bit_field3); | 387 __ lwu(a4, bit_field3); |
| 387 __ DecodeField<Map::ConstructionCount>(t2, t0); | 388 __ DecodeField<Map::ConstructionCount>(a6, a4); |
| 388 __ Branch(&allocate, eq, t2, Operand(JSFunction::kNoSlackTracking)); | 389 __ Branch(&allocate, |
| | 390 eq, |
| | 391 a6, |
| | 392 Operand(static_cast<int64_t>(JSFunction::kNoSlackTracking))); |
| 389 // Decrease generous allocation count. | 393 // Decrease generous allocation count. |
| 390 __ Subu(t0, t0, Operand(1 << Map::ConstructionCount::kShift)); | 394 __ Dsubu(a4, a4, Operand(1 << Map::ConstructionCount::kShift)); |
| 391 __ Branch(USE_DELAY_SLOT, | 395 __ Branch(USE_DELAY_SLOT, |
| 392 &allocate, ne, t2, Operand(JSFunction::kFinishSlackTracking)); | 396 &allocate, ne, a6, Operand(JSFunction::kFinishSlackTracking)); |
| 393 __ sw(t0, bit_field3); // In delay slot. | 397 __ sw(a4, bit_field3); // In delay slot. |
| 394 | 398 |
| 395 __ Push(a1, a2, a1); // a1 = Constructor. | 399 __ Push(a1, a2, a1); // a1 = Constructor. |
| 396 __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); | 400 __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); |
| 397 | 401 |
| 398 __ Pop(a1, a2); | 402 __ Pop(a1, a2); |
| 399 // Slack tracking counter is kNoSlackTracking after runtime call. | 403 // Slack tracking counter is kNoSlackTracking after runtime call. |
| 400 ASSERT(JSFunction::kNoSlackTracking == 0); | 404 ASSERT(JSFunction::kNoSlackTracking == 0); |
| 401 __ mov(t2, zero_reg); | 405 __ mov(a6, zero_reg); |
| 402 | 406 |
| 403 __ bind(&allocate); | 407 __ bind(&allocate); |
| 404 } | 408 } |
| 405 | 409 |
| 406 // Now allocate the JSObject on the heap. | 410 // Now allocate the JSObject on the heap. |
| 407 // a1: constructor function | 411 // a1: constructor function |
| 408 // a2: initial map | 412 // a2: initial map |
| 409 __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset)); | 413 __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset)); |
| 410 if (create_memento) { | 414 if (create_memento) { |
| 411 __ Addu(a3, a3, Operand(AllocationMemento::kSize / kPointerSize)); | 415 __ Daddu(a3, a3, Operand(AllocationMemento::kSize / kPointerSize)); |
| 412 } | 416 } |
| 413 | 417 |
| 414 __ Allocate(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS); | 418 __ Allocate(a3, t0, t1, t2, &rt_call, SIZE_IN_WORDS); |
| 415 | 419 |
| 416 // Allocated the JSObject, now initialize the fields. Map is set to | 420 // Allocated the JSObject, now initialize the fields. Map is set to |
| 417 // initial map and properties and elements are set to empty fixed array. | 421 // initial map and properties and elements are set to empty fixed array. |
| 418 // a1: constructor function | 422 // a1: constructor function |
| 419 // a2: initial map | 423 // a2: initial map |
| 420 // a3: object size (not including memento if create_memento) | 424 // a3: object size (not including memento if create_memento) |
| 421 // t4: JSObject (not tagged) | 425 // t0: JSObject (not tagged) |
| 422 __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex); | 426 __ LoadRoot(t2, Heap::kEmptyFixedArrayRootIndex); |
| 423 __ mov(t5, t4); | 427 __ mov(t1, t0); |
| 424 __ sw(a2, MemOperand(t5, JSObject::kMapOffset)); | 428 __ sd(a2, MemOperand(t1, JSObject::kMapOffset)); |
| 425 __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset)); | 429 __ sd(t2, MemOperand(t1, JSObject::kPropertiesOffset)); |
| 426 __ sw(t6, MemOperand(t5, JSObject::kElementsOffset)); | 430 __ sd(t2, MemOperand(t1, JSObject::kElementsOffset)); |
| 427 __ Addu(t5, t5, Operand(3*kPointerSize)); | 431 __ Daddu(t1, t1, Operand(3*kPointerSize)); |
| 428 ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); | 432 ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); |
| 429 ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); | 433 ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); |
| 430 ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); | 434 ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); |
| 431 | 435 |
| 432 // Fill all the in-object properties with appropriate filler. | 436 // Fill all the in-object properties with appropriate filler. |
| 433 // a1: constructor function | 437 // a1: constructor function |
| 434 // a2: initial map | 438 // a2: initial map |
| 435 // a3: object size (in words, including memento if create_memento) | 439 // a3: object size (in words, including memento if create_memento) |
| 436 // t4: JSObject (not tagged) | 440 // t0: JSObject (not tagged) |
| 437 // t5: First in-object property of JSObject (not tagged) | 441 // t1: First in-object property of JSObject (not tagged) |
| 438 // t2: slack tracking counter (non-API function case) | 442 // a6: slack tracking counter (non-API function case) |
| 439 ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize); | 443 ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize); |
| 440 | 444 |
| 441 // Use t7 to hold undefined, which is used in several places below. | 445 // Use t3 to hold undefined, which is used in several places below. |
| 442 __ LoadRoot(t7, Heap::kUndefinedValueRootIndex); | 446 __ LoadRoot(t3, Heap::kUndefinedValueRootIndex); |
| 443 | 447 |
| 444 if (!is_api_function) { | 448 if (!is_api_function) { |
| 445 Label no_inobject_slack_tracking; | 449 Label no_inobject_slack_tracking; |
| 446 | 450 |
| 447 // Check if slack tracking is enabled. | 451 // Check if slack tracking is enabled. |
| 448 __ Branch(&no_inobject_slack_tracking, | 452 __ Branch(&no_inobject_slack_tracking, |
| 449 eq, t2, Operand(JSFunction::kNoSlackTracking)); | 453 eq, |
| 454 a6, |
| 455 Operand(static_cast<int64_t>(JSFunction::kNoSlackTracking))); |
| 450 | 456 |
| 451 // Allocate object with a slack. | 457 // Allocate object with a slack. |
| 452 __ lbu(a0, FieldMemOperand(a2, Map::kPreAllocatedPropertyFieldsOffset)); | 458 __ lwu(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset)); |
| 453 __ sll(at, a0, kPointerSizeLog2); | 459 __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte, |
| 454 __ addu(a0, t5, at); | 460 kBitsPerByte); |
| | 461 __ dsll(at, a0, kPointerSizeLog2); |
| | 462 __ daddu(a0, t1, at); |
| 455 // a0: offset of first field after pre-allocated fields | 463 // a0: offset of first field after pre-allocated fields |
| 456 if (FLAG_debug_code) { | 464 if (FLAG_debug_code) { |
| 457 __ sll(at, a3, kPointerSizeLog2); | 465 __ dsll(at, a3, kPointerSizeLog2); |
| 458 __ Addu(t6, t4, Operand(at)); // End of object. | 466 __ Daddu(t2, t0, Operand(at)); // End of object. |
| 459 __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields, | 467 __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields, |
| 460 a0, Operand(t6)); | 468 a0, Operand(t2)); |
| 461 } | 469 } |
| 462 __ InitializeFieldsWithFiller(t5, a0, t7); | 470 __ InitializeFieldsWithFiller(t1, a0, t3); |
| 463 // To allow for truncation. | 471 // To allow for truncation. |
| 464 __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex); | 472 __ LoadRoot(t3, Heap::kOnePointerFillerMapRootIndex); |
| 465 // Fill the remaining fields with one pointer filler map. | 473 // Fill the remaining fields with one pointer filler map. |
| 466 | 474 |
| 467 __ bind(&no_inobject_slack_tracking); | 475 __ bind(&no_inobject_slack_tracking); |
| 468 } | 476 } |
| 469 | 477 |
| 470 if (create_memento) { | 478 if (create_memento) { |
| 471 __ Subu(a0, a3, Operand(AllocationMemento::kSize / kPointerSize)); | 479 __ Dsubu(a0, a3, Operand(AllocationMemento::kSize / kPointerSize)); |
| 472 __ sll(a0, a0, kPointerSizeLog2); | 480 __ dsll(a0, a0, kPointerSizeLog2); |
| 473 __ Addu(a0, t4, Operand(a0)); // End of object. | 481 __ Daddu(a0, t0, Operand(a0)); // End of object. |
| 474 __ InitializeFieldsWithFiller(t5, a0, t7); | 482 __ InitializeFieldsWithFiller(t1, a0, t3); |
| 475 | 483 |
| 476 // Fill in memento fields. | 484 // Fill in memento fields. |
| 477 // t5: points to the allocated but uninitialized memento. | 485 // t1: points to the allocated but uninitialized memento. |
| 478 __ LoadRoot(t7, Heap::kAllocationMementoMapRootIndex); | 486 __ LoadRoot(t3, Heap::kAllocationMementoMapRootIndex); |
| 479 ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset); | 487 ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset); |
| 480 __ sw(t7, MemOperand(t5)); | 488 __ sd(t3, MemOperand(t1)); |
| 481 __ Addu(t5, t5, kPointerSize); | 489 __ Daddu(t1, t1, kPointerSize); |
| 482 // Load the AllocationSite. | 490 // Load the AllocationSite. |
| 483 __ lw(t7, MemOperand(sp, 2 * kPointerSize)); | 491 __ ld(t3, MemOperand(sp, 2 * kPointerSize)); |
| 484 ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset); | 492 ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset); |
| 485 __ sw(t7, MemOperand(t5)); | 493 __ sd(t3, MemOperand(t1)); |
| 486 __ Addu(t5, t5, kPointerSize); | 494 __ Daddu(t1, t1, kPointerSize); |
| 487 } else { | 495 } else { |
| 488 __ sll(at, a3, kPointerSizeLog2); | 496 __ dsll(at, a3, kPointerSizeLog2); |
| 489 __ Addu(a0, t4, Operand(at)); // End of object. | 497 __ Daddu(a0, t0, Operand(at)); // End of object. |
| 490 __ InitializeFieldsWithFiller(t5, a0, t7); | 498 __ InitializeFieldsWithFiller(t1, a0, t3); |
| 491 } | 499 } |
| 492 | 500 |
| 493 // Add the object tag to make the JSObject real, so that we can continue | 501 // Add the object tag to make the JSObject real, so that we can continue |
| 494 // and jump into the continuation code at any time from now on. Any | 502 // and jump into the continuation code at any time from now on. Any |
| 495 // failures need to undo the allocation, so that the heap is in a | 503 // failures need to undo the allocation, so that the heap is in a |
| 496 // consistent state and verifiable. | 504 // consistent state and verifiable. |
| 497 __ Addu(t4, t4, Operand(kHeapObjectTag)); | 505 __ Daddu(t0, t0, Operand(kHeapObjectTag)); |
| 498 | 506 |
| 499 // Check if a non-empty properties array is needed. Continue with | 507 // Check if a non-empty properties array is needed. Continue with |
| 500 // allocated object if not; fall through to runtime call if it is. | 508 // allocated object if not; fall through to runtime call if it is. |
| 501 // a1: constructor function | 509 // a1: constructor function |
| 502 // t4: JSObject | 510 // t0: JSObject |
| 503 // t5: start of next object (not tagged) | 511 // t1: start of next object (not tagged) |
| 504 __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset)); | 512 __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset)); |
| 505 // The instance sizes field contains both pre-allocated property fields | 513 // The instance sizes field contains both pre-allocated property fields |
| 506 // and in-object properties. | 514 // and in-object properties. |
| 507 __ lbu(t6, FieldMemOperand(a2, Map::kPreAllocatedPropertyFieldsOffset)); | 515 __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset)); |
| 508 __ Addu(a3, a3, Operand(t6)); | 516 __ Ext(t2, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte, |
| 509 __ lbu(t6, FieldMemOperand(a2, Map::kInObjectPropertiesOffset)); | 517 kBitsPerByte); |
| 510 __ subu(a3, a3, t6); | 518 __ Daddu(a3, a3, Operand(t2)); |
| | 519 __ Ext(t2, a0, Map::kInObjectPropertiesByte * kBitsPerByte, |
| | 520 kBitsPerByte); |
| | 521 __ dsubu(a3, a3, t2); |
| 511 | 522 |
| 512 // Done if no extra properties are to be allocated. | 523 // Done if no extra properties are to be allocated. |
| 513 __ Branch(&allocated, eq, a3, Operand(zero_reg)); | 524 __ Branch(&allocated, eq, a3, Operand(zero_reg)); |
| 514 __ Assert(greater_equal, kPropertyAllocationCountFailed, | 525 __ Assert(greater_equal, kPropertyAllocationCountFailed, |
| 515 a3, Operand(zero_reg)); | 526 a3, Operand(zero_reg)); |
| 516 | 527 |
| 517 // Scale the number of elements by pointer size and add the header for | 528 // Scale the number of elements by pointer size and add the header for |
| 518 // FixedArrays to the start of the next object calculation from above. | 529 // FixedArrays to the start of the next object calculation from above. |
| 519 // a1: constructor | 530 // a1: constructor |
| 520 // a3: number of elements in properties array | 531 // a3: number of elements in properties array |
| 521 // t4: JSObject | 532 // t0: JSObject |
| 522 // t5: start of next object | 533 // t1: start of next object |
| 523 __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize)); | 534 __ Daddu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize)); |
| 524 __ Allocate( | 535 __ Allocate( |
| 525 a0, | 536 a0, |
| 526 t5, | 537 t1, |
| 527 t6, | 538 t2, |
| 528 a2, | 539 a2, |
| 529 &undo_allocation, | 540 &undo_allocation, |
| 530 static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS)); | 541 static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS)); |
| 531 | 542 |
| 532 // Initialize the FixedArray. | 543 // Initialize the FixedArray. |
| 533 // a1: constructor | 544 // a1: constructor |
| 534 // a3: number of elements in properties array (untagged) | 545 // a3: number of elements in properties array (untagged) |
| 535 // t4: JSObject | 546 // t0: JSObject |
| 536 // t5: start of next object | 547 // t1: start of next object |
| 537 __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex); | 548 __ LoadRoot(t2, Heap::kFixedArrayMapRootIndex); |
| 538 __ mov(a2, t5); | 549 __ mov(a2, t1); |
| 539 __ sw(t6, MemOperand(a2, JSObject::kMapOffset)); | 550 __ sd(t2, MemOperand(a2, JSObject::kMapOffset)); |
| 540 __ sll(a0, a3, kSmiTagSize); | 551 // Tag number of elements. |
| 541 __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset)); | 552 __ dsll32(a0, a3, 0); |
| 542 __ Addu(a2, a2, Operand(2 * kPointerSize)); | 553 __ sd(a0, MemOperand(a2, FixedArray::kLengthOffset)); |
| | 554 __ Daddu(a2, a2, Operand(2 * kPointerSize)); |
| 543 | 555 |
| 544 ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); | 556 ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); |
| 545 ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); | 557 ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); |
| 546 | 558 |
| 547 // Initialize the fields to undefined. | 559 // Initialize the fields to undefined. |
| 548 // a1: constructor | 560 // a1: constructor |
| 549 // a2: First element of FixedArray (not tagged) | 561 // a2: First element of FixedArray (not tagged) |
| 550 // a3: number of elements in properties array | 562 // a3: number of elements in properties array |
| 551 // t4: JSObject | 563 // t0: JSObject |
| 552 // t5: FixedArray (not tagged) | 564 // t1: FixedArray (not tagged) |
| 553 __ sll(t3, a3, kPointerSizeLog2); | 565 __ dsll(a7, a3, kPointerSizeLog2); |
| 554 __ addu(t6, a2, t3); // End of object. | 566 __ daddu(t2, a2, a7); // End of object. |
| 555 ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize); | 567 ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize); |
| 556 { Label loop, entry; | 568 { Label loop, entry; |
| 557 if (!is_api_function || create_memento) { | 569 if (!is_api_function || create_memento) { |
| 558 __ LoadRoot(t7, Heap::kUndefinedValueRootIndex); | 570 __ LoadRoot(t3, Heap::kUndefinedValueRootIndex); |
| 559 } else if (FLAG_debug_code) { | 571 } else if (FLAG_debug_code) { |
| 560 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex); | 572 __ LoadRoot(a6, Heap::kUndefinedValueRootIndex); |
| 561 __ Assert(eq, kUndefinedValueNotLoaded, t7, Operand(t2)); | 573 __ Assert(eq, kUndefinedValueNotLoaded, t3, Operand(a6)); |
| 562 } | 574 } |
| 563 __ jmp(&entry); | 575 __ jmp(&entry); |
| 564 __ bind(&loop); | 576 __ bind(&loop); |
| 565 __ sw(t7, MemOperand(a2)); | 577 __ sd(t3, MemOperand(a2)); |
| 566 __ addiu(a2, a2, kPointerSize); | 578 __ daddiu(a2, a2, kPointerSize); |
| 567 __ bind(&entry); | 579 __ bind(&entry); |
| 568 __ Branch(&loop, less, a2, Operand(t6)); | 580 __ Branch(&loop, less, a2, Operand(t2)); |
| 569 } | 581 } |
| 570 | 582 |
| 571 // Store the initialized FixedArray into the properties field of | 583 // Store the initialized FixedArray into the properties field of |
| 572 // the JSObject. | 584 // the JSObject. |
| 573 // a1: constructor function | 585 // a1: constructor function |
| 574 // t4: JSObject | 586 // t0: JSObject |
| 575 // t5: FixedArray (not tagged) | 587 // t1: FixedArray (not tagged) |
| 576 __ Addu(t5, t5, Operand(kHeapObjectTag)); // Add the heap tag. | 588 __ Daddu(t1, t1, Operand(kHeapObjectTag)); // Add the heap tag. |
| 577 __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset)); | 589 __ sd(t1, FieldMemOperand(t0, JSObject::kPropertiesOffset)); |
| 578 | 590 |
| 579 // Continue with JSObject being successfully allocated. | 591 // Continue with JSObject being successfully allocated. |
| 580 // a1: constructor function | 592 // a1: constructor function |
| 581 // t4: JSObject | 593 // t0: JSObject |
| 582 __ jmp(&allocated); | 594 __ jmp(&allocated); |
| 583 | 595 |
| 584 // Undo the setting of the new top so that the heap is verifiable. For | 596 // Undo the setting of the new top so that the heap is verifiable. For |
| 585 // example, the map's unused properties potentially do not match the | 597 // example, the map's unused properties potentially do not match the |
| 586 // allocated object's unused properties. | 598 // allocated object's unused properties. |
| 587 // t4: JSObject (previous new top) | 599 // t0: JSObject (previous new top) |
| 588 __ bind(&undo_allocation); | 600 __ bind(&undo_allocation); |
| 589 __ UndoAllocationInNewSpace(t4, t5); | 601 __ UndoAllocationInNewSpace(t0, t1); |
| 590 } | 602 } |
| 591 | 603 |
| 592 // Allocate the new receiver object using the runtime call. | 604 // Allocate the new receiver object using the runtime call. |
| 593 // a1: constructor function | 605 // a1: constructor function |
| 594 __ bind(&rt_call); | 606 __ bind(&rt_call); |
| 595 if (create_memento) { | 607 if (create_memento) { |
| 596 // Get the cell or allocation site. | 608 // Get the cell or allocation site. |
| 597 __ lw(a2, MemOperand(sp, 2 * kPointerSize)); | 609 __ ld(a2, MemOperand(sp, 2 * kPointerSize)); |
| 598 __ push(a2); | 610 __ push(a2); |
| 599 } | 611 } |
| 600 | 612 |
| 601 __ push(a1); // Argument for Runtime_NewObject. | 613 __ push(a1); // Argument for Runtime_NewObject. |
| 602 if (create_memento) { | 614 if (create_memento) { |
| 603 __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2); | 615 __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2); |
| 604 } else { | 616 } else { |
| 605 __ CallRuntime(Runtime::kNewObject, 1); | 617 __ CallRuntime(Runtime::kNewObject, 1); |
| 606 } | 618 } |
| 607 __ mov(t4, v0); | 619 __ mov(t0, v0); |
| 608 | 620 |
| 609 // If we ended up using the runtime, and we want a memento, then the | 621 // If we ended up using the runtime, and we want a memento, then the |
| 610 // runtime call made it for us, and we shouldn't do the create count | 622 // runtime call made it for us, and we shouldn't do the create count |
| 611 // increment. | 623 // increment. |
| 612 Label count_incremented; | 624 Label count_incremented; |
| 613 if (create_memento) { | 625 if (create_memento) { |
| 614 __ jmp(&count_incremented); | 626 __ jmp(&count_incremented); |
| 615 } | 627 } |
| 616 | 628 |
| 617 // Receiver for constructor call allocated. | 629 // Receiver for constructor call allocated. |
| 618 // t4: JSObject | 630 // t0: JSObject |
| 619 __ bind(&allocated); | 631 __ bind(&allocated); |
| 620 | 632 |
| 621 if (create_memento) { | 633 if (create_memento) { |
| 622 __ lw(a2, MemOperand(sp, kPointerSize * 2)); | 634 __ ld(a2, MemOperand(sp, kPointerSize * 2)); |
| 623 __ LoadRoot(t5, Heap::kUndefinedValueRootIndex); | 635 __ LoadRoot(t1, Heap::kUndefinedValueRootIndex); |
| 624 __ Branch(&count_incremented, eq, a2, Operand(t5)); | 636 __ Branch(&count_incremented, eq, a2, Operand(t1)); |
| 625 // a2 is an AllocationSite. We are creating a memento from it, so we | 637 // a2 is an AllocationSite. We are creating a memento from it, so we |
| 626 // need to increment the memento create count. | 638 // need to increment the memento create count. |
| 627 __ lw(a3, FieldMemOperand(a2, | 639 __ ld(a3, FieldMemOperand(a2, |
| 628 AllocationSite::kPretenureCreateCountOffset)); | 640 AllocationSite::kPretenureCreateCountOffset)); |
| 629 __ Addu(a3, a3, Operand(Smi::FromInt(1))); | 641 __ Daddu(a3, a3, Operand(Smi::FromInt(1))); |
| 630 __ sw(a3, FieldMemOperand(a2, | 642 __ sd(a3, FieldMemOperand(a2, |
| 631 AllocationSite::kPretenureCreateCountOffset)); | 643 AllocationSite::kPretenureCreateCountOffset)); |
| 632 __ bind(&count_incremented); | 644 __ bind(&count_incremented); |
| 633 } | 645 } |
| 634 | 646 |
| 635 __ Push(t4, t4); | 647 __ Push(t0, t0); |
| 636 | 648 |
| 637 // Reload the number of arguments from the stack. | 649 // Reload the number of arguments from the stack. |
| 638 // sp[0]: receiver | 650 // sp[0]: receiver |
| 639 // sp[1]: receiver | 651 // sp[1]: receiver |
| 640 // sp[2]: constructor function | 652 // sp[2]: constructor function |
| 641 // sp[3]: number of arguments (smi-tagged) | 653 // sp[3]: number of arguments (smi-tagged) |
| 642 __ lw(a1, MemOperand(sp, 2 * kPointerSize)); | 654 __ ld(a1, MemOperand(sp, 2 * kPointerSize)); |
| 643 __ lw(a3, MemOperand(sp, 3 * kPointerSize)); | 655 __ ld(a3, MemOperand(sp, 3 * kPointerSize)); |
| 644 | 656 |
| 645 // Set up pointer to last argument. | 657 // Set up pointer to last argument. |
| 646 __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); | 658 __ Daddu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset)); |
| 647 | 659 |
| 648 // Set up number of arguments for function call below. | 660 // Set up number of arguments for function call below. |
| 649 __ srl(a0, a3, kSmiTagSize); | 661 __ SmiUntag(a0, a3); |
| 650 | 662 |
| 651 // Copy arguments and receiver to the expression stack. | 663 // Copy arguments and receiver to the expression stack. |
| 652 // a0: number of arguments | 664 // a0: number of arguments |
| 653 // a1: constructor function | 665 // a1: constructor function |
| 654 // a2: address of last argument (caller sp) | 666 // a2: address of last argument (caller sp) |
| 655 // a3: number of arguments (smi-tagged) | 667 // a3: number of arguments (smi-tagged) |
| 656 // sp[0]: receiver | 668 // sp[0]: receiver |
| 657 // sp[1]: receiver | 669 // sp[1]: receiver |
| 658 // sp[2]: constructor function | 670 // sp[2]: constructor function |
| 659 // sp[3]: number of arguments (smi-tagged) | 671 // sp[3]: number of arguments (smi-tagged) |
| 660 Label loop, entry; | 672 Label loop, entry; |
| | 673 __ SmiUntag(a3); |
| 661 __ jmp(&entry); | 674 __ jmp(&entry); |
| 662 __ bind(&loop); | 675 __ bind(&loop); |
| 663 __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize); | 676 __ dsll(a4, a3, kPointerSizeLog2); |
| 664 __ Addu(t0, a2, Operand(t0)); | 677 __ Daddu(a4, a2, Operand(a4)); |
| 665 __ lw(t1, MemOperand(t0)); | 678 __ ld(a5, MemOperand(a4)); |
| 666 __ push(t1); | 679 __ push(a5); |
| 667 __ bind(&entry); | 680 __ bind(&entry); |
| 668 __ Addu(a3, a3, Operand(-2)); | 681 __ Daddu(a3, a3, Operand(-1)); |
| 669 __ Branch(&loop, greater_equal, a3, Operand(zero_reg)); | 682 __ Branch(&loop, greater_equal, a3, Operand(zero_reg)); |
| 670 | 683 |
| 671 // Call the function. | 684 // Call the function. |
| 672 // a0: number of arguments | 685 // a0: number of arguments |
| 673 // a1: constructor function | 686 // a1: constructor function |
| 674 if (is_api_function) { | 687 if (is_api_function) { |
| 675 __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); | 688 __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); |
| 676 Handle<Code> code = | 689 Handle<Code> code = |
| 677 masm->isolate()->builtins()->HandleApiCallConstruct(); | 690 masm->isolate()->builtins()->HandleApiCallConstruct(); |
| 678 __ Call(code, RelocInfo::CODE_TARGET); | 691 __ Call(code, RelocInfo::CODE_TARGET); |
| 679 } else { | 692 } else { |
| 680 ParameterCount actual(a0); | 693 ParameterCount actual(a0); |
| 681 __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper()); | 694 __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper()); |
| 682 } | 695 } |
| 683 | 696 |
| 684 // Store offset of return address for deoptimizer. | 697 // Store offset of return address for deoptimizer. |
| 685 if (!is_api_function) { | 698 if (!is_api_function) { |
| 686 masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset()); | 699 masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset()); |
| 687 } | 700 } |
| 688 | 701 |
| 689 // Restore context from the frame. | 702 // Restore context from the frame. |
| 690 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 703 __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 691 | 704 |
| 692 // If the result is an object (in the ECMA sense), we should get rid | 705 // If the result is an object (in the ECMA sense), we should get rid |
| 693 // of the receiver and use the result; see ECMA-262 section 13.2.2-7 | 706 // of the receiver and use the result; see ECMA-262 section 13.2.2-7 |
| 694 // on page 74. | 707 // on page 74. |
| 695 Label use_receiver, exit; | 708 Label use_receiver, exit; |
| 696 | 709 |
| 697 // If the result is a smi, it is *not* an object in the ECMA sense. | 710 // If the result is a smi, it is *not* an object in the ECMA sense. |
| 698 // v0: result | 711 // v0: result |
| 699 // sp[0]: receiver (newly allocated object) | 712 // sp[0]: receiver (newly allocated object) |
| 700 // sp[1]: constructor function | 713 // sp[1]: constructor function |
| 701 // sp[2]: number of arguments (smi-tagged) | 714 // sp[2]: number of arguments (smi-tagged) |
| 702 __ JumpIfSmi(v0, &use_receiver); | 715 __ JumpIfSmi(v0, &use_receiver); |
| 703 | 716 |
| 704 // If the type of the result (stored in its map) is less than | 717 // If the type of the result (stored in its map) is less than |
| 705 // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. | 718 // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. |
| 706 __ GetObjectType(v0, a1, a3); | 719 __ GetObjectType(v0, a1, a3); |
| 707 __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE)); | 720 __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE)); |
| 708 | 721 |
| 709 // Throw away the result of the constructor invocation and use the | 722 // Throw away the result of the constructor invocation and use the |
| 710 // on-stack receiver as the result. | 723 // on-stack receiver as the result. |
| 711 __ bind(&use_receiver); | 724 __ bind(&use_receiver); |
| 712 __ lw(v0, MemOperand(sp)); | 725 __ ld(v0, MemOperand(sp)); |
| 713 | 726 |
| 714 // Remove receiver from the stack, remove caller arguments, and | 727 // Remove receiver from the stack, remove caller arguments, and |
| 715 // return. | 728 // return. |
| 716 __ bind(&exit); | 729 __ bind(&exit); |
| 717 // v0: result | 730 // v0: result |
| 718 // sp[0]: receiver (newly allocated object) | 731 // sp[0]: receiver (newly allocated object) |
| 719 // sp[1]: constructor function | 732 // sp[1]: constructor function |
| 720 // sp[2]: number of arguments (smi-tagged) | 733 // sp[2]: number of arguments (smi-tagged) |
| 721 __ lw(a1, MemOperand(sp, 2 * kPointerSize)); | 734 __ ld(a1, MemOperand(sp, 2 * kPointerSize)); |
| 722 | 735 |
| 723 // Leave construct frame. | 736 // Leave construct frame. |
| 724 } | 737 } |
| 725 | 738 |
| 726 __ sll(t0, a1, kPointerSizeLog2 - 1); | 739 __ SmiScale(a4, a1, kPointerSizeLog2); |
| 727 __ Addu(sp, sp, t0); | 740 __ Daddu(sp, sp, a4); |
| 728 __ Addu(sp, sp, kPointerSize); | 741 __ Daddu(sp, sp, kPointerSize); |
| 729 __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2); | 742 __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2); |
| 730 __ Ret(); | 743 __ Ret(); |
| 731 } | 744 } |
| 732 | 745 |
| 733 | 746 |
| 734 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { | 747 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { |
| 735 Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new); | 748 Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new); |
| 736 } | 749 } |
| 737 | 750 |
| 738 | 751 |
| 739 void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) { | 752 void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) { |
| 740 Generate_JSConstructStubHelper(masm, true, false); | 753 Generate_JSConstructStubHelper(masm, true, false); |
| 741 } | 754 } |
| 742 | 755 |
| 743 | 756 |
| 744 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, | 757 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, |
| 745 bool is_construct) { | 758 bool is_construct) { |
| 746 // Called from JSEntryStub::GenerateBody | 759 // Called from JSEntryStub::GenerateBody |
| 747 | 760 |
| 748 // ----------- S t a t e ------------- | 761 // ----------- S t a t e ------------- |
| 749 // -- a0: code entry | 762 // -- a0: code entry |
| 750 // -- a1: function | 763 // -- a1: function |
| 751 // -- a2: receiver_pointer | 764 // -- a2: receiver_pointer |
| 752 // -- a3: argc | 765 // -- a3: argc |
| 753 // -- s0: argv | 766 // -- s0: argv |
| 754 // ----------------------------------- | 767 // ----------------------------------- |
| 755 ProfileEntryHookStub::MaybeCallEntryHook(masm); | 768 ProfileEntryHookStub::MaybeCallEntryHook(masm); |
| 756 | |
| 757 // Clear the context before we push it when entering the JS frame. | 769 // Clear the context before we push it when entering the JS frame. |
| 758 __ mov(cp, zero_reg); | 770 __ mov(cp, zero_reg); |
| 759 | 771 |
| 760 // Enter an internal frame. | 772 // Enter an internal frame. |
| 761 { | 773 { |
| 762 FrameScope scope(masm, StackFrame::INTERNAL); | 774 FrameScope scope(masm, StackFrame::INTERNAL); |
| 763 | 775 |
| 764 // Set up the context from the function argument. | 776 // Set up the context from the function argument. |
| 765 __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); | 777 __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); |
| 766 | 778 |
| 767 // Push the function and the receiver onto the stack. | 779 // Push the function and the receiver onto the stack. |
| 768 __ Push(a1, a2); | 780 __ Push(a1, a2); |
| 769 | 781 |
| 770 // Copy arguments to the stack in a loop. | 782 // Copy arguments to the stack in a loop. |
| 771 // a3: argc | 783 // a3: argc |
| 772 // s0: argv, i.e. points to first arg | 784 // s0: argv, i.e. points to first arg |
| 773 Label loop, entry; | 785 Label loop, entry; |
| 774 __ sll(t0, a3, kPointerSizeLog2); | 786 // TODO(plind): At least on simulator, argc in a3 is an int32_t with junk |
| 775 __ addu(t2, s0, t0); | 787 // in upper bits. Should fix the root cause, rather than use below |
| 788 // workaround to clear upper bits. |
| 789 __ dsll32(a3, a3, 0); // int32_t -> int64_t. |
| 790 __ dsrl32(a3, a3, 0); |
| 791 __ dsll(a4, a3, kPointerSizeLog2); |
| 792 __ daddu(a6, s0, a4); |
| 776 __ b(&entry); | 793 __ b(&entry); |
| 777 __ nop(); // Branch delay slot nop. | 794 __ nop(); // Branch delay slot nop. |
| 778 // t2 points past last arg. | 795 // a6 points past last arg. |
| 779 __ bind(&loop); | 796 __ bind(&loop); |
| 780 __ lw(t0, MemOperand(s0)); // Read next parameter. | 797 __ ld(a4, MemOperand(s0)); // Read next parameter. |
| 781 __ addiu(s0, s0, kPointerSize); | 798 __ daddiu(s0, s0, kPointerSize); |
| 782 __ lw(t0, MemOperand(t0)); // Dereference handle. | 799 __ ld(a4, MemOperand(a4)); // Dereference handle. |
| 783 __ push(t0); // Push parameter. | 800 __ push(a4); // Push parameter. |
| 784 __ bind(&entry); | 801 __ bind(&entry); |
| 785 __ Branch(&loop, ne, s0, Operand(t2)); | 802 __ Branch(&loop, ne, s0, Operand(a6)); |
| 786 | 803 |
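
The copy loop above reads each argv slot, dereferences the handle stored there, and pushes the result; the dsll32/dsrl32 pair first zero-extends argc because, per the TODO, the upper 32 bits may hold junk. A hedged C++ sketch of both pieces, with illustrative names and a plain array standing in for the stack:

    #include <cassert>
    #include <cstdint>

    // dsll32 then dsrl32: zero-extend a 32-bit count whose upper bits
    // are junk (logical right shift on an unsigned type).
    uint64_t ZeroExtend32(uint64_t reg) { return (reg << 32) >> 32; }

    int main() {
      assert(ZeroExtend32(0xDEADBEEF00000003ull) == 3);  // junk above, argc == 3

      // argv holds handles (pointers to slots holding the actual values);
      // each entry is loaded, dereferenced once, and pushed.
      int64_t objects[3] = {10, 20, 30};
      int64_t* argv[3] = {&objects[0], &objects[1], &objects[2]};
      int64_t stack[3];
      int sp = 0;
      for (int i = 0; i < 3; i++) {
        stack[sp++] = *argv[i];  // ld next parameter; ld to dereference; push
      }
      assert(stack[0] == 10 && stack[2] == 30);
      return 0;
    }
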
| 787 // Initialize all JavaScript callee-saved registers, since they will be seen | 804 // Initialize all JavaScript callee-saved registers, since they will be seen |
| 788 // by the garbage collector as part of handlers. | 805 // by the garbage collector as part of handlers. |
| 789 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); | 806 __ LoadRoot(a4, Heap::kUndefinedValueRootIndex); |
| 790 __ mov(s1, t0); | 807 __ mov(s1, a4); |
| 791 __ mov(s2, t0); | 808 __ mov(s2, a4); |
| 792 __ mov(s3, t0); | 809 __ mov(s3, a4); |
| 793 __ mov(s4, t0); | 810 __ mov(s4, a4); |
| 794 __ mov(s5, t0); | 811 __ mov(s5, a4); |
| 795 // s6 holds the root address. Do not clobber. | 812 // s6 holds the root address. Do not clobber. |
| 796 // s7 is cp. Do not init. | 813 // s7 is cp. Do not init. |
| 797 | 814 |
| 798 // Invoke the code and pass argc as a0. | 815 // Invoke the code and pass argc as a0. |
| 799 __ mov(a0, a3); | 816 __ mov(a0, a3); |
| 800 if (is_construct) { | 817 if (is_construct) { |
| 801 // No type feedback cell is available | 818 // No type feedback cell is available |
| 802 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); | 819 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); |
| 803 CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS); | 820 CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS); |
| 804 __ CallStub(&stub); | 821 __ CallStub(&stub); |
| 805 } else { | 822 } else { |
| 806 ParameterCount actual(a0); | 823 ParameterCount actual(a0); |
| 807 __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper()); | 824 __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper()); |
| 808 } | 825 } |
| 809 | 826 |
| 810 // Leave internal frame. | 827 // Leave internal frame. |
| 811 } | 828 } |
| 812 | |
| 813 __ Jump(ra); | 829 __ Jump(ra); |
| 814 } | 830 } |
| 815 | 831 |
| 816 | 832 |
| 817 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) { | 833 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) { |
| 818 Generate_JSEntryTrampolineHelper(masm, false); | 834 Generate_JSEntryTrampolineHelper(masm, false); |
| 819 } | 835 } |
| 820 | 836 |
| 821 | 837 |
| 822 void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { | 838 void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { |
| (...skipping 26 matching lines...) |
| 849 GenerateTailCallToReturnedCode(masm); | 865 GenerateTailCallToReturnedCode(masm); |
| 850 } | 866 } |
| 851 | 867 |
| 852 | 868 |
| 853 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) { | 869 void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) { |
| 854 CallCompileOptimized(masm, true); | 870 CallCompileOptimized(masm, true); |
| 855 GenerateTailCallToReturnedCode(masm); | 871 GenerateTailCallToReturnedCode(masm); |
| 856 } | 872 } |
| 857 | 873 |
| 858 | 874 |
| 859 | |
| 860 static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { | 875 static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { |
| 861 // For now, we are relying on the fact that make_code_young doesn't do any | 876 // For now, we are relying on the fact that make_code_young doesn't do any |
| 862 // garbage collection which allows us to save/restore the registers without | 877 // garbage collection which allows us to save/restore the registers without |
| 863 // worrying about which of them contain pointers. We also don't build an | 878 // worrying about which of them contain pointers. We also don't build an |
| 864 // internal frame to make the code faster, since we shouldn't have to do stack | 879 // internal frame to make the code faster, since we shouldn't have to do stack |
| 865 // crawls in MakeCodeYoung. This seems a bit fragile. | 880 // crawls in MakeCodeYoung. This seems a bit fragile. |
| 866 | 881 |
| 867 // Set a0 to point to the head of the PlatformCodeAge sequence. | 882 // Set a0 to point to the head of the PlatformCodeAge sequence. |
| 868 __ Subu(a0, a0, | 883 __ Dsubu(a0, a0, |
| 869 Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize)); | 884 Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize)); |
| 870 | 885 |
| 871 // The following registers must be saved and restored when calling through to | 886 // The following registers must be saved and restored when calling through to |
| 872 // the runtime: | 887 // the runtime: |
| 873 // a0 - contains return address (beginning of patch sequence) | 888 // a0 - contains return address (beginning of patch sequence) |
| 874 // a1 - isolate | 889 // a1 - isolate |
| 875 RegList saved_regs = | 890 RegList saved_regs = |
| 876 (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit(); | 891 (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit(); |
| 877 FrameScope scope(masm, StackFrame::MANUAL); | 892 FrameScope scope(masm, StackFrame::MANUAL); |
| 878 __ MultiPush(saved_regs); | 893 __ MultiPush(saved_regs); |
| (...skipping 18 matching lines...) |
| 897 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR | 912 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR |
| 898 | 913 |
| 899 | 914 |
| 900 void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) { | 915 void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) { |
| 901 // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact | 916 // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact |
| 902 // that make_code_young doesn't do any garbage collection which allows us to | 917 // that make_code_young doesn't do any garbage collection which allows us to |
| 903 // save/restore the registers without worrying about which of them contain | 918 // save/restore the registers without worrying about which of them contain |
| 904 // pointers. | 919 // pointers. |
| 905 | 920 |
| 906 // Set a0 to point to the head of the PlatformCodeAge sequence. | 921 // Set a0 to point to the head of the PlatformCodeAge sequence. |
| 907 __ Subu(a0, a0, | 922 __ Dsubu(a0, a0, |
| 908 Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize)); | 923 Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize)); |
| 909 | 924 |
| 910 // The following registers must be saved and restored when calling through to | 925 // The following registers must be saved and restored when calling through to |
| 911 // the runtime: | 926 // the runtime: |
| 912 // a0 - contains return address (beginning of patch sequence) | 927 // a0 - contains return address (beginning of patch sequence) |
| 913 // a1 - isolate | 928 // a1 - isolate |
| 914 RegList saved_regs = | 929 RegList saved_regs = |
| 915 (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit(); | 930 (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit(); |
| 916 FrameScope scope(masm, StackFrame::MANUAL); | 931 FrameScope scope(masm, StackFrame::MANUAL); |
| 917 __ MultiPush(saved_regs); | 932 __ MultiPush(saved_regs); |
| 918 __ PrepareCallCFunction(2, 0, a2); | 933 __ PrepareCallCFunction(2, 0, a2); |
| 919 __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate()))); | 934 __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate()))); |
| 920 __ CallCFunction( | 935 __ CallCFunction( |
| 921 ExternalReference::get_mark_code_as_executed_function(masm->isolate()), | 936 ExternalReference::get_mark_code_as_executed_function(masm->isolate()), |
| 922 2); | 937 2); |
| 923 __ MultiPop(saved_regs); | 938 __ MultiPop(saved_regs); |
| 924 | 939 |
| 925 // Perform prologue operations usually performed by the young code stub. | 940 // Perform prologue operations usually performed by the young code stub. |
| 926 __ Push(ra, fp, cp, a1); | 941 __ Push(ra, fp, cp, a1); |
| 927 __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); | 942 __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); |
| 928 | 943 |
| 929 // Jump to point after the code-age stub. | 944 // Jump to point after the code-age stub. |
| 930 __ Addu(a0, a0, Operand(kNoCodeAgeSequenceLength)); | 945 __ Daddu(a0, a0, Operand(kNoCodeAgeSequenceLength)); |
| 931 __ Jump(a0); | 946 __ Jump(a0); |
| 932 } | 947 } |
| 933 | 948 |
| 934 | 949 |
| 935 void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) { | 950 void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) { |
| 936 GenerateMakeCodeYoungAgainCommon(masm); | 951 GenerateMakeCodeYoungAgainCommon(masm); |
| 937 } | 952 } |
| 938 | 953 |
| 939 | 954 |
| 940 static void Generate_NotifyStubFailureHelper(MacroAssembler* masm, | 955 static void Generate_NotifyStubFailureHelper(MacroAssembler* masm, |
| 941 SaveFPRegsMode save_doubles) { | 956 SaveFPRegsMode save_doubles) { |
| 942 { | 957 { |
| 943 FrameScope scope(masm, StackFrame::INTERNAL); | 958 FrameScope scope(masm, StackFrame::INTERNAL); |
| 944 | 959 |
| 945 // Preserve registers across notification, this is important for compiled | 960 // Preserve registers across notification, this is important for compiled |
| 946 // stubs that tail call the runtime on deopts passing their parameters in | 961 // stubs that tail call the runtime on deopts passing their parameters in |
| 947 // registers. | 962 // registers. |
| 948 __ MultiPush(kJSCallerSaved | kCalleeSaved); | 963 __ MultiPush(kJSCallerSaved | kCalleeSaved); |
| 949 // Pass the function and deoptimization type to the runtime system. | 964 // Pass the function and deoptimization type to the runtime system. |
| 950 __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles); | 965 __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles); |
| 951 __ MultiPop(kJSCallerSaved | kCalleeSaved); | 966 __ MultiPop(kJSCallerSaved | kCalleeSaved); |
| 952 } | 967 } |
| 953 | 968 |
| 954 __ Addu(sp, sp, Operand(kPointerSize)); // Ignore state | 969 __ Daddu(sp, sp, Operand(kPointerSize)); // Ignore state |
| 955 __ Jump(ra); // Jump to miss handler | 970 __ Jump(ra); // Jump to miss handler |
| 956 } | 971 } |
| 957 | 972 |
| 958 | 973 |
| 959 void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) { | 974 void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) { |
| 960 Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs); | 975 Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs); |
| 961 } | 976 } |
| 962 | 977 |
| 963 | 978 |
| 964 void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) { | 979 void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) { |
| 965 Generate_NotifyStubFailureHelper(masm, kSaveFPRegs); | 980 Generate_NotifyStubFailureHelper(masm, kSaveFPRegs); |
| 966 } | 981 } |
| 967 | 982 |
| 968 | 983 |
| 969 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, | 984 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, |
| 970 Deoptimizer::BailoutType type) { | 985 Deoptimizer::BailoutType type) { |
| 971 { | 986 { |
| 972 FrameScope scope(masm, StackFrame::INTERNAL); | 987 FrameScope scope(masm, StackFrame::INTERNAL); |
| 973 // Pass the function and deoptimization type to the runtime system. | 988 // Pass the function and deoptimization type to the runtime system. |
| 974 __ li(a0, Operand(Smi::FromInt(static_cast<int>(type)))); | 989 __ li(a0, Operand(Smi::FromInt(static_cast<int>(type)))); |
| 975 __ push(a0); | 990 __ push(a0); |
| 976 __ CallRuntime(Runtime::kNotifyDeoptimized, 1); | 991 __ CallRuntime(Runtime::kNotifyDeoptimized, 1); |
| 977 } | 992 } |
| 978 | 993 |
| 979 // Get the full codegen state from the stack and untag it -> t2. | 994 // Get the full codegen state from the stack and untag it -> a6. |
| 980 __ lw(t2, MemOperand(sp, 0 * kPointerSize)); | 995 __ ld(a6, MemOperand(sp, 0 * kPointerSize)); |
| 981 __ SmiUntag(t2); | 996 __ SmiUntag(a6); |
| 982 // Switch on the state. | 997 // Switch on the state. |
| 983 Label with_tos_register, unknown_state; | 998 Label with_tos_register, unknown_state; |
| 984 __ Branch(&with_tos_register, | 999 __ Branch(&with_tos_register, |
| 985 ne, t2, Operand(FullCodeGenerator::NO_REGISTERS)); | 1000 ne, a6, Operand(FullCodeGenerator::NO_REGISTERS)); |
| 986 __ Ret(USE_DELAY_SLOT); | 1001 __ Ret(USE_DELAY_SLOT); |
| 987 // Safe to fill delay slot Addu will emit one instruction. | 1002 // Safe to fill delay slot: Daddu will emit one instruction. |
| 988 __ Addu(sp, sp, Operand(1 * kPointerSize)); // Remove state. | 1003 __ Daddu(sp, sp, Operand(1 * kPointerSize)); // Remove state. |
| 989 | 1004 |
| 990 __ bind(&with_tos_register); | 1005 __ bind(&with_tos_register); |
| 991 __ lw(v0, MemOperand(sp, 1 * kPointerSize)); | 1006 __ ld(v0, MemOperand(sp, 1 * kPointerSize)); |
| 992 __ Branch(&unknown_state, ne, t2, Operand(FullCodeGenerator::TOS_REG)); | 1007 __ Branch(&unknown_state, ne, a6, Operand(FullCodeGenerator::TOS_REG)); |
| 993 | 1008 |
| 994 __ Ret(USE_DELAY_SLOT); | 1009 __ Ret(USE_DELAY_SLOT); |
| 995 // Safe to fill delay slot Addu will emit one instruction. | 1010 // Safe to fill delay slot: Daddu will emit one instruction. |
| 996 __ Addu(sp, sp, Operand(2 * kPointerSize)); // Remove state. | 1011 __ Daddu(sp, sp, Operand(2 * kPointerSize)); // Remove state. |
| 997 | 1012 |
| 998 __ bind(&unknown_state); | 1013 __ bind(&unknown_state); |
| 999 __ stop("no cases left"); | 1014 __ stop("no cases left"); |
| 1000 } | 1015 } |
| 1001 | 1016 |
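
For reference, the state switch above drops one stack word when no registers are live, and two words (returning the saved top-of-stack value in v0) for TOS_REG. A tiny sketch of the slot accounting, with illustrative enum values:

    #include <cassert>

    enum State { NO_REGISTERS = 0, TOS_REG = 1 };  // illustrative values

    // How many stack slots the notify trampoline drops before returning.
    int SlotsToDrop(State s) { return s == NO_REGISTERS ? 1 : 2; }

    int main() {
      assert(SlotsToDrop(NO_REGISTERS) == 1);  // just the state word
      assert(SlotsToDrop(TOS_REG) == 2);       // state word + saved TOS value
      return 0;
    }
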
| 1002 | 1017 |
| 1003 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { | 1018 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { |
| 1004 Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER); | 1019 Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER); |
| 1005 } | 1020 } |
| 1006 | 1021 |
| 1007 | 1022 |
| 1008 void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) { | 1023 void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) { |
| 1009 Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT); | 1024 Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT); |
| 1010 } | 1025 } |
| 1011 | 1026 |
| 1012 | 1027 |
| 1013 void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) { | 1028 void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) { |
| 1014 Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY); | 1029 Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY); |
| 1015 } | 1030 } |
| 1016 | 1031 |
| 1017 | 1032 |
| 1018 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { | 1033 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { |
| 1019 // Lookup the function in the JavaScript frame. | 1034 // Lookup the function in the JavaScript frame. |
| 1020 __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | 1035 __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
| 1021 { | 1036 { |
| 1022 FrameScope scope(masm, StackFrame::INTERNAL); | 1037 FrameScope scope(masm, StackFrame::INTERNAL); |
| 1023 // Pass function as argument. | 1038 // Pass function as argument. |
| 1024 __ push(a0); | 1039 __ push(a0); |
| 1025 __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); | 1040 __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); |
| 1026 } | 1041 } |
| 1027 | 1042 |
| 1028 // If the code object is null, just return to the unoptimized code. | 1043 // If the code object is null, just return to the unoptimized code. |
| 1029 __ Ret(eq, v0, Operand(Smi::FromInt(0))); | 1044 __ Ret(eq, v0, Operand(Smi::FromInt(0))); |
| 1030 | 1045 |
| 1031 // Load deoptimization data from the code object. | 1046 // Load deoptimization data from the code object. |
| 1032 // <deopt_data> = <code>[#deoptimization_data_offset] | 1047 // <deopt_data> = <code>[#deoptimization_data_offset] |
| 1033 __ lw(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag)); | 1048 __ Uld(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag)); |
| 1034 | 1049 |
| 1035 // Load the OSR entrypoint offset from the deoptimization data. | 1050 // Load the OSR entrypoint offset from the deoptimization data. |
| 1036 // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset] | 1051 // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset] |
| 1037 __ lw(a1, MemOperand(a1, FixedArray::OffsetOfElementAt( | 1052 __ ld(a1, MemOperand(a1, FixedArray::OffsetOfElementAt( |
| 1038 DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag)); | 1053 DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag)); |
| 1039 __ SmiUntag(a1); | 1054 __ SmiUntag(a1); |
| 1040 | 1055 |
| 1041 // Compute the target address = code_obj + header_size + osr_offset | 1056 // Compute the target address = code_obj + header_size + osr_offset |
| 1042 // <entry_addr> = <code_obj> + #header_size + <osr_offset> | 1057 // <entry_addr> = <code_obj> + #header_size + <osr_offset> |
| 1043 __ addu(v0, v0, a1); | 1058 __ daddu(v0, v0, a1); |
| 1044 __ addiu(ra, v0, Code::kHeaderSize - kHeapObjectTag); | 1059 __ daddiu(ra, v0, Code::kHeaderSize - kHeapObjectTag); |
| 1045 | 1060 |
| 1046 // And "return" to the OSR entry point of the function. | 1061 // And "return" to the OSR entry point of the function. |
| 1047 __ Ret(); | 1062 __ Ret(); |
| 1048 } | 1063 } |
| 1049 | 1064 |
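
The OSR entry address computed above is just the tagged code pointer plus header size, minus the heap-object tag, plus the untagged OSR offset. A sketch with illustrative constants (the real values come from V8's Code object layout):

    #include <cassert>
    #include <cstdint>

    // Illustrative constants; V8 defines the real kHeapObjectTag and
    // Code::kHeaderSize.
    const int kHeapObjectTag = 1;
    const int kHeaderSize = 128;

    uintptr_t OsrEntry(uintptr_t tagged_code_obj, int32_t osr_offset) {
      // <entry_addr> = <code_obj> + #header_size + <osr_offset>, untagged.
      return tagged_code_obj + osr_offset + kHeaderSize - kHeapObjectTag;
    }

    int main() {
      uintptr_t code = 0x1000 + kHeapObjectTag;  // tagged heap pointer
      assert(OsrEntry(code, 0x40) == 0x1000 + kHeaderSize + 0x40);
      return 0;
    }
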
| 1050 | 1065 |
| 1051 void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) { | 1066 void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) { |
| 1052 // We check the stack limit as indicator that recompilation might be done. | 1067 // We check the stack limit as indicator that recompilation might be done. |
| 1053 Label ok; | 1068 Label ok; |
| 1054 __ LoadRoot(at, Heap::kStackLimitRootIndex); | 1069 __ LoadRoot(at, Heap::kStackLimitRootIndex); |
| 1055 __ Branch(&ok, hs, sp, Operand(at)); | 1070 __ Branch(&ok, hs, sp, Operand(at)); |
| 1056 { | 1071 { |
| 1057 FrameScope scope(masm, StackFrame::INTERNAL); | 1072 FrameScope scope(masm, StackFrame::INTERNAL); |
| 1058 __ CallRuntime(Runtime::kStackGuard, 0); | 1073 __ CallRuntime(Runtime::kStackGuard, 0); |
| 1059 } | 1074 } |
| 1060 __ Jump(masm->isolate()->builtins()->OnStackReplacement(), | 1075 __ Jump(masm->isolate()->builtins()->OnStackReplacement(), |
| 1061 RelocInfo::CODE_TARGET); | 1076 RelocInfo::CODE_TARGET); |
| 1062 | 1077 |
| 1063 __ bind(&ok); | 1078 __ bind(&ok); |
| 1064 __ Ret(); | 1079 __ Ret(); |
| 1065 } | 1080 } |
| 1066 | 1081 |
| 1067 | 1082 |
| 1068 void Builtins::Generate_FunctionCall(MacroAssembler* masm) { | 1083 void Builtins::Generate_FunctionCall(MacroAssembler* masm) { |
| 1069 // 1. Make sure we have at least one argument. | 1084 // 1. Make sure we have at least one argument. |
| 1070 // a0: actual number of arguments | 1085 // a0: actual number of arguments |
| 1071 { Label done; | 1086 { Label done; |
| 1072 __ Branch(&done, ne, a0, Operand(zero_reg)); | 1087 __ Branch(&done, ne, a0, Operand(zero_reg)); |
| 1073 __ LoadRoot(t2, Heap::kUndefinedValueRootIndex); | 1088 __ LoadRoot(a6, Heap::kUndefinedValueRootIndex); |
| 1074 __ push(t2); | 1089 __ push(a6); |
| 1075 __ Addu(a0, a0, Operand(1)); | 1090 __ Daddu(a0, a0, Operand(1)); |
| 1076 __ bind(&done); | 1091 __ bind(&done); |
| 1077 } | 1092 } |
| 1078 | 1093 |
| 1079 // 2. Get the function to call (passed as receiver) from the stack, check | 1094 // 2. Get the function to call (passed as receiver) from the stack, check |
| 1080 // if it is a function. | 1095 // if it is a function. |
| 1081 // a0: actual number of arguments | 1096 // a0: actual number of arguments |
| 1082 Label slow, non_function; | 1097 Label slow, non_function; |
| 1083 __ sll(at, a0, kPointerSizeLog2); | 1098 __ dsll(at, a0, kPointerSizeLog2); |
| 1084 __ addu(at, sp, at); | 1099 __ daddu(at, sp, at); |
| 1085 __ lw(a1, MemOperand(at)); | 1100 __ ld(a1, MemOperand(at)); |
| 1086 __ JumpIfSmi(a1, &non_function); | 1101 __ JumpIfSmi(a1, &non_function); |
| 1087 __ GetObjectType(a1, a2, a2); | 1102 __ GetObjectType(a1, a2, a2); |
| 1088 __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE)); | 1103 __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE)); |
| 1089 | 1104 |
| 1090 // 3a. Patch the first argument if necessary when calling a function. | 1105 // 3a. Patch the first argument if necessary when calling a function. |
| 1091 // a0: actual number of arguments | 1106 // a0: actual number of arguments |
| 1092 // a1: function | 1107 // a1: function |
| 1093 Label shift_arguments; | 1108 Label shift_arguments; |
| 1094 __ li(t0, Operand(0, RelocInfo::NONE32)); // Indicate regular JS_FUNCTION. | 1109 __ li(a4, Operand(0, RelocInfo::NONE32)); // Indicate regular JS_FUNCTION. |
| 1095 { Label convert_to_object, use_global_proxy, patch_receiver; | 1110 { Label convert_to_object, use_global_proxy, patch_receiver; |
| 1096 // Change context eagerly in case we need the global receiver. | 1111 // Change context eagerly in case we need the global receiver. |
| 1097 __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); | 1112 __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); |
| 1098 | 1113 |
| 1099 // Do not transform the receiver for strict mode functions. | 1114 // Do not transform the receiver for strict mode functions. |
| 1100 __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); | 1115 __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); |
| 1101 __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset)); | 1116 __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kStrictModeByteOffset)); |
| 1102 __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + | 1117 __ And(a7, a3, Operand(1 << SharedFunctionInfo::kStrictModeBitWithinByte)); |
| 1103 kSmiTagSize))); | 1118 __ Branch(&shift_arguments, ne, a7, Operand(zero_reg)); |
| 1104 __ Branch(&shift_arguments, ne, t3, Operand(zero_reg)); | |
| 1105 | 1119 |
| 1106 // Do not transform the receiver for native (Compilerhints already in a3). | 1120 // Do not transform the receiver for native functions. |
| 1107 __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); | 1121 __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset)); |
| 1108 __ Branch(&shift_arguments, ne, t3, Operand(zero_reg)); | 1122 __ And(a7, a3, Operand(1 << SharedFunctionInfo::kNativeBitWithinByte)); |
| 1123 __ Branch(&shift_arguments, ne, a7, Operand(zero_reg)); |
| 1109 | 1124 |
| 1110 // Compute the receiver in sloppy mode. | 1125 // Compute the receiver in sloppy mode. |
| 1111 // Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2). | 1126 // Load first argument in a2. a2 = -kPointerSize(sp + n_args << 3). |
| 1112 __ sll(at, a0, kPointerSizeLog2); | 1127 __ dsll(at, a0, kPointerSizeLog2); |
| 1113 __ addu(a2, sp, at); | 1128 __ daddu(a2, sp, at); |
| 1114 __ lw(a2, MemOperand(a2, -kPointerSize)); | 1129 __ ld(a2, MemOperand(a2, -kPointerSize)); |
| 1115 // a0: actual number of arguments | 1130 // a0: actual number of arguments |
| 1116 // a1: function | 1131 // a1: function |
| 1117 // a2: first argument | 1132 // a2: first argument |
| 1118 __ JumpIfSmi(a2, &convert_to_object, t2); | 1133 __ JumpIfSmi(a2, &convert_to_object, a6); |
| 1119 | 1134 |
| 1120 __ LoadRoot(a3, Heap::kUndefinedValueRootIndex); | 1135 __ LoadRoot(a3, Heap::kUndefinedValueRootIndex); |
| 1121 __ Branch(&use_global_proxy, eq, a2, Operand(a3)); | 1136 __ Branch(&use_global_proxy, eq, a2, Operand(a3)); |
| 1122 __ LoadRoot(a3, Heap::kNullValueRootIndex); | 1137 __ LoadRoot(a3, Heap::kNullValueRootIndex); |
| 1123 __ Branch(&use_global_proxy, eq, a2, Operand(a3)); | 1138 __ Branch(&use_global_proxy, eq, a2, Operand(a3)); |
| 1124 | 1139 |
| 1125 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); | 1140 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); |
| 1126 __ GetObjectType(a2, a3, a3); | 1141 __ GetObjectType(a2, a3, a3); |
| 1127 __ Branch(&shift_arguments, ge, a3, Operand(FIRST_SPEC_OBJECT_TYPE)); | 1142 __ Branch(&shift_arguments, ge, a3, Operand(FIRST_SPEC_OBJECT_TYPE)); |
| 1128 | 1143 |
| 1129 __ bind(&convert_to_object); | 1144 __ bind(&convert_to_object); |
| 1130 // Enter an internal frame in order to preserve argument count. | 1145 // Enter an internal frame in order to preserve argument count. |
| 1131 { | 1146 { |
| 1132 FrameScope scope(masm, StackFrame::INTERNAL); | 1147 FrameScope scope(masm, StackFrame::INTERNAL); |
| 1133 __ sll(a0, a0, kSmiTagSize); // Smi tagged. | 1148 __ SmiTag(a0); |
| 1134 __ Push(a0, a2); | 1149 __ Push(a0, a2); |
| 1135 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); | 1150 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); |
| 1136 __ mov(a2, v0); | 1151 __ mov(a2, v0); |
| 1137 | 1152 |
| 1138 __ pop(a0); | 1153 __ pop(a0); |
| 1139 __ sra(a0, a0, kSmiTagSize); // Un-tag. | 1154 __ SmiUntag(a0); |
| 1140 // Leave internal frame. | 1155 // Leave internal frame. |
| 1141 } | 1156 } |
| 1142 | 1157 // Restore the function to a1, and the flag to a4. |
| 1143 // Restore the function to a1, and the flag to t0. | 1158 __ dsll(at, a0, kPointerSizeLog2); |
| 1144 __ sll(at, a0, kPointerSizeLog2); | 1159 __ daddu(at, sp, at); |
| 1145 __ addu(at, sp, at); | 1160 __ ld(a1, MemOperand(at)); |
| 1146 __ lw(a1, MemOperand(at)); | |
| 1147 __ Branch(USE_DELAY_SLOT, &patch_receiver); | 1161 __ Branch(USE_DELAY_SLOT, &patch_receiver); |
| 1148 __ li(t0, Operand(0, RelocInfo::NONE32)); // In delay slot. | 1162 __ li(a4, Operand(0, RelocInfo::NONE32)); // In delay slot. |
| 1149 | 1163 |
| 1150 __ bind(&use_global_proxy); | 1164 __ bind(&use_global_proxy); |
| 1151 __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); | 1165 __ ld(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); |
| 1152 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset)); | 1166 __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset)); |
| 1153 | 1167 |
| 1154 __ bind(&patch_receiver); | 1168 __ bind(&patch_receiver); |
| 1155 __ sll(at, a0, kPointerSizeLog2); | 1169 __ dsll(at, a0, kPointerSizeLog2); |
| 1156 __ addu(a3, sp, at); | 1170 __ daddu(a3, sp, at); |
| 1157 __ sw(a2, MemOperand(a3, -kPointerSize)); | 1171 __ sd(a2, MemOperand(a3, -kPointerSize)); |
| 1158 | 1172 |
| 1159 __ Branch(&shift_arguments); | 1173 __ Branch(&shift_arguments); |
| 1160 } | 1174 } |
| 1161 | 1175 |
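
On the lbu-based hint tests above: rather than loading the whole smi-tagged compiler-hints word and masking it, the 64-bit port loads only the byte containing the flag and tests the bit's position within that byte. A hedged sketch of the idea (illustrative bit position; little-endian byte order assumed, as on mips64el):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kStrictModeBit = 12;          // illustrative bit position
      uint32_t hints = 1u << kStrictModeBit;  // packed hints word
      const uint8_t* bytes = reinterpret_cast<const uint8_t*>(&hints);
      uint8_t b = bytes[kStrictModeBit / 8];        // lbu of the ByteOffset
      assert(b & (1 << (kStrictModeBit % 8)));      // And with BitWithinByte
      return 0;
    }
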
| 1162 // 3b. Check for function proxy. | 1176 // 3b. Check for function proxy. |
| 1163 __ bind(&slow); | 1177 __ bind(&slow); |
| 1164 __ li(t0, Operand(1, RelocInfo::NONE32)); // Indicate function proxy. | 1178 __ li(a4, Operand(1, RelocInfo::NONE32)); // Indicate function proxy. |
| 1165 __ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE)); | 1179 __ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE)); |
| 1166 | 1180 |
| 1167 __ bind(&non_function); | 1181 __ bind(&non_function); |
| 1168 __ li(t0, Operand(2, RelocInfo::NONE32)); // Indicate non-function. | 1182 __ li(a4, Operand(2, RelocInfo::NONE32)); // Indicate non-function. |
| 1169 | 1183 |
| 1170 // 3c. Patch the first argument when calling a non-function. The | 1184 // 3c. Patch the first argument when calling a non-function. The |
| 1171 // CALL_NON_FUNCTION builtin expects the non-function callee as | 1185 // CALL_NON_FUNCTION builtin expects the non-function callee as |
| 1172 // receiver, so overwrite the first argument which will ultimately | 1186 // receiver, so overwrite the first argument which will ultimately |
| 1173 // become the receiver. | 1187 // become the receiver. |
| 1174 // a0: actual number of arguments | 1188 // a0: actual number of arguments |
| 1175 // a1: function | 1189 // a1: function |
| 1176 // t0: call type (0: JS function, 1: function proxy, 2: non-function) | 1190 // a4: call type (0: JS function, 1: function proxy, 2: non-function) |
| 1177 __ sll(at, a0, kPointerSizeLog2); | 1191 __ dsll(at, a0, kPointerSizeLog2); |
| 1178 __ addu(a2, sp, at); | 1192 __ daddu(a2, sp, at); |
| 1179 __ sw(a1, MemOperand(a2, -kPointerSize)); | 1193 __ sd(a1, MemOperand(a2, -kPointerSize)); |
| 1180 | 1194 |
| 1181 // 4. Shift arguments and return address one slot down on the stack | 1195 // 4. Shift arguments and return address one slot down on the stack |
| 1182 // (overwriting the original receiver). Adjust argument count to make | 1196 // (overwriting the original receiver). Adjust argument count to make |
| 1183 // the original first argument the new receiver. | 1197 // the original first argument the new receiver. |
| 1184 // a0: actual number of arguments | 1198 // a0: actual number of arguments |
| 1185 // a1: function | 1199 // a1: function |
| 1186 // t0: call type (0: JS function, 1: function proxy, 2: non-function) | 1200 // a4: call type (0: JS function, 1: function proxy, 2: non-function) |
| 1187 __ bind(&shift_arguments); | 1201 __ bind(&shift_arguments); |
| 1188 { Label loop; | 1202 { Label loop; |
| 1189 // Calculate the copy start address (destination). Copy end address is sp. | 1203 // Calculate the copy start address (destination). Copy end address is sp. |
| 1190 __ sll(at, a0, kPointerSizeLog2); | 1204 __ dsll(at, a0, kPointerSizeLog2); |
| 1191 __ addu(a2, sp, at); | 1205 __ daddu(a2, sp, at); |
| 1192 | 1206 |
| 1193 __ bind(&loop); | 1207 __ bind(&loop); |
| 1194 __ lw(at, MemOperand(a2, -kPointerSize)); | 1208 __ ld(at, MemOperand(a2, -kPointerSize)); |
| 1195 __ sw(at, MemOperand(a2)); | 1209 __ sd(at, MemOperand(a2)); |
| 1196 __ Subu(a2, a2, Operand(kPointerSize)); | 1210 __ Dsubu(a2, a2, Operand(kPointerSize)); |
| 1197 __ Branch(&loop, ne, a2, Operand(sp)); | 1211 __ Branch(&loop, ne, a2, Operand(sp)); |
| 1198 // Adjust the actual number of arguments and remove the top element | 1212 // Adjust the actual number of arguments and remove the top element |
| 1199 // (which is a copy of the last argument). | 1213 // (which is a copy of the last argument). |
| 1200 __ Subu(a0, a0, Operand(1)); | 1214 __ Dsubu(a0, a0, Operand(1)); |
| 1201 __ Pop(); | 1215 __ Pop(); |
| 1202 } | 1216 } |
| 1203 | 1217 |
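
Step 4 above slides every stack slot one word toward the receiver, making the original first argument the new receiver, then pops the now-duplicated slot at sp and decrements argc. A small array-based sketch of the same shuffle (index 0 stands in for sp):

    #include <cassert>

    int main() {
      // Layout before, with argc == 3:
      // [last arg, middle arg, first arg, receiver].
      long stack[4] = {33, 22, 11, 99};
      int argc = 3;
      // Copy each slot into the one above it, starting at the receiver
      // slot and walking down to sp (mirrors the ld/sd loop over a2).
      for (int i = argc; i > 0; i--) stack[i] = stack[i - 1];
      // The slot at sp is now a duplicate of the last argument; Pop()
      // drops it and argc shrinks: the first arg became the receiver.
      argc -= 1;
      assert(stack[3] == 11 && stack[2] == 22 && stack[1] == 33);
      assert(argc == 2);
      return 0;
    }
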
| 1204 // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin, | 1218 // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin, |
| 1205 // or a function proxy via CALL_FUNCTION_PROXY. | 1219 // or a function proxy via CALL_FUNCTION_PROXY. |
| 1206 // a0: actual number of arguments | 1220 // a0: actual number of arguments |
| 1207 // a1: function | 1221 // a1: function |
| 1208 // t0: call type (0: JS function, 1: function proxy, 2: non-function) | 1222 // a4: call type (0: JS function, 1: function proxy, 2: non-function) |
| 1209 { Label function, non_proxy; | 1223 { Label function, non_proxy; |
| 1210 __ Branch(&function, eq, t0, Operand(zero_reg)); | 1224 __ Branch(&function, eq, a4, Operand(zero_reg)); |
| 1211 // Expected number of arguments is 0 for CALL_NON_FUNCTION. | 1225 // Expected number of arguments is 0 for CALL_NON_FUNCTION. |
| 1212 __ mov(a2, zero_reg); | 1226 __ mov(a2, zero_reg); |
| 1213 __ Branch(&non_proxy, ne, t0, Operand(1)); | 1227 __ Branch(&non_proxy, ne, a4, Operand(1)); |
| 1214 | 1228 |
| 1215 __ push(a1); // Re-add proxy object as additional argument. | 1229 __ push(a1); // Re-add proxy object as additional argument. |
| 1216 __ Addu(a0, a0, Operand(1)); | 1230 __ Daddu(a0, a0, Operand(1)); |
| 1217 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY); | 1231 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY); |
| 1218 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), | 1232 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), |
| 1219 RelocInfo::CODE_TARGET); | 1233 RelocInfo::CODE_TARGET); |
| 1220 | 1234 |
| 1221 __ bind(&non_proxy); | 1235 __ bind(&non_proxy); |
| 1222 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION); | 1236 __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION); |
| 1223 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), | 1237 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), |
| 1224 RelocInfo::CODE_TARGET); | 1238 RelocInfo::CODE_TARGET); |
| 1225 __ bind(&function); | 1239 __ bind(&function); |
| 1226 } | 1240 } |
| 1227 | 1241 |
| 1228 // 5b. Get the code to call from the function and check that the number of | 1242 // 5b. Get the code to call from the function and check that the number of |
| 1229 // expected arguments matches what we're providing. If so, jump | 1243 // expected arguments matches what we're providing. If so, jump |
| 1230 // (tail-call) to the code in register edx without checking arguments. | 1244 // (tail-call) to the code in register a3 without checking arguments. |
| 1231 // a0: actual number of arguments | 1245 // a0: actual number of arguments |
| 1232 // a1: function | 1246 // a1: function |
| 1233 __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); | 1247 __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); |
| 1248 // The argument count is stored as int32_t on 64-bit platforms. |
| 1249 // TODO(plind): Smi on 32-bit platforms. |
| 1234 __ lw(a2, | 1250 __ lw(a2, |
| 1235 FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset)); | 1251 FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset)); |
| 1236 __ sra(a2, a2, kSmiTagSize); | |
| 1237 // Check formal and actual parameter counts. | 1252 // Check formal and actual parameter counts. |
| 1238 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), | 1253 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), |
| 1239 RelocInfo::CODE_TARGET, ne, a2, Operand(a0)); | 1254 RelocInfo::CODE_TARGET, ne, a2, Operand(a0)); |
| 1240 | 1255 |
| 1241 __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); | 1256 __ ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); |
| 1242 ParameterCount expected(0); | 1257 ParameterCount expected(0); |
| 1243 __ InvokeCode(a3, expected, expected, JUMP_FUNCTION, NullCallWrapper()); | 1258 __ InvokeCode(a3, expected, expected, JUMP_FUNCTION, NullCallWrapper()); |
| 1244 } | 1259 } |
| 1245 | 1260 |
| 1246 | 1261 |
| 1247 void Builtins::Generate_FunctionApply(MacroAssembler* masm) { | 1262 void Builtins::Generate_FunctionApply(MacroAssembler* masm) { |
| 1248 const int kIndexOffset = | 1263 const int kIndexOffset = |
| 1249 StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize); | 1264 StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize); |
| 1250 const int kLimitOffset = | 1265 const int kLimitOffset = |
| 1251 StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize); | 1266 StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize); |
| 1252 const int kArgsOffset = 2 * kPointerSize; | 1267 const int kArgsOffset = 2 * kPointerSize; |
| 1253 const int kRecvOffset = 3 * kPointerSize; | 1268 const int kRecvOffset = 3 * kPointerSize; |
| 1254 const int kFunctionOffset = 4 * kPointerSize; | 1269 const int kFunctionOffset = 4 * kPointerSize; |
| 1255 | 1270 |
| 1256 { | 1271 { |
| 1257 FrameScope frame_scope(masm, StackFrame::INTERNAL); | 1272 FrameScope frame_scope(masm, StackFrame::INTERNAL); |
| 1258 __ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function. | 1273 __ ld(a0, MemOperand(fp, kFunctionOffset)); // Get the function. |
| 1259 __ push(a0); | 1274 __ push(a0); |
| 1260 __ lw(a0, MemOperand(fp, kArgsOffset)); // Get the args array. | 1275 __ ld(a0, MemOperand(fp, kArgsOffset)); // Get the args array. |
| 1261 __ push(a0); | 1276 __ push(a0); |
| 1262 // Returns (in v0) number of arguments to copy to stack as Smi. | 1277 // Returns (in v0) number of arguments to copy to stack as Smi. |
| 1263 __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); | 1278 __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); |
| 1264 | 1279 |
| 1265 // Check the stack for overflow. We are not trying to catch | 1280 // Check the stack for overflow. We are not trying to catch |
| 1266 // interruptions (e.g. debug break and preemption) here, so the "real stack | 1281 // interruptions (e.g. debug break and preemption) here, so the "real stack |
| 1267 // limit" is checked. | 1282 // limit" is checked. |
| 1268 Label okay; | 1283 Label okay; |
| 1269 __ LoadRoot(a2, Heap::kRealStackLimitRootIndex); | 1284 __ LoadRoot(a2, Heap::kRealStackLimitRootIndex); |
| 1270 // Make a2 the space we have left. The stack might already be overflowed | 1285 // Make a2 the space we have left. The stack might already be overflowed |
| 1271 // here which will cause a2 to become negative. | 1286 // here which will cause a2 to become negative. |
| 1272 __ subu(a2, sp, a2); | 1287 __ dsubu(a2, sp, a2); |
| 1273 // Check if the arguments will overflow the stack. | 1288 // Check if the arguments will overflow the stack. |
| 1274 __ sll(t3, v0, kPointerSizeLog2 - kSmiTagSize); | 1289 __ SmiScale(a7, v0, kPointerSizeLog2); |
| 1275 __ Branch(&okay, gt, a2, Operand(t3)); // Signed comparison. | 1290 __ Branch(&okay, gt, a2, Operand(a7)); // Signed comparison. |
| 1276 | 1291 |
| 1277 // Out of stack space. | 1292 // Out of stack space. |
| 1278 __ lw(a1, MemOperand(fp, kFunctionOffset)); | 1293 __ ld(a1, MemOperand(fp, kFunctionOffset)); |
| 1279 __ Push(a1, v0); | 1294 __ Push(a1, v0); |
| 1280 __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); | 1295 __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); |
| 1281 // End of stack check. | 1296 // End of stack check. |
| 1282 | 1297 |
| 1283 // Push current limit and index. | 1298 // Push current limit and index. |
| 1284 __ bind(&okay); | 1299 __ bind(&okay); |
| 1285 __ mov(a1, zero_reg); | 1300 __ mov(a1, zero_reg); |
| 1286 __ Push(v0, a1); // Limit and initial index. | 1301 __ Push(v0, a1); // Limit and initial index. |
| 1287 | 1302 |
| 1288 // Get the receiver. | 1303 // Get the receiver. |
| 1289 __ lw(a0, MemOperand(fp, kRecvOffset)); | 1304 __ ld(a0, MemOperand(fp, kRecvOffset)); |
| 1290 | 1305 |
| 1291 // Check that the function is a JS function (otherwise it must be a proxy). | 1306 // Check that the function is a JS function (otherwise it must be a proxy). |
| 1292 Label push_receiver; | 1307 Label push_receiver; |
| 1293 __ lw(a1, MemOperand(fp, kFunctionOffset)); | 1308 __ ld(a1, MemOperand(fp, kFunctionOffset)); |
| 1294 __ GetObjectType(a1, a2, a2); | 1309 __ GetObjectType(a1, a2, a2); |
| 1295 __ Branch(&push_receiver, ne, a2, Operand(JS_FUNCTION_TYPE)); | 1310 __ Branch(&push_receiver, ne, a2, Operand(JS_FUNCTION_TYPE)); |
| 1296 | 1311 |
| 1297 // Change context eagerly to get the right global object if necessary. | 1312 // Change context eagerly to get the right global object if necessary. |
| 1298 __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); | 1313 __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); |
| 1299 // Load the shared function info while the function is still in a1. | 1314 // Load the shared function info while the function is still in a1. |
| 1300 __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); | 1315 __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); |
| 1301 | 1316 |
| 1302 // Compute the receiver. | 1317 // Compute the receiver. |
| 1303 // Do not transform the receiver for strict mode functions. | 1318 // Do not transform the receiver for strict mode functions. |
| 1304 Label call_to_object, use_global_proxy; | 1319 Label call_to_object, use_global_proxy; |
| 1305 __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset)); | 1320 __ lbu(a7, FieldMemOperand(a2, SharedFunctionInfo::kStrictModeByteOffset)); |
| 1306 __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + | 1321 __ And(a7, a7, Operand(1 << SharedFunctionInfo::kStrictModeBitWithinByte)); |
| 1307 kSmiTagSize))); | 1322 __ Branch(&push_receiver, ne, a7, Operand(zero_reg)); |
| 1308 __ Branch(&push_receiver, ne, t3, Operand(zero_reg)); | |
| 1309 | 1323 |
| 1310 // Do not transform the receiver for native (Compilerhints already in a2). | 1324 // Do not transform the receiver for native functions. |
| 1311 __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); | 1325 __ lbu(a7, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset)); |
| 1312 __ Branch(&push_receiver, ne, t3, Operand(zero_reg)); | 1326 __ And(a7, a7, Operand(1 << SharedFunctionInfo::kNativeBitWithinByte)); |
| 1327 __ Branch(&push_receiver, ne, a7, Operand(zero_reg)); |
| 1313 | 1328 |
| 1314 // Compute the receiver in sloppy mode. | 1329 // Compute the receiver in sloppy mode. |
| 1315 __ JumpIfSmi(a0, &call_to_object); | 1330 __ JumpIfSmi(a0, &call_to_object); |
| 1316 __ LoadRoot(a1, Heap::kNullValueRootIndex); | 1331 __ LoadRoot(a1, Heap::kNullValueRootIndex); |
| 1317 __ Branch(&use_global_proxy, eq, a0, Operand(a1)); | 1332 __ Branch(&use_global_proxy, eq, a0, Operand(a1)); |
| 1318 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); | 1333 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); |
| 1319 __ Branch(&use_global_proxy, eq, a0, Operand(a2)); | 1334 __ Branch(&use_global_proxy, eq, a0, Operand(a2)); |
| 1320 | 1335 |
| 1321 // Check if the receiver is already a JavaScript object. | 1336 // Check if the receiver is already a JavaScript object. |
| 1322 // a0: receiver | 1337 // a0: receiver |
| 1323 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); | 1338 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); |
| 1324 __ GetObjectType(a0, a1, a1); | 1339 __ GetObjectType(a0, a1, a1); |
| 1325 __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE)); | 1340 __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE)); |
| 1326 | 1341 |
| 1327 // Convert the receiver to a regular object. | 1342 // Convert the receiver to a regular object. |
| 1328 // a0: receiver | 1343 // a0: receiver |
| 1329 __ bind(&call_to_object); | 1344 __ bind(&call_to_object); |
| 1330 __ push(a0); | 1345 __ push(a0); |
| 1331 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); | 1346 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); |
| 1332 __ mov(a0, v0); // Put object in a0 to match other paths to push_receiver. | 1347 __ mov(a0, v0); // Put object in a0 to match other paths to push_receiver. |
| 1333 __ Branch(&push_receiver); | 1348 __ Branch(&push_receiver); |
| 1334 | 1349 |
| 1335 __ bind(&use_global_proxy); | 1350 __ bind(&use_global_proxy); |
| 1336 __ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); | 1351 __ ld(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); |
| 1337 __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalProxyOffset)); | 1352 __ ld(a0, FieldMemOperand(a0, GlobalObject::kGlobalProxyOffset)); |
| 1338 | 1353 |
| 1339 // Push the receiver. | 1354 // Push the receiver. |
| 1340 // a0: receiver | 1355 // a0: receiver |
| 1341 __ bind(&push_receiver); | 1356 __ bind(&push_receiver); |
| 1342 __ push(a0); | 1357 __ push(a0); |
| 1343 | 1358 |
| 1344 // Copy all arguments from the array to the stack. | 1359 // Copy all arguments from the array to the stack. |
| 1345 Label entry, loop; | 1360 Label entry, loop; |
| 1346 __ lw(a0, MemOperand(fp, kIndexOffset)); | 1361 __ ld(a0, MemOperand(fp, kIndexOffset)); |
| 1347 __ Branch(&entry); | 1362 __ Branch(&entry); |
| 1348 | 1363 |
| 1349 // Load the current argument from the arguments array and push it to the | 1364 // Load the current argument from the arguments array and push it to the |
| 1350 // stack. | 1365 // stack. |
| 1351 // a0: current argument index | 1366 // a0: current argument index |
| 1352 __ bind(&loop); | 1367 __ bind(&loop); |
| 1353 __ lw(a1, MemOperand(fp, kArgsOffset)); | 1368 __ ld(a1, MemOperand(fp, kArgsOffset)); |
| 1354 __ Push(a1, a0); | 1369 __ Push(a1, a0); |
| 1355 | 1370 |
| 1356 // Call the runtime to access the property in the arguments array. | 1371 // Call the runtime to access the property in the arguments array. |
| 1357 __ CallRuntime(Runtime::kGetProperty, 2); | 1372 __ CallRuntime(Runtime::kGetProperty, 2); |
| 1358 __ push(v0); | 1373 __ push(v0); |
| 1359 | 1374 |
| 1360 // Use inline caching to access the arguments. | 1375 // Use inline caching to access the arguments. |
| 1361 __ lw(a0, MemOperand(fp, kIndexOffset)); | 1376 __ ld(a0, MemOperand(fp, kIndexOffset)); |
| 1362 __ Addu(a0, a0, Operand(1 << kSmiTagSize)); | 1377 __ Daddu(a0, a0, Operand(Smi::FromInt(1))); |
| 1363 __ sw(a0, MemOperand(fp, kIndexOffset)); | 1378 __ sd(a0, MemOperand(fp, kIndexOffset)); |
| 1364 | 1379 |
| 1365 // Test if the copy loop has finished copying all the elements from the | 1380 // Test if the copy loop has finished copying all the elements from the |
| 1366 // arguments object. | 1381 // arguments object. |
| 1367 __ bind(&entry); | 1382 __ bind(&entry); |
| 1368 __ lw(a1, MemOperand(fp, kLimitOffset)); | 1383 __ ld(a1, MemOperand(fp, kLimitOffset)); |
| 1369 __ Branch(&loop, ne, a0, Operand(a1)); | 1384 __ Branch(&loop, ne, a0, Operand(a1)); |
| 1370 | 1385 |
| 1371 // Call the function. | 1386 // Call the function. |
| 1372 Label call_proxy; | 1387 Label call_proxy; |
| 1373 ParameterCount actual(a0); | 1388 ParameterCount actual(a0); |
| 1374 __ sra(a0, a0, kSmiTagSize); | 1389 __ SmiUntag(a0); |
| 1375 __ lw(a1, MemOperand(fp, kFunctionOffset)); | 1390 __ ld(a1, MemOperand(fp, kFunctionOffset)); |
| 1376 __ GetObjectType(a1, a2, a2); | 1391 __ GetObjectType(a1, a2, a2); |
| 1377 __ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE)); | 1392 __ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE)); |
| 1378 | 1393 |
| 1379 __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper()); | 1394 __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper()); |
| 1380 | 1395 |
| 1381 frame_scope.GenerateLeaveFrame(); | 1396 frame_scope.GenerateLeaveFrame(); |
| 1382 __ Ret(USE_DELAY_SLOT); | 1397 __ Ret(USE_DELAY_SLOT); |
| 1383 __ Addu(sp, sp, Operand(3 * kPointerSize)); // In delay slot. | 1398 __ Daddu(sp, sp, Operand(3 * kPointerSize)); // In delay slot. |
| 1384 | 1399 |
| 1385 // Call the function proxy. | 1400 // Call the function proxy. |
| 1386 __ bind(&call_proxy); | 1401 __ bind(&call_proxy); |
| 1387 __ push(a1); // Add function proxy as last argument. | 1402 __ push(a1); // Add function proxy as last argument. |
| 1388 __ Addu(a0, a0, Operand(1)); | 1403 __ Daddu(a0, a0, Operand(1)); |
| 1389 __ li(a2, Operand(0, RelocInfo::NONE32)); | 1404 __ li(a2, Operand(0, RelocInfo::NONE32)); |
| 1390 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY); | 1405 __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY); |
| 1391 __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), | 1406 __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), |
| 1392 RelocInfo::CODE_TARGET); | 1407 RelocInfo::CODE_TARGET); |
| 1393 // Tear down the internal frame and remove function, receiver and args. | 1408 // Tear down the internal frame and remove function, receiver and args. |
| 1394 } | 1409 } |
| 1395 | 1410 |
| 1396 __ Ret(USE_DELAY_SLOT); | 1411 __ Ret(USE_DELAY_SLOT); |
| 1397 __ Addu(sp, sp, Operand(3 * kPointerSize)); // In delay slot. | 1412 __ Daddu(sp, sp, Operand(3 * kPointerSize)); // In delay slot. |
| 1398 } | 1413 } |
| 1399 | 1414 |
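
A note on the index increment in the apply loop above: the index lives on the stack as a smi, and with the upper-32-bit smi layout, Daddu(a0, a0, Operand(Smi::FromInt(1))) is an ordinary 64-bit add of 1 << 32. Sketch (SmiFromInt is an illustrative helper, not V8 API):

    #include <cassert>
    #include <cstdint>

    // MIPS64 smi layout assumed: value in the upper 32 bits.
    int64_t SmiFromInt(int32_t v) { return static_cast<int64_t>(v) << 32; }

    int main() {
      int64_t index = SmiFromInt(6);
      index += SmiFromInt(1);  // Daddu(a0, a0, Operand(Smi::FromInt(1)))
      assert(index == SmiFromInt(7));
      return 0;
    }
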
| 1400 | 1415 |
| 1401 static void ArgumentAdaptorStackCheck(MacroAssembler* masm, | 1416 static void ArgumentAdaptorStackCheck(MacroAssembler* masm, |
| 1402 Label* stack_overflow) { | 1417 Label* stack_overflow) { |
| 1403 // ----------- S t a t e ------------- | 1418 // ----------- S t a t e ------------- |
| 1404 // -- a0 : actual number of arguments | 1419 // -- a0 : actual number of arguments |
| 1405 // -- a1 : function (passed through to callee) | 1420 // -- a1 : function (passed through to callee) |
| 1406 // -- a2 : expected number of arguments | 1421 // -- a2 : expected number of arguments |
| 1407 // ----------------------------------- | 1422 // ----------------------------------- |
| 1408 // Check the stack for overflow. We are not trying to catch | 1423 // Check the stack for overflow. We are not trying to catch |
| 1409 // interruptions (e.g. debug break and preemption) here, so the "real stack | 1424 // interruptions (e.g. debug break and preemption) here, so the "real stack |
| 1410 // limit" is checked. | 1425 // limit" is checked. |
| 1411 __ LoadRoot(t1, Heap::kRealStackLimitRootIndex); | 1426 __ LoadRoot(a5, Heap::kRealStackLimitRootIndex); |
| 1412 // Make t1 the space we have left. The stack might already be overflowed | 1427 // Make a5 the space we have left. The stack might already be overflowed |
| 1413 // here which will cause t1 to become negative. | 1428 // here which will cause a5 to become negative. |
| 1414 __ subu(t1, sp, t1); | 1429 __ dsubu(a5, sp, a5); |
| 1415 // Check if the arguments will overflow the stack. | 1430 // Check if the arguments will overflow the stack. |
| 1416 __ sll(at, a2, kPointerSizeLog2); | 1431 __ dsll(at, a2, kPointerSizeLog2); |
| 1417 // Signed comparison. | 1432 // Signed comparison. |
| 1418 __ Branch(stack_overflow, le, t1, Operand(at)); | 1433 __ Branch(stack_overflow, le, a5, Operand(at)); |
| 1419 } | 1434 } |
| 1420 | 1435 |
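
ArgumentAdaptorStackCheck above computes the space remaining above the real stack limit and branches to the overflow path when the expected arguments would not fit; the comparison is signed because the stack may already be past the limit. A hedged C++ sketch of the check (names are illustrative):

    #include <cassert>
    #include <cstdint>

    // Sketch: would pushing `expected_argc` pointer-sized arguments
    // overflow the space between sp and the real stack limit?
    bool WouldOverflow(int64_t sp, int64_t real_limit, int64_t expected_argc) {
      int64_t space_left = sp - real_limit;  // may already be negative
      int64_t needed = expected_argc << 3;   // kPointerSizeLog2 == 3
      return space_left <= needed;           // signed comparison
    }

    int main() {
      assert(!WouldOverflow(/*sp=*/4096, /*real_limit=*/0, /*expected_argc=*/8));
      assert(WouldOverflow(/*sp=*/64, /*real_limit=*/0, /*expected_argc=*/8));
      return 0;
    }
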
| 1421 | 1436 |
| 1422 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { | 1437 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { |
| 1423 __ sll(a0, a0, kSmiTagSize); | 1438 // Smi-tag the argument count (smis occupy the upper 32 bits). |
| 1424 __ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 1439 __ dsll32(a0, a0, 0); |
| 1425 __ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit()); | 1440 __ li(a4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
| 1426 __ Addu(fp, sp, | 1441 __ MultiPush(a0.bit() | a1.bit() | a4.bit() | fp.bit() | ra.bit()); |
| 1442 __ Daddu(fp, sp, |
| 1427 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize)); | 1443 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize)); |
| 1428 } | 1444 } |
| 1429 | 1445 |
| 1430 | 1446 |
| 1431 static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) { | 1447 static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) { |
| 1432 // ----------- S t a t e ------------- | 1448 // ----------- S t a t e ------------- |
| 1433 // -- v0 : result being passed through | 1449 // -- v0 : result being passed through |
| 1434 // ----------------------------------- | 1450 // ----------------------------------- |
| 1435 // Get the number of arguments passed (as a smi), tear down the frame and | 1451 // Get the number of arguments passed (as a smi), tear down the frame and |
| 1436 // then tear down the parameters. | 1452 // then tear down the parameters. |
| 1437 __ lw(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp + | 1453 __ ld(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp + |
| 1438 kPointerSize))); | 1454 kPointerSize))); |
| 1439 __ mov(sp, fp); | 1455 __ mov(sp, fp); |
| 1440 __ MultiPop(fp.bit() | ra.bit()); | 1456 __ MultiPop(fp.bit() | ra.bit()); |
| 1441 __ sll(t0, a1, kPointerSizeLog2 - kSmiTagSize); | 1457 __ SmiScale(a4, a1, kPointerSizeLog2); |
| 1442 __ Addu(sp, sp, t0); | 1458 __ Daddu(sp, sp, a4); |
| 1443 // Adjust for the receiver. | 1459 // Adjust for the receiver. |
| 1444 __ Addu(sp, sp, Operand(kPointerSize)); | 1460 __ Daddu(sp, sp, Operand(kPointerSize)); |
| 1445 } | 1461 } |
| 1446 | 1462 |
| 1447 | 1463 |
| 1448 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { | 1464 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { |
| 1449 // State setup as expected by MacroAssembler::InvokePrologue. | 1465 // State setup as expected by MacroAssembler::InvokePrologue. |
| 1450 // ----------- S t a t e ------------- | 1466 // ----------- S t a t e ------------- |
| 1451 // -- a0: actual arguments count | 1467 // -- a0: actual arguments count |
| 1452 // -- a1: function (passed through to callee) | 1468 // -- a1: function (passed through to callee) |
| 1453 // -- a2: expected arguments count | 1469 // -- a2: expected arguments count |
| 1454 // ----------------------------------- | 1470 // ----------------------------------- |
| 1455 | 1471 |
| 1456 Label stack_overflow; | 1472 Label stack_overflow; |
| 1457 ArgumentAdaptorStackCheck(masm, &stack_overflow); | 1473 ArgumentAdaptorStackCheck(masm, &stack_overflow); |
| 1458 Label invoke, dont_adapt_arguments; | 1474 Label invoke, dont_adapt_arguments; |
| 1459 | 1475 |
| 1460 Label enough, too_few; | 1476 Label enough, too_few; |
| 1461 __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); | 1477 __ ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); |
| 1462 __ Branch(&dont_adapt_arguments, eq, | 1478 __ Branch(&dont_adapt_arguments, eq, |
| 1463 a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel)); | 1479 a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel)); |
| 1464 // We use Uless as the number of argument should always be greater than 0. | 1480 // We use Uless as the number of arguments should always be greater than 0. |
| 1465 __ Branch(&too_few, Uless, a0, Operand(a2)); | 1481 __ Branch(&too_few, Uless, a0, Operand(a2)); |
| 1466 | 1482 |
| 1467 { // Enough parameters: actual >= expected. | 1483 { // Enough parameters: actual >= expected. |
| 1468 // a0: actual number of arguments as a smi | 1484 // a0: actual number of arguments as a smi |
| 1469 // a1: function | 1485 // a1: function |
| 1470 // a2: expected number of arguments | 1486 // a2: expected number of arguments |
| 1471 // a3: code entry to call | 1487 // a3: code entry to call |
| 1472 __ bind(&enough); | 1488 __ bind(&enough); |
| 1473 EnterArgumentsAdaptorFrame(masm); | 1489 EnterArgumentsAdaptorFrame(masm); |
| 1474 | 1490 |
| 1475 // Calculate copy start address into a0 and copy end address into a2. | 1491 // Calculate copy start address into a0 and copy end address into a2. |
| 1476 __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize); | 1492 __ SmiScale(a0, a0, kPointerSizeLog2); |
| 1477 __ Addu(a0, fp, a0); | 1493 __ Daddu(a0, fp, a0); |
| 1478 // Adjust for return address and receiver. | 1494 // Adjust for return address and receiver. |
| 1479 __ Addu(a0, a0, Operand(2 * kPointerSize)); | 1495 __ Daddu(a0, a0, Operand(2 * kPointerSize)); |
| 1480 // Compute copy end address. | 1496 // Compute copy end address. |
| 1481 __ sll(a2, a2, kPointerSizeLog2); | 1497 __ dsll(a2, a2, kPointerSizeLog2); |
| 1482 __ subu(a2, a0, a2); | 1498 __ dsubu(a2, a0, a2); |
| 1483 | 1499 |
| 1484 // Copy the arguments (including the receiver) to the new stack frame. | 1500 // Copy the arguments (including the receiver) to the new stack frame. |
| 1485 // a0: copy start address | 1501 // a0: copy start address |
| 1486 // a1: function | 1502 // a1: function |
| 1487 // a2: copy end address | 1503 // a2: copy end address |
| 1488 // a3: code entry to call | 1504 // a3: code entry to call |
| 1489 | 1505 |
| 1490 Label copy; | 1506 Label copy; |
| 1491 __ bind(©); | 1507 __ bind(©); |
| 1492 __ lw(t0, MemOperand(a0)); | 1508 __ ld(a4, MemOperand(a0)); |
| 1493 __ push(t0); | 1509 __ push(a4); |
| 1494 __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a2)); | 1510 __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a2)); |
| 1495 __ addiu(a0, a0, -kPointerSize); // In delay slot. | 1511 __ daddiu(a0, a0, -kPointerSize); // In delay slot. |
| 1496 | 1512 |
| 1497 __ jmp(&invoke); | 1513 __ jmp(&invoke); |
| 1498 } | 1514 } |
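The copy loop above walks downward from the last caller slot, and the address decrement sits in the branch delay slot, so it executes even on the final, not-taken iteration. A C++ rendering of the same control flow (hypothetical, with word-sized slots standing in for the machine stack):

    #include <cstdint>

    // Pushes every slot from copy_start down to copy_end (inclusive) onto
    // the new frame; mirrors the ld/push/Branch/daddiu sequence above.
    void CopyDown(uint64_t*& sp, const uint64_t* copy_start,
                  const uint64_t* copy_end) {
      const uint64_t* p = copy_start;
      for (;;) {
        *--sp = *p;              // ld a4, MemOperand(a0); push a4
        if (p == copy_end) break;
        --p;                     // daddiu a0, a0, -kPointerSize (delay slot)
      }
    }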
| 1499 | 1515 |
| 1500 { // Too few parameters: Actual < expected. | 1516 { // Too few parameters: Actual < expected. |
| 1501 __ bind(&too_few); | 1517 __ bind(&too_few); |
| 1502 EnterArgumentsAdaptorFrame(masm); | 1518 EnterArgumentsAdaptorFrame(masm); |
| 1503 | 1519 |
| 1504 // Calculate copy start address into a0; the copy end address is fp. | 1520 // Calculate copy start address into a0; the copy end address is fp. |
| 1505 // a0: actual number of arguments as a smi | 1521 // a0: actual number of arguments as a smi |
| 1506 // a1: function | 1522 // a1: function |
| 1507 // a2: expected number of arguments | 1523 // a2: expected number of arguments |
| 1508 // a3: code entry to call | 1524 // a3: code entry to call |
| 1509 __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize); | 1525 __ SmiScale(a0, a0, kPointerSizeLog2); |
| 1510 __ Addu(a0, fp, a0); | 1526 __ Daddu(a0, fp, a0); |
| 1511 // Adjust for return address and receiver. | 1527 // Adjust for return address and receiver. |
| 1512 __ Addu(a0, a0, Operand(2 * kPointerSize)); | 1528 __ Daddu(a0, a0, Operand(2 * kPointerSize)); |
| 1513 // Compute copy end address. Also adjust for return address. | 1529 // Compute copy end address. Also adjust for return address. |
| 1514 __ Addu(t3, fp, kPointerSize); | 1530 __ Daddu(a7, fp, kPointerSize); |
| 1515 | 1531 |
| 1516 // Copy the arguments (including the receiver) to the new stack frame. | 1532 // Copy the arguments (including the receiver) to the new stack frame. |
| 1517 // a0: copy start address | 1533 // a0: copy start address |
| 1518 // a1: function | 1534 // a1: function |
| 1519 // a2: expected number of arguments | 1535 // a2: expected number of arguments |
| 1520 // a3: code entry to call | 1536 // a3: code entry to call |
| 1521 // t3: copy end address | 1537 // a7: copy end address |
| 1522 Label copy; | 1538 Label copy; |
| 1523 __ bind(&copy); | 1539 __ bind(&copy); |
| 1524 __ lw(t0, MemOperand(a0)); // Adjusted above for return addr and receiver. | 1540 __ ld(a4, MemOperand(a0)); // Adjusted above for return addr and receiver. |
| 1525 __ Subu(sp, sp, kPointerSize); | 1541 __ Dsubu(sp, sp, kPointerSize); |
| 1526 __ Subu(a0, a0, kPointerSize); | 1542 __ Dsubu(a0, a0, kPointerSize); |
| 1527 __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(t3)); | 1543 __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a7)); |
| 1528 __ sw(t0, MemOperand(sp)); // In the delay slot. | 1544 __ sd(a4, MemOperand(sp)); // In the delay slot. |
| 1529 | 1545 |
| 1530 // Fill the remaining expected arguments with undefined. | 1546 // Fill the remaining expected arguments with undefined. |
| 1531 // a1: function | 1547 // a1: function |
| 1532 // a2: expected number of arguments | 1548 // a2: expected number of arguments |
| 1533 // a3: code entry to call | 1549 // a3: code entry to call |
| 1534 __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); | 1550 __ LoadRoot(a4, Heap::kUndefinedValueRootIndex); |
| 1535 __ sll(t2, a2, kPointerSizeLog2); | 1551 __ dsll(a6, a2, kPointerSizeLog2); |
| 1536 __ Subu(a2, fp, Operand(t2)); | 1552 __ Dsubu(a2, fp, Operand(a6)); |
| 1537 // Adjust for frame. | 1553 // Adjust for frame. |
| 1538 __ Subu(a2, a2, Operand(StandardFrameConstants::kFixedFrameSizeFromFp + | 1554 __ Dsubu(a2, a2, Operand(StandardFrameConstants::kFixedFrameSizeFromFp + |
| 1539 2 * kPointerSize)); | 1555 2 * kPointerSize)); |
| 1540 | 1556 |
| 1541 Label fill; | 1557 Label fill; |
| 1542 __ bind(&fill); | 1558 __ bind(&fill); |
| 1543 __ Subu(sp, sp, kPointerSize); | 1559 __ Dsubu(sp, sp, kPointerSize); |
| 1544 __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(a2)); | 1560 __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(a2)); |
| 1545 __ sw(t0, MemOperand(sp)); | 1561 __ sd(a4, MemOperand(sp)); |
| 1546 } | 1562 } |
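In the too-few path, the copied actuals are followed by undefined values until the callee sees exactly the expected argument count; the store sits in the delay slot, so it covers the final iteration as well. A do-while loop captures the shape (hedged sketch, hypothetical types):

    #include <cstdint>

    // Fills stack slots downward with `undefined_value` until sp reaches
    // fill_end (the address computed from the expected count above).
    void FillWithUndefined(uint64_t*& sp, const uint64_t* fill_end,
                           uint64_t undefined_value) {
      do {
        *--sp = undefined_value;  // sd a4, MemOperand(sp) (delay slot)
      } while (sp != fill_end);
    }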
| 1547 | 1563 |
| 1548 // Call the entry point. | 1564 // Call the entry point. |
| 1549 __ bind(&invoke); | 1565 __ bind(&invoke); |
| 1550 | 1566 |
| 1551 __ Call(a3); | 1567 __ Call(a3); |
| 1552 | 1568 |
| 1553 // Store offset of return address for deoptimizer. | 1569 // Store offset of return address for deoptimizer. |
| 1554 masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset()); | 1570 masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset()); |
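Recording masm->pc_offset() immediately after the call captures the return-address offset inside this builtin; the deoptimizer later matches saved return addresses against it to recognize arguments-adaptor frames. A toy illustration of that invariant (names are illustrative only, not V8's):

    // After a call, the saved return address points at the next
    // instruction, so pc_offset() taken here equals that offset.
    struct AdaptorDeoptInfo { int return_pc_offset = -1; };

    void RecordAdaptorReturn(AdaptorDeoptInfo& info, int pc_offset_after_call) {
      info.return_pc_offset = pc_offset_after_call;
    }

    bool IsAdaptorReturn(const AdaptorDeoptInfo& info, int pc_offset) {
      return pc_offset == info.return_pc_offset;
    }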
| 1555 | 1571 |
| (...skipping 15 matching lines...) |
| 1571 __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); | 1587 __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); |
| 1572 __ break_(0xCC); | 1588 __ break_(0xCC); |
| 1573 } | 1589 } |
| 1574 } | 1590 } |
| 1575 | 1591 |
| 1576 | 1592 |
| 1577 #undef __ | 1593 #undef __ |
| 1578 | 1594 |
| 1579 } } // namespace v8::internal | 1595 } } // namespace v8::internal |
| 1580 | 1596 |
| 1581 #endif // V8_TARGET_ARCH_MIPS | 1597 #endif // V8_TARGET_ARCH_MIPS64 |