| OLD | NEW |
| (Empty) | |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are |
| 4 // met: |
| 5 // |
| 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. |
| 12 // * Neither the name of Google Inc. nor the names of its |
| 13 // contributors may be used to endorse or promote products derived |
| 14 // from this software without specific prior written permission. |
| 15 // |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 |
| 28 #include "v8.h" |
| 29 |
| 30 #if defined(V8_TARGET_ARCH_A64) |
| 31 |
| 32 #include "codegen.h" |
| 33 #include "debug.h" |
| 34 #include "deoptimizer.h" |
| 35 #include "full-codegen.h" |
| 36 #include "runtime.h" |
| 37 |
| 38 namespace v8 { |
| 39 namespace internal { |
| 40 |
| 41 |
| 42 #define __ ACCESS_MASM(masm) |
| 43 |
| 44 |
| 45 // Load the built-in Array function from the current context. |
| 46 static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) { |
| 47 // Load the native context. |
| 48 __ Ldr(result, GlobalObjectMemOperand()); |
| 49 __ Ldr(result, |
| 50 FieldMemOperand(result, GlobalObject::kNativeContextOffset)); |
| 51 // Load the Array function from the native context. |
| 52 __ Ldr(result, |
| 53 MemOperand(result, |
| 54 Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX))); |
| 55 } |
| 56 |
| 57 |
| 58 // Allocate an empty JSArray. The allocated array is put into the result |
| 59 // register. An elements backing store is allocated with size initial_capacity |
| 60 // and filled with the hole values. |
| 61 static void AllocateEmptyJSArray(MacroAssembler* masm, |
| 62 Register array_function, |
| 63 Register result, |
| 64 Register scratch1, |
| 65 Register scratch2, |
| 66 Register scratch3, |
| 67 Label* gc_required) { |
| 68 ASSERT(!AreAliased(array_function, result, scratch1, scratch2, scratch3)); |
| 69 const int initial_capacity = JSArray::kPreallocatedArrayElements; |
| 70 STATIC_ASSERT(initial_capacity >= 0); |
| 71 Register map = scratch1; |
| 72 __ LoadInitialArrayMap(array_function, scratch2, map, kArrayCantHaveHoles); |
| 73 |
| 74 // Allocate the JSArray object together with space for a fixed array with the |
| 75 // requested elements. |
| 76 int size = JSArray::kSize; |
| 77 if (initial_capacity > 0) { |
| 78 size += FixedArray::SizeFor(initial_capacity); |
| 79 } |
| 80 __ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT); |
| 81 |
| 82 // Allocated the JSArray. Now initialize the fields except for the elements |
| 83 // array. |
| 84 __ Str(map, FieldMemOperand(result, JSObject::kMapOffset)); |
| 85 |
| 86 Register empty_prop = scratch1; |
| 87 __ LoadRoot(empty_prop, Heap::kEmptyFixedArrayRootIndex); |
| 88 __ Str(empty_prop, FieldMemOperand(result, JSArray::kPropertiesOffset)); |
| 89 |
| 90 // Field JSArray::kElementsOffset is initialized later. |
| 91 __ Mov(scratch3, Operand(Smi::FromInt(0))); |
| 92 __ Str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset)); |
| 93 |
| 94 if (initial_capacity == 0) { |
| 95 __ Str(empty_prop, FieldMemOperand(result, JSArray::kElementsOffset)); |
| 96 return; |
| 97 } |
| 98 |
| 99 // Calculate the location of the elements array and set elements array member |
| 100 // of the JSArray. |
| 101 Register elements_offset = scratch1; |
| 102 __ Add(elements_offset, result, JSArray::kSize); |
| 103 __ Str(elements_offset, FieldMemOperand(result, JSArray::kElementsOffset)); |
| 104 |
| 105 // Set up fixed array map. |
| 106 __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex); |
| 107 __ Str(scratch3, FieldMemOperand(elements_offset, FixedArray::kMapOffset)); |
| 108 |
| 109 // Set up fixed array length. |
| 110 __ Mov(scratch3, Operand(Smi::FromInt(initial_capacity))); |
| 111 __ Str(scratch3, FieldMemOperand(elements_offset, FixedArray::kLengthOffset)); |
| 112 |
| 113 // Set elements_offset to point to first element of fixed array. |
| 114 __ Add(elements_offset, elements_offset, |
| 115 FixedArray::kHeaderSize - kHeapObjectTag); |
| 116 |
| 117 // Set elements_end to the element after the last in the fixed array. |
| 118 Register elements_end = scratch2; |
| 119 STATIC_ASSERT(initial_capacity > 0); |
| 120 __ Add(elements_end, elements_offset, initial_capacity * kPointerSize); |
| 121 |
| 122 // Fill the FixedArray with the hole value. Inline the code if short. |
| 123 Register the_hole = scratch3; |
| 124 __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex); |
| 125 __ InitializeFieldsWithFiller(elements_offset, elements_end, the_hole); |
| 126 } |
| 127 |
| 128 enum JSArrayFillMode { kDontFillWithHoles, kFillWithHoles }; |
| 129 |
| 130 // Allocate a JSArray with the number of elements stored in a register. The |
| 131 // register array_function holds the built-in Array function and the register |
| 132 // array_size holds the size of the array as a smi. The allocated array is put |
| 133 // into the result register, and the beginning and end of the FixedArray |
| 134 // elements storage are put into registers elements_array_storage and |
| 135 // elements_array_end (see below for when that is not the case). If the |
| 136 // parameter fill_mode is kFillWithHoles, the allocated elements backing store |
| 137 // is filled with the hole values; otherwise it is left uninitialized. When the |
| 138 // backing store is filled, the register elements_array_storage is scratched. |
| 139 static void AllocateJSArray(MacroAssembler* masm, |
| 140 Register array_function, |
| 141 Register array_size, |
| 142 Register result, |
| 143 Register elements_array_storage, |
| 144 Register elements_array_end, |
| 145 Register scratch1, |
| 146 Register scratch2, |
| 147 JSArrayFillMode fill_mode, |
| 148 Label* gc_required) { |
| 149 ASSERT(!AreAliased(array_function, array_size, result, elements_array_storage, |
| 150 elements_array_end, scratch1, scratch2)); |
| 151 // Load the initial map from the array function. |
| 152 Register map = elements_array_storage; |
| 153 __ LoadInitialArrayMap(array_function, scratch2, map, |
| 154 (fill_mode == kFillWithHoles) ? kArrayCanHaveHoles |
| 155 : kArrayCantHaveHoles); |
| 156 if (FLAG_debug_code) { |
| 157 __ Cmp(array_size, Operand(Smi::FromInt(0))); |
| 158 __ Assert(ne, "Array size is unexpectedly 0"); |
| 159 } |
| 160 |
| 161 // Allocate the JSArray object together with space for a FixedArray with the |
| 162 // requested number of elements. |
| 163 __ Mov(elements_array_end, |
| 164 (JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize); |
| 165 __ Add(elements_array_end, elements_array_end, Operand::UntagSmi(array_size)); |
| 166 __ Allocate(elements_array_end, result, scratch1, scratch2, gc_required, |
| 167 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); |
| 168 |
| 169 // Allocated the JSArray. Now initialize the fields except for the elements |
| 170 // array. |
| 171 __ Str(map, FieldMemOperand(result, JSObject::kMapOffset)); |
| 172 Register empty_prop = elements_array_storage; |
| 173 __ LoadRoot(empty_prop, Heap::kEmptyFixedArrayRootIndex); |
| 174 __ Str(empty_prop, FieldMemOperand(result, JSArray::kPropertiesOffset)); |
| 175 // Field JSArray::kElementsOffset is initialized later. |
| 176 __ Str(array_size, FieldMemOperand(result, JSArray::kLengthOffset)); |
| 177 |
| 178 // Calculate the location of the elements array and set elements array member |
| 179 // of the JSArray. |
| 180 Register elements = elements_array_storage; |
| 181 __ Add(elements, result, JSArray::kSize); |
| 182 __ Str(elements, FieldMemOperand(result, JSArray::kElementsOffset)); |
| 183 |
| 184 // Set up fixed array map. |
| 185 __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex); |
| 186 __ Str(scratch1, FieldMemOperand(elements, FixedArray::kMapOffset)); |
| 187 |
| 188 // Set up fixed array length. |
| 189 __ Str(array_size, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 190 |
| 191 // Set elements to point to first element of fixed array. |
| 192 __ Add(elements, elements, FixedArray::kHeaderSize - kHeapObjectTag); |
| 193 |
| 194 // Calculate elements array and elements array end. |
| 195 __ Add(elements_array_end, elements, |
| 196 Operand::UntagSmiAndScale(array_size, kPointerSizeLog2)); |
| 197 |
| 198 // Fill the allocated FixedArray with the hole value if requested. |
| 199 if (fill_mode == kFillWithHoles) { |
| 200 Register the_hole = scratch1; |
| 201 __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex); |
| 202 __ InitializeFieldsWithFiller(elements, elements_array_end, the_hole); |
| 203 } |
| 204 } |
| 205 |
| 206 |
| 207 // Load the built-in InternalArray function from the current context. |
| 208 static void GenerateLoadInternalArrayFunction(MacroAssembler* masm, |
| 209 Register result) { |
| 210 // Load the native context. |
| 211 __ Ldr(result, GlobalObjectMemOperand()); |
| 212 __ Ldr(result, |
| 213 FieldMemOperand(result, GlobalObject::kNativeContextOffset)); |
| 214 // Load the InternalArray function from the native context. |
| 215 __ Ldr(result, ContextMemOperand(result, |
| 216 Context::INTERNAL_ARRAY_FUNCTION_INDEX)); |
| 217 } |
| 218 |
| 219 |
| 220 void Builtins::Generate_Adaptor(MacroAssembler* masm, |
| 221 CFunctionId id, |
| 222 BuiltinExtraArguments extra_args) { |
| 223 // ----------- S t a t e ------------- |
| 224 // -- x0 : number of arguments excluding receiver |
| 225 // -- x1 : called function (only guaranteed when |
| 226 // extra_args requires it) |
| 227 // -- cp : context |
| 228 // -- sp[0] : last argument |
| 229 // -- ... |
| 230 // -- sp[8 * (argc - 1)] : first argument (argc == x0) |
| 231 // -- sp[8 * argc] : receiver |
| 232 // ----------------------------------- |
| 233 |
| 234 // Insert extra arguments. |
| 235 int num_extra_args = 0; |
| 236 if (extra_args == NEEDS_CALLED_FUNCTION) { |
| 237 num_extra_args = 1; |
| 238 __ Push(x1); |
| 239 } else { |
| 240 ASSERT(extra_args == NO_EXTRA_ARGUMENTS); |
| 241 } |
| 242 |
| 243 // JumpToExternalReference expects x0 to contain the number of arguments |
| 244 // including the receiver and the extra arguments. |
| 245 __ Add(x0, x0, num_extra_args + 1); |
| 246 __ JumpToExternalReference(ExternalReference(id, masm->isolate())); |
| 247 } |
| 248 |
| 249 |
| 250 // Create a new array for the built-in Array function. This function allocates |
| 251 // the JSArray object and the FixedArray elements array and initializes these. |
| 252 // If the Array cannot be constructed in native code, the runtime is called. This |
| 253 // function assumes the following state: |
| 254 // x0: argc |
| 255 // x1: constructor (built-in Array function) |
| 256 // lr: return address |
| 257 // sp[0]: last argument |
| 258 // This function is used for both construct and normal calls of Array. The only |
| 259 // difference between handling a construct call and a normal call is that for a |
| 260 // construct call the constructor function in x1 needs to be preserved for |
| 261 // entering the generic code. In both cases argc in x0 needs to be preserved. |
| 262 // Both registers are preserved by this code, so there is no need to |
| 263 // differentiate between a construct call and a normal call. |
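| // The code below handles three cases: a call with no arguments creates an |
| // empty array, a call with a single smi argument creates an array of that |
| // length, and a call with two or more arguments creates an array containing |
| // those arguments as its elements. |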
| 264 void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code) { |
| 265 Counters* counters = masm->isolate()->counters(); |
| 266 Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array, |
| 267 has_non_smi_element, finish, cant_transition_map, not_double; |
| 268 Register result = x0; |
| 269 Register argc = x0; |
| 270 Register constructor = x1; |
| 271 Register last_arg = x2; |
| 272 Register jsarray = x3; |
| 273 |
| 274 __ Peek(last_arg, 0 * kPointerSize); |
| 275 |
| 276 // Check for array construction with zero arguments or one. |
| 277 __ Cbnz(argc, &argc_one_or_more); |
| 278 |
| 279 // Handle construction of an empty array. |
| 280 __ Bind(&empty_array); |
| 281 AllocateEmptyJSArray(masm, constructor, jsarray, x10, x11, x12, |
| 282 call_generic_code); |
| 283 __ IncrementCounter(counters->array_function_native(), 1, x10, x11); |
| 284 // Set up return value, remove receiver from stack and return. |
| 285 __ Mov(result, jsarray); |
| 286 __ Drop(1); |
| 287 __ Ret(); |
| 288 |
| 289 // Check for one argument. Bail out if the argument is not a smi or if it |
| 290 // is negative. |
| 291 __ Bind(&argc_one_or_more); |
| 292 __ Cmp(argc, 1); |
| 293 __ B(ne, &argc_two_or_more); |
| 294 __ Cbnz(last_arg, ¬_empty_array); |
| 295 __ Drop(1); |
| 296 __ Mov(argc, 0); // Treat this as a call with argc of zero. |
| 297 __ B(&empty_array); |
| 298 |
| 299 __ Bind(¬_empty_array); |
| 300 // Call generic code for non-smi and negative smi values. |
| 301 __ JumpIfNotSmi(last_arg, call_generic_code); |
| 302 __ Tbnz(last_arg, MaskToBit(kIntptrSignBit), call_generic_code); |
| 303 |
| 304 // Handle construction of an empty array of a certain size. Bail out if size |
| 305 // is too large to actually allocate an elements array. |
| 306 __ Cmp(last_arg, |
| 307 Operand(Smi::FromInt(JSObject::kInitialMaxFastElementArray))); |
| 308 __ B(ge, call_generic_code); |
| 309 |
| 310 AllocateJSArray(masm, constructor, last_arg, jsarray, x4, x5, x6, x7, |
| 311 kFillWithHoles, call_generic_code); |
| 312 __ IncrementCounter(counters->array_function_native(), 1, x10, x11); |
| 313 __ Mov(result, jsarray); |
| 314 __ Drop(2); |
| 315 __ Ret(); |
| 316 |
| 317 // Handle construction of an array from a list of arguments. |
| 318 Register array_size = x10; |
| 319 Register elements_array = x4; |
| 320 Register elements_array_end = x5; |
| 321 __ Bind(&argc_two_or_more); |
| 322 __ SmiTag(array_size, argc); |
| 323 AllocateJSArray(masm, constructor, array_size, jsarray, elements_array, |
| 324 elements_array_end, x6, x7, kDontFillWithHoles, |
| 325 call_generic_code); |
| 326 __ IncrementCounter(counters->array_function_native(), 1, x10, x11); |
| 327 |
| 328 // Fill arguments as array elements. Copy from the top of the stack (last |
| 329 // element) to the array backing store filling it backwards. |
| 330 // Note: elements_array_end points after the backing store, therefore PreIndex |
| 331 // is used when filling the backing store. |
| 332 Label loop, entry; |
| 333 Register index = x6; |
| 334 Register element = x7; |
| 335 __ Mov(index, 0); |
| 336 __ B(&entry); |
| 337 __ Bind(&loop); |
| 338 __ Peek(element, Operand(index, LSL, kPointerSizeLog2)); |
| 339 __ Add(index, index, 1); |
| 340 if (FLAG_smi_only_arrays) { |
| 341 __ JumpIfNotSmi(element, &has_non_smi_element); |
| 342 } |
| 343 __ Str(element, MemOperand(elements_array_end, -kPointerSize, PreIndex)); |
| 344 __ Bind(&entry); |
| 345 __ Cmp(elements_array, elements_array_end); |
| 346 __ B(lt, &loop); |
| 347 |
| 348 __ Bind(&finish); |
| 349 |
| 350 // Remove caller arguments and receiver from the stack, set up return value |
| 351 // and return. |
| 352 __ Drop(index); |
| 353 __ Drop(1); |
| 354 __ Mov(result, jsarray); |
| 355 __ Ret(); |
| 356 |
| 357 __ Bind(&has_non_smi_element); |
| 358 // Double values are handled by the runtime. |
| 359 __ CheckMap(element, x10, Heap::kHeapNumberMapRootIndex, ¬_double, |
| 360 DONT_DO_SMI_CHECK); |
| 361 __ Bind(&cant_transition_map); |
| 362 __ UndoAllocationInNewSpace(jsarray, x4); |
| 363 __ B(call_generic_code); |
| 364 |
| 365 __ Bind(¬_double); |
| 366 // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS. |
| 367 __ Ldr(element, FieldMemOperand(jsarray, HeapObject::kMapOffset)); |
| 368 __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, |
| 369 element, x10, &cant_transition_map); |
| 370 __ Str(element, FieldMemOperand(jsarray, HeapObject::kMapOffset)); |
| 371 __ RecordWriteField(jsarray, HeapObject::kMapOffset, element, x10, |
| 372 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, |
| 373 OMIT_SMI_CHECK); |
| 374 |
| 375 Label loop2; |
| 376 __ Sub(index, index, 1); |
| 377 __ Bind(&loop2); |
| 378 __ Peek(element, Operand(index, LSL, kPointerSizeLog2)); |
| 379 __ Add(index, index, 1); |
| 380 __ Str(element, MemOperand(elements_array_end, -kPointerSize, PreIndex)); |
| 381 __ Cmp(elements_array, elements_array_end); |
| 382 __ B(lt, &loop2); |
| 383 __ B(&finish); |
| 384 } |
| 385 |
| 386 |
| 387 void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) { |
| 388 // ----------- S t a t e ------------- |
| 389 // -- x0 : number of arguments |
| 390 // -- lr : return address |
| 391 // -- sp[...]: constructor arguments |
| 392 // ----------------------------------- |
| 393 ASM_LOCATION("Builtins::Generate_InternalArrayCode"); |
| 394 Label generic_array_code; |
| 395 |
| 396 // Get the InternalArray function. |
| 397 GenerateLoadInternalArrayFunction(masm, x1); |
| 398 |
| 399 if (FLAG_debug_code) { |
| 400 // Initial map for the builtin InternalArray function should be a map. |
| 401 __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset)); |
| 402 __ Tst(x10, kSmiTagMask); |
| 403 __ Assert(ne, "Unexpected initial map for InternalArray function"); |
| 404 __ CompareObjectType(x10, x11, x12, MAP_TYPE); |
| 405 __ Assert(eq, "Unexpected initial map for InternalArray function"); |
| 406 } |
| 407 |
| 408 // Run the native code for the InternalArray function called as a normal |
| 409 // function. |
| 410 if (FLAG_optimize_constructed_arrays) { |
| 411 // tail call a stub |
| 412 InternalArrayConstructorStub stub(masm->isolate()); |
| 413 __ TailCallStub(&stub); |
| 414 } else { |
| 415 ArrayNativeCode(masm, &generic_array_code); |
| 416 |
| 417 // Jump to the generic array code if the specialized code cannot handle the |
| 418 // construction. |
| 419 __ Bind(&generic_array_code); |
| 420 Handle<Code> array_code = |
| 421 masm->isolate()->builtins()->InternalArrayCodeGeneric(); |
| 422 __ Jump(array_code, RelocInfo::CODE_TARGET); |
| 423 } |
| 424 } |
| 425 |
| 426 |
| 427 void Builtins::Generate_ArrayCode(MacroAssembler* masm) { |
| 428 // ----------- S t a t e ------------- |
| 429 // -- x0 : number of arguments |
| 430 // -- lr : return address |
| 431 // -- sp[...]: constructor arguments |
| 432 // ----------------------------------- |
| 433 ASM_LOCATION("Builtins::Generate_ArrayCode"); |
| 434 Label generic_array_code, one_or_more_arguments, two_or_more_arguments; |
| 435 |
| 436 // Get the Array function. |
| 437 GenerateLoadArrayFunction(masm, x1); |
| 438 |
| 439 if (FLAG_debug_code) { |
| 440 // Initial map for the builtin Array function should be a map. |
| 441 __ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset)); |
| 442 __ Tst(x10, kSmiTagMask); |
| 443 __ Assert(ne, "Unexpected initial map for Array function"); |
| 444 __ CompareObjectType(x10, x11, x12, MAP_TYPE); |
| 445 __ Assert(eq, "Unexpected initial map for Array function"); |
| 446 } |
| 447 |
| 448 // Run the native code for the Array function called as a normal function. |
| 449 if (FLAG_optimize_constructed_arrays) { |
| 450 // tail call a stub |
| 451 Handle<Object> undefined_sentinel( |
| 452 masm->isolate()->heap()->undefined_value(), |
| 453 masm->isolate()); |
| 454 __ Mov(x2, Operand(undefined_sentinel)); |
| 455 ArrayConstructorStub stub(masm->isolate()); |
| 456 __ TailCallStub(&stub); |
| 457 } else { |
| 458 ArrayNativeCode(masm, &generic_array_code); |
| 459 |
| 460 // Jump to the generic array code if the specialized code cannot handle |
| 461 // the construction. |
| 462 __ Bind(&generic_array_code); |
| 463 Handle<Code> array_code = |
| 464 masm->isolate()->builtins()->ArrayCodeGeneric(); |
| 465 __ Jump(array_code, RelocInfo::CODE_TARGET); |
| 466 } |
| 467 } |
| 468 |
| 469 |
| 470 void Builtins::Generate_CommonArrayConstructCode(MacroAssembler* masm) { |
| 471 // ----------- S t a t e ------------- |
| 472 // -- x0 : number of arguments |
| 473 // -- x1 : constructor function |
| 474 // -- x2 : type info cell |
| 475 // -- lr : return address |
| 476 // -- sp[...]: constructor arguments |
| 477 // ----------------------------------- |
| 478 ASM_LOCATION("Builtins::Generate_CommonArrayConstructCode"); |
| 479 |
| 480 if (FLAG_debug_code) { |
| 481 Register constructor = x1; |
| 482 // The array construct code is only set for the builtin and internal |
| 483 // Array functions which always have a map. |
| 484 // Initial map for the builtin Array function should be a map. |
| 485 __ Ldr(x3, FieldMemOperand(constructor, |
| 486 JSFunction::kPrototypeOrInitialMapOffset)); |
| 487 __ Tst(x3, kSmiTagMask); |
| 488 __ Assert(ne, "Unexpected initial map for Array function"); |
| 489 __ CompareObjectType(x3, x10, x11, MAP_TYPE); |
| 490 __ Assert(eq, "Unexpected initial map for Array function"); |
| 491 } |
| 492 Label generic_constructor; |
| 493 // Run the native code for the Array function called as a constructor. |
| 494 ArrayNativeCode(masm, &generic_constructor); |
| 495 |
| 496 // Jump to the generic construct code in case the specialized code cannot |
| 497 // handle the construction. |
| 498 __ Bind(&generic_constructor); |
| 499 Handle<Code> generic_construct_stub = |
| 500 masm->isolate()->builtins()->JSConstructStubGeneric(); |
| 501 __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); |
| 502 } |
| 503 |
| 504 |
| 505 void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { |
| 506 // ----------- S t a t e ------------- |
| 507 // -- x0 : number of arguments |
| 508 // -- x1 : constructor function |
| 509 // -- lr : return address |
| 510 // -- sp[(argc - n - 1) * 8] : arg[n] (zero based) |
| 511 // -- sp[argc * 8] : receiver |
| 512 // ----------------------------------- |
| 513 ASM_LOCATION("Builtins::Generate_StringConstructCode"); |
| 514 Counters* counters = masm->isolate()->counters(); |
| 515 __ IncrementCounter(counters->string_ctor_calls(), 1, x10, x11); |
| 516 |
| 517 Register argc = x0; |
| 518 Register function = x1; |
| 519 if (FLAG_debug_code) { |
| 520 __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, x10); |
| 521 __ Cmp(function, x10); |
| 522 __ Assert(eq, "Unexpected String function"); |
| 523 } |
| 524 |
| 525 // Load the first argument into x0 and get rid of the rest. |
| 526 Label no_arguments; |
| 527 __ Cbz(argc, &no_arguments); |
| 528 // First arg = sp[(argc - 1) * 8]. |
| 529 __ Sub(argc, argc, 1); |
| 530 __ Claim(argc, kXRegSizeInBytes); |
| 531 // jssp now points to args[0]; load args[0], then drop it and the receiver. |
| 532 // TODO(jbramley): Consider adding ClaimAndPoke. |
| 533 __ Ldr(argc, MemOperand(jssp, 2 * kPointerSize, PostIndex)); |
| 534 |
| 535 Register argument = x2; |
| 536 Label not_cached, argument_is_string; |
| 537 NumberToStringStub::GenerateLookupNumberStringCache( |
| 538 masm, |
| 539 argc, // Input. |
| 540 argument, // Result. |
| 541 x10, // Scratch. |
| 542 x11, // Scratch. |
| 543 x12, // Scratch. |
| 544 NumberToStringStub::OBJECT_IS_NOT_SMI, // Is it a Smi? |
| 545 ¬_cached); |
| 546 __ IncrementCounter(counters->string_ctor_cached_number(), 1, x10, x11); |
| 547 __ Bind(&argument_is_string); |
| 548 |
| 549 // ----------- S t a t e ------------- |
| 550 // -- x2 : argument converted to string |
| 551 // -- x1 : constructor function |
| 552 // -- lr : return address |
| 553 // ----------------------------------- |
| 554 |
| 555 Label gc_required; |
| 556 Register new_obj = x0; |
| 557 __ Allocate(JSValue::kSize, new_obj, x10, x11, &gc_required, TAG_OBJECT); |
| 558 |
| 559 // Initialize the String object. |
| 560 Register map = x3; |
| 561 __ LoadGlobalFunctionInitialMap(function, map, x10); |
| 562 if (FLAG_debug_code) { |
| 563 __ Ldrb(x4, FieldMemOperand(map, Map::kInstanceSizeOffset)); |
| 564 __ Cmp(x4, JSValue::kSize >> kPointerSizeLog2); |
| 565 __ Assert(eq, "Unexpected string wrapper instance size"); |
| 566 __ Ldrb(x4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset)); |
| 567 __ Cmp(x4, 0); |
| 568 __ Assert(eq, "Unexpected unused properties of string wrapper"); |
| 569 } |
| 570 __ Str(map, FieldMemOperand(new_obj, HeapObject::kMapOffset)); |
| 571 |
| 572 Register empty = x3; |
| 573 __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex); |
| 574 __ Str(empty, FieldMemOperand(new_obj, JSObject::kPropertiesOffset)); |
| 575 __ Str(empty, FieldMemOperand(new_obj, JSObject::kElementsOffset)); |
| 576 |
| 577 __ Str(argument, FieldMemOperand(new_obj, JSValue::kValueOffset)); |
| 578 |
| 579 // Ensure the object is fully initialized. |
| 580 STATIC_ASSERT(JSValue::kSize == (4 * kPointerSize)); |
| 581 |
| 582 __ Ret(); |
| 583 |
| 584 // The argument was not found in the number to string cache. Check |
| 585 // if it's a string already before calling the conversion builtin. |
| 586 Label convert_argument; |
| 587 __ Bind(¬_cached); |
| 588 __ JumpIfSmi(argc, &convert_argument); |
| 589 |
| 590 // Is it a String? |
| 591 __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset)); |
| 592 __ Ldrb(x11, FieldMemOperand(x10, Map::kInstanceTypeOffset)); |
| 593 __ Tbnz(x11, MaskToBit(kIsNotStringMask), &convert_argument); |
| 594 __ Mov(argument, argc); |
| 595 __ IncrementCounter(counters->string_ctor_string_value(), 1, x10, x11); |
| 596 __ B(&argument_is_string); |
| 597 |
| 598 // Invoke the conversion builtin and put the result into x2. |
| 599 __ Bind(&convert_argument); |
| 600 __ Push(function); // Preserve the function. |
| 601 __ IncrementCounter(counters->string_ctor_conversions(), 1, x10, x11); |
| 602 { |
| 603 FrameScope scope(masm, StackFrame::INTERNAL); |
| 604 __ Push(argc); |
| 605 __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); |
| 606 } |
| 607 __ Pop(function); |
| 608 __ Mov(argument, x0); |
| 609 __ B(&argument_is_string); |
| 610 |
| 611 // Load the empty string into x2, remove the receiver from the |
| 612 // stack, and jump back to the case where the argument is a string. |
| 613 __ Bind(&no_arguments); |
| 614 __ LoadRoot(argument, Heap::kempty_stringRootIndex); |
| 615 __ Drop(1); |
| 616 __ B(&argument_is_string); |
| 617 |
| 618 // At this point the argument is already a string. Call runtime to create a |
| 619 // string wrapper. |
| 620 __ Bind(&gc_required); |
| 621 __ IncrementCounter(counters->string_ctor_gc_required(), 1, x10, x11); |
| 622 { |
| 623 FrameScope scope(masm, StackFrame::INTERNAL); |
| 624 __ Push(argument); |
| 625 __ CallRuntime(Runtime::kNewStringWrapper, 1); |
| 626 } |
| 627 __ Ret(); |
| 628 } |
| 629 |
| 630 |
| 631 static void GenerateTailCallToSharedCode(MacroAssembler* masm) { |
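| // Load the code object from the shared function info of the function in x1. |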
| 632 __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); |
| 633 __ Ldr(x2, FieldMemOperand(x2, SharedFunctionInfo::kCodeOffset)); |
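| // Compute the untagged code entry address and tail call it. |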
| 634 __ Add(x2, x2, Code::kHeaderSize - kHeapObjectTag); |
| 635 __ Br(x2); |
| 636 } |
| 637 |
| 638 |
| 639 void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) { |
| 640 GenerateTailCallToSharedCode(masm); |
| 641 } |
| 642 |
| 643 |
| 644 void Builtins::Generate_InstallRecompiledCode(MacroAssembler* masm) { |
| 645 // Enter an internal frame. |
| 646 { |
| 647 FrameScope scope(masm, StackFrame::INTERNAL); |
| 648 |
| 649 // Preserve the function and the call kind information. |
| 650 __ Push(x1, x5); |
| 651 |
| 652 // Push the function on the stack as the argument to the runtime function. |
| 653 __ Push(x1); |
| 654 __ CallRuntime(Runtime::kInstallRecompiledCode, 1); |
| 655 // Calculate the entry point. |
| 656 __ Add(x2, x0, Code::kHeaderSize - kHeapObjectTag); |
| 657 |
| 658 // Restore call kind information and saved function. |
| 659 __ Pop(x5, x1); |
| 660 |
| 661 // Tear down internal frame. |
| 662 } |
| 663 |
| 664 // Do a tail-call of the compiled function. |
| 665 __ Jump(x2); |
| 666 } |
| 667 |
| 668 |
| 669 void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) { |
| 670 { |
| 671 FrameScope scope(masm, StackFrame::INTERNAL); |
| 672 |
| 673 // Push a copy of the function and call kind information on to the stack. |
| 674 __ Push(x1, x5); |
| 675 |
| 676 // Pointer to the function is also the parameter to the runtime call. |
| 677 __ Push(x1); |
| 678 __ CallRuntime(Runtime::kParallelRecompile, 1); |
| 679 |
| 680 // Restore call kind information and receiver. |
| 681 __ Pop(x5, x1); |
| 682 |
| 683 // Tear down internal frame. |
| 684 } |
| 685 GenerateTailCallToSharedCode(masm); |
| 686 } |
| 687 |
| 688 |
| 689 static void Generate_JSConstructStubHelper(MacroAssembler* masm, |
| 690 bool is_api_function, |
| 691 bool count_constructions) { |
| 692 // ----------- S t a t e ------------- |
| 693 // -- x0 : number of arguments |
| 694 // -- x1 : constructor function |
| 695 // -- lr : return address |
| 696 // -- sp[...]: constructor arguments |
| 697 // ----------------------------------- |
| 698 |
| 699 ASM_LOCATION("Builtins::Generate_JSConstructStubHelper"); |
| 700 // Should never count constructions for api objects. |
| 701 ASSERT(!is_api_function || !count_constructions); |
| 702 |
| 703 Isolate* isolate = masm->isolate(); |
| 704 |
| 705 // Enter a construct frame. |
| 706 { |
| 707 FrameScope scope(masm, StackFrame::CONSTRUCT); |
| 708 |
| 709 // Preserve the two incoming parameters on the stack. |
| 710 Register argc = x0; |
| 711 Register constructor = x1; |
| 712 // x1: constructor function |
| 713 __ SmiTag(argc); |
| 714 __ Push(argc, constructor); |
| 715 // sp[0] : Constructor function. |
| 716 // sp[1]: number of arguments (smi-tagged) |
| 717 |
| 718 // Try to allocate the object without transitioning into C code. If any of |
| 719 // the preconditions is not met, the code bails out to the runtime call. |
| 720 Label rt_call, allocated; |
| 721 if (FLAG_inline_new) { |
| 722 Label undo_allocation; |
| 723 #if ENABLE_DEBUGGER_SUPPORT |
| 724 ExternalReference debug_step_in_fp = |
| 725 ExternalReference::debug_step_in_fp_address(isolate); |
| 726 __ Mov(x2, Operand(debug_step_in_fp)); |
| 727 __ Ldr(x2, MemOperand(x2)); |
| 728 __ Cbnz(x2, &rt_call); |
| 729 #endif |
| 730 // Load the initial map and verify that it is in fact a map. |
| 731 Register init_map = x2; |
| 732 __ Ldr(init_map, |
| 733 FieldMemOperand(constructor, |
| 734 JSFunction::kPrototypeOrInitialMapOffset)); |
| 735 __ JumpIfSmi(init_map, &rt_call); |
| 736 __ JumpIfNotObjectType(init_map, x10, x11, MAP_TYPE, &rt_call); |
| 737 |
| 738 // Check that the constructor is not constructing a JSFunction (see |
| 739 // comments in Runtime_NewObject in runtime.cc), in which case the initial |
| 740 // map's instance type would be JS_FUNCTION_TYPE. |
| 741 __ CompareInstanceType(init_map, x10, JS_FUNCTION_TYPE); |
| 742 __ B(eq, &rt_call); |
| 743 |
| 744 if (count_constructions) { |
| 745 Label allocate; |
| 746 // Decrease generous allocation count. |
| 747 __ Ldr(x3, FieldMemOperand(constructor, |
| 748 JSFunction::kSharedFunctionInfoOffset)); |
| 749 MemOperand constructor_count = |
| 750 FieldMemOperand(x3, SharedFunctionInfo::kConstructionCountOffset); |
| 751 __ Ldrb(x4, constructor_count); |
| 752 __ Subs(x4, x4, 1); |
| 753 __ Strb(x4, constructor_count); |
| 754 __ B(ne, &allocate); |
| 755 |
| 756 // Push the constructor and map to the stack, and the constructor again |
| 757 // as argument to the runtime call. |
| 758 __ Push(constructor, init_map, constructor); |
| 759 // The call will replace the stub, so the countdown is only done once. |
| 760 __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); |
| 761 __ Pop(init_map, constructor); |
| 762 __ Bind(&allocate); |
| 763 } |
| 764 |
| 765 // Now allocate the JSObject on the heap. |
| 766 Register obj_size = x3; |
| 767 Register new_obj = x4; |
| 768 __ Ldrb(obj_size, FieldMemOperand(init_map, Map::kInstanceSizeOffset)); |
| 769 __ Allocate(obj_size, new_obj, x10, x11, &rt_call, SIZE_IN_WORDS); |
| 770 |
| 771 // Allocated the JSObject, now initialize the fields. Map is set to |
| 772 // initial map and properties and elements are set to empty fixed array. |
| 773 // NB. the object pointer is not tagged, so MemOperand is used. |
| 774 Register empty = x5; |
| 775 __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex); |
| 776 __ Str(init_map, MemOperand(new_obj, JSObject::kMapOffset)); |
| 777 __ Str(empty, MemOperand(new_obj, JSObject::kPropertiesOffset)); |
| 778 __ Str(empty, MemOperand(new_obj, JSObject::kElementsOffset)); |
| 779 |
| 780 Register first_prop = x5; |
| 781 __ Add(first_prop, new_obj, JSObject::kHeaderSize); |
| 782 |
| 783 // Fill all of the in-object properties with the appropriate filler. |
| 784 Register obj_end = x6; |
| 785 __ Add(obj_end, new_obj, Operand(obj_size, LSL, kPointerSizeLog2)); |
| 786 Register undef = x7; |
| 787 __ LoadRoot(undef, Heap::kUndefinedValueRootIndex); |
| 788 |
| 789 // Obtain number of pre-allocated property fields and in-object |
| 790 // properties. |
| 791 Register prealloc_fields = x10; |
| 792 Register inobject_props = x11; |
| 793 Register inst_sizes = x11; |
| 794 __ Ldr(inst_sizes, FieldMemOperand(init_map, Map::kInstanceSizesOffset)); |
| 795 __ Ubfx(prealloc_fields, inst_sizes, |
| 796 Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte, |
| 797 kBitsPerByte); |
| 798 __ Ubfx(inobject_props, inst_sizes, |
| 799 Map::kInObjectPropertiesByte * kBitsPerByte, kBitsPerByte); |
| 800 |
| 801 if (count_constructions) { |
| 802 // Register first_non_prealloc is the offset of the first field after |
| 803 // pre-allocated fields. |
| 804 Register first_non_prealloc = x12; |
| 805 __ Add(first_non_prealloc, first_prop, |
| 806 Operand(prealloc_fields, LSL, kPointerSizeLog2)); |
| 807 |
| 808 if (FLAG_debug_code) { |
| 809 __ Cmp(first_non_prealloc, obj_end); |
| 810 __ Assert(le, "Unexpected number of pre-allocated property fields"); |
| 811 } |
| 812 __ InitializeFieldsWithFiller(first_prop, first_non_prealloc, undef); |
| 813 // Fill the remaining fields with one-pointer filler maps to allow for truncation. |
| 814 __ LoadRoot(x12, Heap::kOnePointerFillerMapRootIndex); |
| 815 __ InitializeFieldsWithFiller(first_prop, obj_end, x12); |
| 816 } else { |
| 817 __ InitializeFieldsWithFiller(first_prop, obj_end, undef); |
| 818 } |
| 819 |
| 820 // Add the object tag to make the JSObject real, so that we can continue |
| 821 // and jump into the continuation code at any time from now on. Any |
| 822 // failures need to undo the allocation, so that the heap is in a |
| 823 // consistent state and verifiable. |
| 824 __ Add(new_obj, new_obj, kHeapObjectTag); |
| 825 |
| 826 // Check if a non-empty properties array is needed. Continue with |
| 827 // allocated object if not, or fall through to runtime call if it is. |
| 828 Register element_count = x3; |
| 829 __ Ldrb(x3, FieldMemOperand(init_map, Map::kUnusedPropertyFieldsOffset)); |
| 830 // The field instance sizes contains both pre-allocated property fields |
| 831 // and in-object properties. |
| 832 __ Add(x3, x3, prealloc_fields); |
| 833 __ Subs(element_count, x3, inobject_props); |
| 834 |
| 835 // Done if no extra properties are to be allocated. |
| 836 __ B(eq, &allocated); |
| 837 __ Assert(pl, "Property allocation count failed"); |
| 838 |
| 839 // Scale the number of elements by pointer size and add the header for |
| 840 // FixedArrays to the start of the next object calculation from above. |
| 841 Register new_array = x5; |
| 842 Register array_size = x6; |
| 843 __ Add(array_size, element_count, FixedArray::kHeaderSize / kPointerSize); |
| 844 __ Allocate(array_size, new_array, x11, x12, &undo_allocation, |
| 845 static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | |
| 846 SIZE_IN_WORDS)); |
| 847 |
| 848 Register array_map = x10; |
| 849 __ LoadRoot(array_map, Heap::kFixedArrayMapRootIndex); |
| 850 __ Str(array_map, MemOperand(new_array, FixedArray::kMapOffset)); |
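| // Store the number of extra properties as the smi-tagged length of the new array. |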
| 851 __ SmiTag(x0, element_count); |
| 852 __ Str(x0, MemOperand(new_array, FixedArray::kLengthOffset)); |
| 853 |
| 854 // Initialize the fields to undefined. |
| 855 Register elements = x10; |
| 856 Register elements_end = x11; |
| 857 __ Add(elements, new_array, FixedArray::kHeaderSize); |
| 858 __ Add(elements_end, elements, |
| 859 Operand(element_count, LSL, kPointerSizeLog2)); |
| 860 __ InitializeFieldsWithFiller(elements, elements_end, undef); |
| 861 |
| 862 // Store the initialized FixedArray into the properties field of the |
| 863 // JSObject. |
| 864 __ Add(new_array, new_array, kHeapObjectTag); |
| 865 __ Str(new_array, FieldMemOperand(new_obj, JSObject::kPropertiesOffset)); |
| 866 |
| 867 // Continue with JSObject being successfully allocated. |
| 868 __ B(&allocated); |
| 869 |
| 870 // Undo the setting of the new top so that the heap is verifiable. For |
| 871 // example, the map's unused properties potentially do not match the |
| 872 // allocated object's unused properties. |
| 873 __ Bind(&undo_allocation); |
| 874 __ UndoAllocationInNewSpace(new_obj, x14); |
| 875 } |
| 876 |
| 877 // Allocate the new receiver object using the runtime call. |
| 878 __ Bind(&rt_call); |
| 879 __ Push(constructor); // Argument for Runtime_NewObject. |
| 880 __ CallRuntime(Runtime::kNewObject, 1); |
| 881 __ Mov(x4, x0); |
| 882 |
| 883 // Receiver for constructor call allocated. |
| 884 // x4: JSObject |
| 885 __ Bind(&allocated); |
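| // Push the allocated receiver twice: one copy is the receiver for the |
| // constructor call below, the other survives the call and is used as the |
| // default result in the use_receiver case. |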
| 886 __ Push(x4, x4); |
| 887 |
| 888 // Reload the number of arguments from the stack. |
| 889 // Set it up in x0 for the function call below. |
| 890 // jssp[0]: receiver |
| 891 // jssp[1]: receiver |
| 892 // jssp[2]: constructor function |
| 893 // jssp[3]: number of arguments (smi-tagged) |
| 894 __ Peek(constructor, 2 * kXRegSizeInBytes); // Load constructor. |
| 895 __ Peek(argc, 3 * kXRegSizeInBytes); // Load number of arguments. |
| 896 __ SmiUntag(argc); |
| 897 |
| 898 // Set up pointer to last argument. |
| 899 __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset); |
| 900 |
| 901 // Copy arguments and receiver to the expression stack. |
| 902 // Copy 2 values every loop to use ldp/stp. |
| 903 // x0: number of arguments |
| 904 // x1: constructor function |
| 905 // x2: address of last argument (caller sp) |
| 906 // jssp[0]: receiver |
| 907 // jssp[1]: receiver |
| 908 // jssp[2]: constructor function |
| 909 // jssp[3]: number of arguments (smi-tagged) |
| 910 // Compute the start address of the copy in x3. |
| 911 __ Add(x3, x2, Operand(argc, LSL, kPointerSizeLog2)); |
| 912 Label loop, entry, done_copying_arguments; |
| 913 __ B(&entry); |
| 914 __ Bind(&loop); |
| 915 __ Ldp(x10, x11, MemOperand(x3, -2 * kPointerSize, PreIndex)); |
| 916 __ Push(x11, x10); |
| 917 __ Bind(&entry); |
| 918 __ Cmp(x3, x2); |
| 919 __ B(gt, &loop); |
| 920 // Because we copied values 2 by 2 we may have copied one extra value. |
| 921 // Drop it if that is the case. |
| 922 __ B(eq, &done_copying_arguments); |
| 923 __ Drop(1); |
| 924 __ Bind(&done_copying_arguments); |
| 925 |
| 926 // Call the function. |
| 927 // x0: number of arguments |
| 928 // x1: constructor function |
| 929 if (is_api_function) { |
| 930 __ Ldr(cp, FieldMemOperand(constructor, JSFunction::kContextOffset)); |
| 931 Handle<Code> code = |
| 932 masm->isolate()->builtins()->HandleApiCallConstruct(); |
| 933 ParameterCount expected(0); |
| 934 __ InvokeCode(code, expected, expected, |
| 935 RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD); |
| 936 } else { |
| 937 ParameterCount actual(argc); |
| 938 __ InvokeFunction(constructor, actual, CALL_FUNCTION, |
| 939 NullCallWrapper(), CALL_AS_METHOD); |
| 940 } |
| 941 |
| 942 // Store offset of return address for deoptimizer. |
| 943 if (!is_api_function && !count_constructions) { |
| 944 masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset()); |
| 945 } |
| 946 |
| 947 // Restore the context from the frame. |
| 948 // x0: result |
| 949 // jssp[0]: receiver |
| 950 // jssp[1]: constructor function |
| 951 // jssp[2]: number of arguments (smi-tagged) |
| 952 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 953 |
| 954 // If the result is an object (in the ECMA sense), we should get rid |
| 955 // of the receiver and use the result; see ECMA-262 section 13.2.2-7 |
| 956 // on page 74. |
| 957 Label use_receiver, exit; |
| 958 |
| 959 // If the result is a smi, it is *not* an object in the ECMA sense. |
| 960 // x0: result |
| 961 // jssp[0]: receiver (newly allocated object) |
| 962 // jssp[1]: constructor function |
| 963 // jssp[2]: number of arguments (smi-tagged) |
| 964 __ JumpIfSmi(x0, &use_receiver); |
| 965 |
| 966 // If the type of the result (stored in its map) is less than |
| 967 // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. |
| 968 __ JumpIfObjectType(x0, x1, x3, FIRST_SPEC_OBJECT_TYPE, &exit, ge); |
| 969 |
| 970 // Throw away the result of the constructor invocation and use the |
| 971 // on-stack receiver as the result. |
| 972 __ Bind(&use_receiver); |
| 973 __ Peek(x0, 0); |
| 974 |
| 975 // Remove the receiver from the stack, remove caller arguments, and |
| 976 // return. |
| 977 __ Bind(&exit); |
| 978 // x0: result |
| 979 // jssp[0]: receiver (newly allocated object) |
| 980 // jssp[1]: constructor function |
| 981 // jssp[2]: number of arguments (smi-tagged) |
| 982 __ Peek(x1, 2 * kXRegSizeInBytes); |
| 983 |
| 984 // Leave construct frame. |
| 985 } |
| 986 |
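| // x1 still holds the smi-tagged argument count; use it to remove the caller |
| // arguments, then drop the receiver. |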
| 987 __ DropBySMI(x1); |
| 988 __ Drop(1); |
| 989 __ IncrementCounter(isolate->counters()->constructed_objects(), 1, x1, x2); |
| 990 __ Ret(); |
| 991 } |
| 992 |
| 993 |
| 994 void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) { |
| 995 Generate_JSConstructStubHelper(masm, false, true); |
| 996 } |
| 997 |
| 998 |
| 999 void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { |
| 1000 Generate_JSConstructStubHelper(masm, false, false); |
| 1001 } |
| 1002 |
| 1003 |
| 1004 void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) { |
| 1005 Generate_JSConstructStubHelper(masm, true, false); |
| 1006 } |
| 1007 |
| 1008 |
| 1009 // Input: |
| 1010 // x0: code entry. |
| 1011 // x1: function. |
| 1012 // x2: receiver. |
| 1013 // x3: argc. |
| 1014 // x4: argv. |
| 1015 // Output: |
| 1016 // x0: result. |
| 1017 static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, |
| 1018 bool is_construct) { |
| 1019 // Called from JSEntryStub::GenerateBody(). |
| 1020 Register function = x1; |
| 1021 Register receiver = x2; |
| 1022 Register argc = x3; |
| 1023 Register argv = x4; |
| 1024 |
| 1025 // Clear the context before we push it when entering the internal frame. |
| 1026 __ Mov(cp, 0); |
| 1027 |
| 1028 { |
| 1029 // Enter an internal frame. |
| 1030 FrameScope scope(masm, StackFrame::INTERNAL); |
| 1031 |
| 1032 // Set up the context from the function argument. |
| 1033 __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset)); |
| 1034 |
| 1035 __ InitializeRootRegister(); |
| 1036 |
| 1037 // Push the function and the receiver onto the stack. |
| 1038 __ Push(function, receiver); |
| 1039 |
| 1040 // Copy arguments to the stack in a loop, in reverse order. |
| 1041 // x3: argc. |
| 1042 // x4: argv. |
| 1043 Label loop, entry; |
| 1044 // Compute the copy end address. |
| 1045 __ Add(x10, argv, Operand(argc, LSL, kPointerSizeLog2)); |
| 1046 |
| 1047 // TODO(all): This can potentially be optimized with ldp/stp to speed up |
| 1048 // argument passing from C++ to JS. |
| 1049 __ B(&entry); |
| 1050 __ Bind(&loop); |
| 1051 __ Ldr(x11, MemOperand(argv, kPointerSize, PostIndex)); |
| 1052 __ Ldr(x12, MemOperand(x11)); // Dereference the handle. |
| 1053 __ Push(x12); // Push the argument. |
| 1054 __ Bind(&entry); |
| 1055 __ Cmp(x10, argv); |
| 1056 __ B(ne, &loop); |
| 1057 |
| 1058 // Initialize all JavaScript callee-saved registers, since they will be seen |
| 1059 // by the garbage collector as part of handlers. |
| 1060 // The original values have been saved in JSEntryStub::GenerateBody(). |
| 1061 __ LoadRoot(x19, Heap::kUndefinedValueRootIndex); |
| 1062 __ Mov(x20, x19); |
| 1063 __ Mov(x21, x19); |
| 1064 __ Mov(x22, x19); |
| 1065 __ Mov(x23, x19); |
| 1066 __ Mov(x24, x19); |
| 1067 __ Mov(x25, x19); |
| 1068 // Don't initialize the reserved registers. |
| 1069 // x26 : root register (root). |
| 1070 // x27 : context pointer (cp). |
| 1071 // x28 : JS stack pointer (jssp). |
| 1072 // x29 : frame pointer (fp). |
| 1073 |
| 1074 // TODO(alexandre): Revisit the MAsm function invocation mechanisms. |
| 1075 // Currently there is a mix of statically and dynamically allocated |
| 1076 // registers. |
| 1077 __ Mov(x0, argc); |
| 1078 if (is_construct) { |
| 1079 // No type feedback cell is available. |
| 1080 Handle<Object> undefined_sentinel( |
| 1081 masm->isolate()->heap()->undefined_value(), masm->isolate()); |
| 1082 __ Mov(x2, Operand(undefined_sentinel)); |
| 1083 |
| 1084 CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); |
| 1085 __ CallStub(&stub); |
| 1086 } else { |
| 1087 ParameterCount actual(x0); |
| 1088 __ InvokeFunction(function, actual, CALL_FUNCTION, |
| 1089 NullCallWrapper(), CALL_AS_METHOD); |
| 1090 } |
| 1091 // Exit the JS internal frame and remove the parameters (except function), |
| 1092 // and return. |
| 1093 } |
| 1094 |
| 1095 // Result is in x0. Return. |
| 1096 __ Ret(); |
| 1097 } |
| 1098 |
| 1099 |
| 1100 void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) { |
| 1101 Generate_JSEntryTrampolineHelper(masm, false); |
| 1102 } |
| 1103 |
| 1104 |
| 1105 void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { |
| 1106 Generate_JSEntryTrampolineHelper(masm, true); |
| 1107 } |
| 1108 |
| 1109 |
| 1110 static void GenerateLazyCompile(MacroAssembler* masm, Runtime::FunctionId fid) { |
| 1111 ASSERT((fid == Runtime::kLazyCompile) || (fid == Runtime::kLazyRecompile)); |
| 1112 // x1: function. |
| 1113 // x5: call kind (set by SetCallKind()). |
| 1114 Register function = x1; |
| 1115 Register call_kind = x5; |
| 1116 |
| 1117 // Enter an internal frame. |
| 1118 { |
| 1119 FrameScope scope(masm, StackFrame::INTERNAL); |
| 1120 |
| 1121 // Preserve the function and push the call kind information. |
| 1122 __ Push(function, call_kind); |
| 1123 |
| 1124 // Push the function on the stack as the argument to the runtime function. |
| 1125 __ Push(function); |
| 1126 |
| 1127 __ CallRuntime(fid, 1); |
| 1128 // Calculate the entry point. |
| 1129 __ Add(x2, x0, Code::kHeaderSize - kHeapObjectTag); |
| 1130 |
| 1131 // Restore call kind information and the saved function. |
| 1132 __ Pop(call_kind, function); |
| 1133 |
| 1134 // Tear down internal frame. |
| 1135 } |
| 1136 |
| 1137 // Do a tail-call of the compiled function. |
| 1138 __ Jump(x2); |
| 1139 } |
| 1140 |
| 1141 |
| 1142 void Builtins::Generate_LazyCompile(MacroAssembler* masm) { |
| 1143 GenerateLazyCompile(masm, Runtime::kLazyCompile); |
| 1144 } |
| 1145 |
| 1146 |
| 1147 void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { |
| 1148 GenerateLazyCompile(masm, Runtime::kLazyRecompile); |
| 1149 } |
| 1150 |
| 1151 |
| 1152 static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { |
| 1153 // For now, we are relying on the fact that make_code_young doesn't do any |
| 1154 // garbage collection, which allows us to save/restore the registers without |
| 1155 // worrying about which of them contain pointers. We also don't build an |
| 1156 // internal frame to make the code fast, since we shouldn't have to do stack |
| 1157 // crawls in MakeCodeYoung. This seems a bit fragile. |
| 1158 |
| 1159 // The following caller-saved registers must be saved and restored when |
| 1160 // calling through to the runtime: |
| 1161 // x0 - The address from which to resume execution. |
| 1162 // x1 - The JSFunction object. |
| 1163 // lr - The return address for the JSFunction itself. It has not yet been |
| 1164 // preserved on the stack because the frame setup code was replaced |
| 1165 // with a call to this stub, to handle code ageing. |
| 1166 { |
| 1167 FrameScope scope(masm, StackFrame::MANUAL); |
| 1168 __ Push(x0, x1, fp, lr); |
| 1169 __ CallCFunction( |
| 1170 ExternalReference::get_make_code_young_function(masm->isolate()), 1); |
| 1171 __ Pop(lr, fp, x1, x0); |
| 1172 } |
| 1173 |
| 1174 // The calling function has been made young again, so return to execute the |
| 1175 // real frame set-up code. |
| 1176 __ Br(x0); |
| 1177 } |
| 1178 |
| 1179 #define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \ |
| 1180 void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \ |
| 1181 MacroAssembler* masm) { \ |
| 1182 GenerateMakeCodeYoungAgainCommon(masm); \ |
| 1183 } \ |
| 1184 void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \ |
| 1185 MacroAssembler* masm) { \ |
| 1186 GenerateMakeCodeYoungAgainCommon(masm); \ |
| 1187 } |
| 1188 CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR) |
| 1189 #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR |
| 1190 |
| 1191 |
| 1192 void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) { |
| 1193 { |
| 1194 FrameScope scope(masm, StackFrame::INTERNAL); |
| 1195 |
| 1196 // Preserve registers across notification, this is important for compiled |
| 1197 // stubs that tail call the runtime on deopts passing their parameters in |
| 1198 // registers. |
| 1199 // TODO(jbramley): Is it correct (and appropriate) to use safepoint |
| 1200 // registers here? According to the comment above, we should only need to |
| 1201 // preserve the registers with parameters. |
| 1202 __ PushXRegList(kSafepointSavedRegisters); |
| 1203 // Pass the function and deoptimization type to the runtime system. |
| 1204 __ CallRuntime(Runtime::kNotifyStubFailure, 0); |
| 1205 __ PopXRegList(kSafepointSavedRegisters); |
| 1206 } |
| 1207 |
| 1208 // Ignore state (pushed by Deoptimizer::EntryGenerator::Generate). |
| 1209 __ Drop(1); |
| 1210 |
| 1211 // Jump to the miss handler. Deoptimizer::EntryGenerator::Generate loads this |
| 1212 // into lr before it jumps here. |
| 1213 __ Br(lr); |
| 1214 } |
| 1215 |
| 1216 |
| 1217 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, |
| 1218 Deoptimizer::BailoutType type) { |
| 1219 { |
| 1220 FrameScope scope(masm, StackFrame::INTERNAL); |
| 1221 // Pass the deoptimization type to the runtime system. |
| 1222 __ Mov(x0, Operand(Smi::FromInt(static_cast<int>(type)))); |
| 1223 __ Push(x0); |
| 1224 __ CallRuntime(Runtime::kNotifyDeoptimized, 1); |
| 1225 } |
| 1226 |
| 1227 // Get the full codegen state from the stack and untag it. |
| 1228 Register state = x6; |
| 1229 __ Peek(state, 0); |
| 1230 __ SmiUntag(state); |
| 1231 |
| 1232 // Switch on the state. |
| 1233 Label with_tos_register, unknown_state; |
| 1234 __ CompareAndBranch( |
| 1235 state, FullCodeGenerator::NO_REGISTERS, ne, &with_tos_register); |
| 1236 __ Drop(1); // Remove state. |
| 1237 __ Ret(); |
| 1238 |
| 1239 __ Bind(&with_tos_register); |
| 1240 // Reload TOS register. |
| 1241 __ Peek(x0, kPointerSize); |
| 1242 __ CompareAndBranch(state, FullCodeGenerator::TOS_REG, ne, &unknown_state); |
| 1243 __ Drop(2); // Remove state and TOS. |
| 1244 __ Ret(); |
| 1245 |
| 1246 __ Bind(&unknown_state); |
| 1247 __ Abort("Invalid fullcodegen state."); |
| 1248 } |
| 1249 |
| 1250 |
| 1251 void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { |
| 1252 Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER); |
| 1253 } |
| 1254 |
| 1255 |
| 1256 void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) { |
| 1257 Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY); |
| 1258 } |
| 1259 |
| 1260 |
| 1261 void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) { |
| 1262 Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT); |
| 1263 } |
| 1264 |
| 1265 |
| 1266 void Builtins::Generate_NotifyOSR(MacroAssembler* masm) { |
| 1267 ASM_UNIMPLEMENTED_BREAK("Implement Generate_NotifyOSR"); |
| 1268 } |
| 1269 |
| 1270 |
| 1271 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { |
| 1272 ASM_UNIMPLEMENTED_BREAK("Implement Generate_OnStackReplacement"); |
| 1273 } |
| 1274 |
| 1275 |
| 1276 void Builtins::Generate_FunctionCall(MacroAssembler* masm) { |
| 1277 Register receiver_type = x13; |
| 1278 |
| 1279 ASM_LOCATION("Builtins::Generate_FunctionCall"); |
| 1280 // TODO(all/rames): Optimize and use named registers. |
| 1281 // 1. Make sure we have at least one argument. |
| 1282 // x0: actual number of arguments |
| 1283 { Label done; |
| 1284 __ Cbnz(x0, &done); |
| 1285 __ LoadRoot(x10, Heap::kUndefinedValueRootIndex); |
| 1286 __ Push(x10); |
| 1287 __ Mov(x0, 1); |
| 1288 __ Bind(&done); |
| 1289 } |
| 1290 |
| 1291 // 2. Get the function to call (passed as receiver) from the stack, check |
| 1292 // if it is a function. |
| 1293 // x0: actual number of arguments |
| 1294 Label slow, non_function; |
| 1295 // TODO(jbramley): Consider giving Peek a unit_size parameter, like Claim and |
| 1296 // Drop. This usage pattern is very common. |
| 1297 __ Peek(x1, Operand(x0, LSL, kXRegSizeInBytesLog2)); |
| 1298 __ JumpIfSmi(x1, &non_function); |
| 1299 __ JumpIfNotObjectType(x1, x10, receiver_type, JS_FUNCTION_TYPE, &slow); |
| 1300 |
| 1301 // 3a. Patch the first argument if necessary when calling a function. |
| 1302 // x0: actual number of arguments |
| 1303 // x1: function |
| 1304 Label shift_arguments; |
| 1305 __ Mov(x4, 0); // Indicates a regular JS_FUNCTION. |
| 1306 { Label convert_to_object, use_global_receiver, patch_receiver; |
| 1307 // Change context eagerly in case we need the global receiver. |
| 1308 __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset)); |
| 1309 |
| 1310 // Do not transform the receiver for strict mode functions. |
| 1311 __ Ldr(x10, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); |
| 1312 __ Ldr(w11, FieldMemOperand(x10, SharedFunctionInfo::kCompilerHintsOffset)); |
| 1313 __ Tbnz(x11, SharedFunctionInfo::kStrictModeFunction, &shift_arguments); |
| 1314 |
| 1315 // TODO(all): Should we insert space to avoid BTAC collisions? |
| 1316 // Do not transform the receiver for native (compiler hints already in x11). |
| 1317 __ Tbnz(x11, SharedFunctionInfo::kNative, &shift_arguments); |
| 1318 |
| 1319 // Compute the receiver in non-strict mode. |
| 1320 __ Sub(x10, x0, 1); |
| 1321 __ Peek(x2, Operand(x10, LSL, kXRegSizeInBytesLog2)); |
| 1322 // x0: actual number of arguments |
| 1323 // x1: function |
| 1324 // x2: first argument |
| 1325 __ JumpIfSmi(x2, &convert_to_object); |
| 1326 |
| 1327 // TODO(all): We could potentially work to optimize loads of root values. |
| 1328 // TODO(all): If the indexes are successive we can use 'ldp'. |
| 1329 __ JumpIfRoot(x2, Heap::kUndefinedValueRootIndex, &use_global_receiver); |
| 1330 __ JumpIfRoot(x2, Heap::kNullValueRootIndex, &use_global_receiver); |
| 1331 |
| 1332 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); |
| 1333 __ JumpIfObjectType(x2, x10, x11, FIRST_SPEC_OBJECT_TYPE, &shift_arguments, |
| 1334 ge); |
| 1335 |
| 1336 __ Bind(&convert_to_object); |
| 1337 |
| 1338 { |
| 1339 // Enter an internal frame in order to preserve argument count. |
| 1340 FrameScope scope(masm, StackFrame::INTERNAL); |
| 1341 __ SmiTag(x0); |
| 1342 |
| 1343 __ Push(x0, x2); |
| 1344 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); |
| 1345 __ Mov(x2, x0); |
| 1346 |
| 1347 __ Pop(x0); |
| 1348 __ SmiUntag(x0); |
| 1349 |
| 1350 // Exit the internal frame. |
| 1351 } |
| 1352 |
| 1353 // Restore the function to x1, and the flag to x4. |
| 1354 __ Peek(x1, Operand(x0, LSL, kXRegSizeInBytesLog2)); |
| 1355 __ Mov(x4, 0); |
| 1356 __ B(&patch_receiver); |
| 1357 |
| 1358 // Use the global receiver object from the called function as the |
| 1359 // receiver. |
| 1360 __ Bind(&use_global_receiver); |
| 1361 const int kGlobalIndex = |
| 1362 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; |
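| // Load the current global object, then its native context, that context's |
| // global object, and finally the global receiver from it. |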
| 1363 __ Ldr(x10, FieldMemOperand(cp, kGlobalIndex)); |
| 1364 __ Ldr(x2, FieldMemOperand(x10, GlobalObject::kNativeContextOffset)); |
| 1365 __ Ldr(x10, FieldMemOperand(x2, kGlobalIndex)); |
| 1366 __ Ldr(x2, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset)); |
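| // (The loads above walk: current context -> its global object -> the native
| // context -> that context's global object -> its global receiver, the object
| // used as 'this' for non-strict calls.)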
| 1367 |
| 1368 __ Bind(&patch_receiver); |
| 1369 __ Sub(x10, x0, 1); |
| 1370 __ Poke(x2, Operand(x10, LSL, kXRegSizeInBytesLog2)); |
| 1371 |
| 1372 __ B(&shift_arguments); |
| 1373 } |
| 1374 |
| 1375 // 3b. Check for function proxy. |
| 1376 __ Bind(&slow); |
| 1377 __ Mov(x4, 1); // Indicate function proxy. |
| 1378 __ Cmp(receiver_type, JS_FUNCTION_PROXY_TYPE); |
| 1379 __ B(eq, &shift_arguments); |
| 1380 __ Bind(&non_function); |
| 1381 __ Mov(x4, 2); // Indicate non-function. |
| 1382 |
| 1383 // 3c. Patch the first argument when calling a non-function. The |
| 1384 // CALL_NON_FUNCTION builtin expects the non-function callee as |
| 1385 // receiver, so overwrite the first argument which will ultimately |
| 1386 // become the receiver. |
| 1387 // x0: actual number of arguments |
| 1388 // x1: function |
| 1389 // x4: call type (0: JS function, 1: function proxy, 2: non-function) |
| 1390 __ Sub(x10, x0, 1); |
| 1391 __ Poke(x1, Operand(x10, LSL, kXRegSizeInBytesLog2)); |
| 1392 |
| 1393 // 4. Shift the arguments one slot down on the stack (overwriting the
| 1394 // original receiver). Adjust the argument count so that the original
| 1395 // first argument becomes the new receiver.
| 1396 // x0: actual number of arguments |
| 1397 // x1: function |
| 1398 // x4: call type (0: JS function, 1: function proxy, 2: non-function) |
| 1399 __ Bind(&shift_arguments); |
| 1400 { Label loop; |
| 1401 // Calculate the copy addresses: x11 is the destination start (the receiver slot), x10 the source start one slot below; the copy ends when the source reaches jssp.
| 1402 __ Add(x11, jssp, Operand(x0, LSL, kPointerSizeLog2)); |
| 1403 __ Sub(x10, x11, kPointerSize); |
| 1404 |
| 1405 // TODO(all): Optimize to copy values 2 by 2? |
| 1406 __ Bind(&loop); |
| 1407 __ Ldr(x12, MemOperand(x10, -kPointerSize, PostIndex)); |
| 1408 __ Str(x12, MemOperand(x11, -kPointerSize, PostIndex)); |
| 1409 __ Cmp(x10, jssp); |
| 1410 __ B(ge, &loop); |
| 1411 // Adjust the actual number of arguments and remove the top element |
| 1412 // (which is a copy of the last argument). |
| 1413 __ Sub(x0, x0, 1); |
| 1414 __ Drop(1); |
| 1415 } |
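| // A worked illustration (assuming a call like f.call(r, a, b), so that f was
| // pushed as the receiver and x0 was 3 on entry to this builtin):
| //   before step 4: stack from jssp upwards is [ b, a, r, f ], x1 = f
| //   after step 4:  stack from jssp upwards is [ b, a, r ],    x0 = 2
| // so r has become the receiver seen by f, and f itself is called via x1.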
| 1416 |
| 1417 // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin, |
| 1418 // or a function proxy via CALL_FUNCTION_PROXY. |
| 1419 // x0: actual number of arguments |
| 1420 // x1: function |
| 1421 // x4: call type (0: JS function, 1: function proxy, 2: non-function) |
| 1422 { Label function, non_proxy; |
| 1423 __ Cbz(x4, &function); |
| 1424 ASM_UNIMPLEMENTED("Builtins::Generate_FunctionCall non-function"); |
| 1425 // Expected number of arguments is 0 for CALL_NON_FUNCTION. |
| 1426 __ Mov(x2, 0); |
| 1427 __ SetCallKind(x5, CALL_AS_METHOD); |
| 1428 __ Cmp(x4, 1); |
| 1429 __ B(ne, &non_proxy); |
| 1430 |
| 1431 __ Push(x1); // Re-add proxy object as additional argument. |
| 1432 __ Add(x0, x0, 1); |
| 1433 __ GetBuiltinEntry(x3, Builtins::CALL_FUNCTION_PROXY); |
| 1434 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), |
| 1435 RelocInfo::CODE_TARGET); |
| 1436 |
| 1437 __ Bind(&non_proxy); |
| 1438 __ GetBuiltinEntry(x3, Builtins::CALL_NON_FUNCTION); |
| 1439 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), |
| 1440 RelocInfo::CODE_TARGET); |
| 1441 __ Bind(&function); |
| 1442 } |
| 1443 |
| 1444 // 5b. Get the code to call from the function and check that the number of |
| 1445 // expected arguments matches what we're providing. If so, jump |
| 1446 // (tail-call) to the code in register x3 without checking arguments.
| 1447 // x0: actual number of arguments |
| 1448 // x1: function |
| 1449 __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); |
| 1450 __ Ldrsw(x2, |
| 1451 FieldMemOperand(x3, |
| 1452 SharedFunctionInfo::kFormalParameterCountOffset)); |
| 1453 __ Ldr(x3, FieldMemOperand(x1, JSFunction::kCodeEntryOffset)); |
| 1454 __ SetCallKind(x5, CALL_AS_METHOD); |
| 1455 Label dont_adapt_args; |
| 1456 __ Cmp(x2, x0); // Check formal and actual parameter counts. |
| 1457 __ B(eq, &dont_adapt_args); |
| 1458 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), |
| 1459 RelocInfo::CODE_TARGET); |
| 1460 |
| 1461 __ Bind(&dont_adapt_args); |
| 1462 ParameterCount expected(0); |
| 1463 __ InvokeCode(x3, expected, expected, JUMP_FUNCTION, |
| 1464 NullCallWrapper(), CALL_AS_METHOD); |
| 1465 } |
| 1466 |
| 1467 |
| 1468 void Builtins::Generate_FunctionApply(MacroAssembler* masm) { |
| 1469 ASM_LOCATION("Builtins::Generate_FunctionApply"); |
| 1470 const int kIndexOffset = -5 * kPointerSize; |
| 1471 const int kLimitOffset = -4 * kPointerSize; |
| 1472 const int kArgsOffset = 2 * kPointerSize; |
| 1473 const int kReceiverOffset = 3 * kPointerSize; |
| 1474 const int kFunctionOffset = 4 * kPointerSize; |
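| // These offsets are fp-relative once the internal frame below has been set
| // up: the positive ones locate the apply call's own stack arguments (the
| // function to apply, the receiver to pass to it, and the arguments array),
| // while kIndexOffset and kLimitOffset name the frame slots that will hold
| // the copy index and the argument-count limit pushed later on.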
| 1475 |
| 1476 { |
| 1477 FrameScope frame_scope(masm, StackFrame::INTERNAL); |
| 1478 |
| 1479 Register args = x12; |
| 1480 Register receiver = x14; |
| 1481 Register function = x15; |
| 1482 |
| 1483 // Get the length of the arguments via a builtin call. |
| 1484 __ Ldr(function, MemOperand(fp, kFunctionOffset)); |
| 1485 __ Ldr(args, MemOperand(fp, kArgsOffset)); |
| 1486 __ Push(function, args); |
| 1487 __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); |
| 1488 Register argc = x0; |
| 1489 |
| 1490 // Check the stack for overflow. |
| 1491 // We are not trying to catch interruptions (e.g. debug break and |
| 1492 // preemption) here, so the "real stack limit" is checked. |
| 1493 Label enough_stack_space; |
| 1494 __ LoadRoot(x10, Heap::kRealStackLimitRootIndex); |
| 1495 __ Ldr(function, MemOperand(fp, kFunctionOffset)); |
| 1496 // Make x10 the space we have left. The stack might already be overflowed
| 1497 // here, in which case x10 will be negative.
| 1498 // TODO(jbramley): Check that the stack usage here is safe. |
| 1499 __ Sub(x10, jssp, x10); |
| 1500 // Check if the arguments will overflow the stack. |
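| // (argc is a smi: shifting it right by kSmiShift - kPointerSizeLog2 untags it
| // and multiplies by kPointerSize in one go, giving the number of bytes the
| // arguments will occupy.)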
| 1501 __ Cmp(x10, Operand(argc, LSR, kSmiShift - kPointerSizeLog2)); |
| 1502 __ B(gt, &enough_stack_space); |
| 1503 // There is not enough stack space, so use a builtin to throw an appropriate |
| 1504 // error. |
| 1505 __ Push(function, argc); |
| 1506 __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); |
| 1507 // We should never return from the APPLY_OVERFLOW builtin. |
| 1508 if (__ emit_debug_code()) { |
| 1509 __ Abort("Unreachable code."); |
| 1510 } |
| 1511 |
| 1512 __ Bind(&enough_stack_space); |
| 1513 // Push current limit and index. |
| 1514 __ Mov(x1, 0); // Initial index. |
| 1515 __ Push(argc, x1); |
| 1516 |
| 1517 Label push_receiver; |
| 1518 __ Ldr(receiver, MemOperand(fp, kReceiverOffset)); |
| 1519 |
| 1520 // Check that the function is a JS function. Otherwise it must be a proxy,
| 1521 // in which case the function proxy is invoked later instead.
| 1522 __ JumpIfNotObjectType(function, x10, x11, JS_FUNCTION_TYPE, |
| 1523 &push_receiver); |
| 1524 |
| 1525 // Change context eagerly to get the right global object if necessary. |
| 1526 __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset)); |
| 1527 // Load the shared function info. |
| 1528 __ Ldr(x2, FieldMemOperand(function, |
| 1529 JSFunction::kSharedFunctionInfoOffset)); |
| 1530 |
| 1531 // Compute and push the receiver. |
| 1532 // Do not transform the receiver for strict mode functions. |
| 1533 Label convert_receiver_to_object, use_global_receiver; |
| 1534 __ Ldr(w10, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset)); |
| 1535 __ Tbnz(x10, SharedFunctionInfo::kStrictModeFunction, &push_receiver); |
| 1536 // Do not transform the receiver for native functions. |
| 1537 __ Tbnz(x10, SharedFunctionInfo::kNative, &push_receiver); |
| 1538 |
| 1539 // Compute the receiver in non-strict mode. |
| 1540 __ JumpIfSmi(receiver, &convert_receiver_to_object); |
| 1541 __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_receiver); |
| 1542 __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, |
| 1543 &use_global_receiver); |
| 1544 |
| 1545 // Check if the receiver is already a JavaScript object. |
| 1546 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); |
| 1547 __ JumpIfObjectType(receiver, x10, x11, FIRST_SPEC_OBJECT_TYPE, |
| 1548 &push_receiver, ge); |
| 1549 |
| 1550 // Call a builtin to convert the receiver to a regular object. |
| 1551 __ Bind(&convert_receiver_to_object); |
| 1552 __ Push(receiver); |
| 1553 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); |
| 1554 __ Mov(receiver, x0); |
| 1555 __ B(&push_receiver); |
| 1556 |
| 1557 // Use the current global receiver object as the receiver. |
| 1558 __ Bind(&use_global_receiver); |
| 1559 const int kGlobalOffset = |
| 1560 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; |
| 1561 __ Ldr(x10, FieldMemOperand(cp, kGlobalOffset)); |
| 1562 __ Ldr(x11, FieldMemOperand(x10, GlobalObject::kNativeContextOffset)); |
| 1563 __ Ldr(x10, FieldMemOperand(x11, kGlobalOffset)); |
| 1564 __ Ldr(receiver, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset)); |
| 1565 |
| 1566 // Push the receiver |
| 1567 __ Bind(&push_receiver); |
| 1568 __ Push(receiver); |
| 1569 |
| 1570 // Copy all arguments from the array to the stack. |
| 1571 Label entry, loop; |
| 1572 Register current = x0; |
| 1573 __ Ldr(current, MemOperand(fp, kIndexOffset)); |
| 1574 __ B(&entry); |
| 1575 |
| 1576 __ Bind(&loop); |
| 1577 // Load the current argument from the arguments array and push it. |
| 1578 // TODO(all): Couldn't we optimize this for JS arrays? |
| 1579 |
| 1580 __ Ldr(x1, MemOperand(fp, kArgsOffset)); |
| 1581 __ Push(x1, current); |
| 1582 |
| 1583 // Call the runtime to access the property in the arguments array. |
| 1584 __ CallRuntime(Runtime::kGetProperty, 2); |
| 1585 __ Push(x0); |
| 1586 |
| 1587 // Increment the current index (kept as a smi on the frame) and store it back.
| 1588 __ Ldr(current, MemOperand(fp, kIndexOffset)); |
| 1589 __ Add(current, current, Operand(Smi::FromInt(1))); |
| 1590 __ Str(current, MemOperand(fp, kIndexOffset)); |
| 1591 |
| 1592 // Test if the copy loop has finished copying all the elements from the |
| 1593 // arguments object. |
| 1594 __ Bind(&entry); |
| 1595 __ Ldr(x1, MemOperand(fp, kLimitOffset)); |
| 1596 __ Cmp(current, x1); |
| 1597 __ B(ne, &loop); |
| 1598 |
| 1599 // At the end of the loop, the number of arguments is stored in 'current', |
| 1600 // represented as a smi. |
| 1601 |
| 1602 function = x1; // From now on we want the function to be kept in x1.
| 1603 __ Ldr(function, MemOperand(fp, kFunctionOffset)); |
| 1604 |
| 1605 // Invoke the function. |
| 1606 Label call_proxy; |
| 1607 ParameterCount actual(current); |
| 1608 __ SmiUntag(current); |
| 1609 __ JumpIfNotObjectType(function, x10, x11, JS_FUNCTION_TYPE, &call_proxy); |
| 1610 __ InvokeFunction(function, actual, CALL_FUNCTION, |
| 1611 NullCallWrapper(), CALL_AS_METHOD); |
| 1612 frame_scope.GenerateLeaveFrame(); |
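| // Drop the apply call's three stack arguments (the function, the receiver
| // and the arguments array) before returning.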
| 1613 __ Drop(3); |
| 1614 __ Ret(); |
| 1615 |
| 1616 // Invoke the function proxy. |
| 1617 __ Bind(&call_proxy); |
| 1618 // x0 : argc |
| 1619 // x1 : function |
| 1620 __ Push(function); // Add function proxy as last argument. |
| 1621 __ Add(x0, x0, 1); |
| 1622 __ Mov(x2, 0); |
| 1623 __ SetCallKind(x5, CALL_AS_METHOD); |
| 1624 __ GetBuiltinEntry(x3, Builtins::CALL_FUNCTION_PROXY); |
| 1625 __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), |
| 1626 RelocInfo::CODE_TARGET); |
| 1627 } |
| 1628 __ Drop(3); |
| 1629 __ Ret(); |
| 1630 } |
| 1631 |
| 1632 |
| 1633 static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) { |
| 1634 __ SmiTag(x10, x0); |
| 1635 __ Mov(x11, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
| 1636 __ Push(lr, fp); |
| 1637 __ Push(x11, x1, x10); |
| 1638 __ Add(fp, jssp, 3 * kPointerSize); |
| 1639 } |
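| // After EnterArgumentsAdaptorFrame the frame looks like this (a sketch;
| // higher addresses first):
| //   fp + 8  : saved lr
| //   fp      : caller's saved fp   <-- fp points here
| //   fp - 8  : ARGUMENTS_ADAPTOR frame type (as a smi)
| //   fp - 16 : function (x1)
| //   fp - 24 : actual argument count (as a smi)
| // LeaveArgumentsAdaptorFrame below reloads the count from fp - 24.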
| 1640 |
| 1641 |
| 1642 static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) { |
| 1643 // ----------- S t a t e ------------- |
| 1644 // -- x0 : result being passed through |
| 1645 // ----------------------------------- |
| 1646 // Get the number of arguments passed (as a smi), tear down the frame and |
| 1647 // then drop the parameters and the receiver. |
| 1648 __ Ldr(x10, MemOperand(fp, -3 * kPointerSize)); |
| 1649 __ Mov(jssp, fp); |
| 1650 __ Pop(fp, lr); |
| 1651 __ DropBySMI(x10, kXRegSizeInBytes); |
| 1652 __ Drop(1); |
| 1653 } |
| 1654 |
| 1655 |
| 1656 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { |
| 1657 ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline"); |
| 1658 // ----------- S t a t e ------------- |
| 1659 // -- x0 : actual number of arguments |
| 1660 // -- x1 : function (passed through to callee) |
| 1661 // -- x2 : expected number of arguments |
| 1662 // -- x3 : code entry to call |
| 1663 // -- x5 : call kind information |
| 1664 // ----------------------------------- |
| 1665 |
| 1666 Label invoke, dont_adapt_arguments; |
| 1667 |
| 1668 Label enough, too_few; |
| 1669 __ Cmp(x0, x2); |
| 1670 __ B(lt, &too_few); |
| 1671 __ Cmp(x2, SharedFunctionInfo::kDontAdaptArgumentsSentinel); |
| 1672 __ B(eq, &dont_adapt_arguments); |
| 1673 |
| 1674 { // Enough parameters: actual >= expected |
| 1675 EnterArgumentsAdaptorFrame(masm); |
| 1676 |
| 1677 // Calculate copy start address into x10 and end address into x11. |
| 1678 // x0: actual number of arguments |
| 1679 // x1: function |
| 1680 // x2: expected number of arguments |
| 1681 // x3: code entry to call |
| 1682 __ Add(x10, fp, Operand(x0, LSL, kPointerSizeLog2)); |
| 1683 // Adjust for the saved fp and lr so that x10 points at the receiver slot.
| 1684 __ Add(x10, x10, 2 * kPointerSize); |
| 1685 __ Sub(x11, x10, Operand(x2, LSL, kPointerSizeLog2)); |
| 1686 |
| 1687 // Copy the arguments (including the receiver) to the new stack frame. |
| 1688 // x0: actual number of arguments |
| 1689 // x1: function |
| 1690 // x2: expected number of arguments |
| 1691 // x3: code entry to call |
| 1692 // x10: copy start address |
| 1693 // x11: copy end address |
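| // Note: x10 starts at the caller's receiver slot and x11 ends 'expected'
| // slots below it, so the loop copies the receiver plus the first 'expected'
| // arguments; any extra actual arguments are left behind in the caller's frame.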
| 1694 |
| 1695 // TODO(all): Should we push values 2 by 2? |
| 1696 Label copy; |
| 1697 __ Bind(©); |
| 1698 __ Cmp(x10, x11); |
| 1699 __ Ldr(x12, MemOperand(x10, -kPointerSize, PostIndex)); |
| 1700 __ Push(x12); |
| 1701 __ B(gt, ©); |
| 1702 |
| 1703 __ B(&invoke); |
| 1704 } |
| 1705 |
| 1706 { // Too few parameters: actual < expected
| 1707 __ Bind(&too_few); |
| 1708 EnterArgumentsAdaptorFrame(masm); |
| 1709 |
| 1710 // Calculate copy start address into x10 and copy end address into x11. |
| 1711 // x0: actual number of arguments |
| 1712 // x1: function |
| 1713 // x2: expected number of arguments |
| 1714 // x3: code entry to call |
| 1715 // Adjust for return address. |
| 1716 __ Add(x11, fp, 1 * kPointerSize); |
| 1717 __ Add(x10, x11, Operand(x0, LSL, kPointerSizeLog2)); |
| 1718 __ Add(x10, x10, 1 * kPointerSize); |
| 1719 |
| 1720 // Copy the arguments (including the receiver) to the new stack frame. |
| 1721 // x0: actual number of arguments |
| 1722 // x1: function |
| 1723 // x2: expected number of arguments |
| 1724 // x3: code entry to call |
| 1725 // x10: copy start address |
| 1726 // x11: copy end address |
| 1727 Label copy; |
| 1728 __ Bind(©); |
| 1729 __ Ldr(x12, MemOperand(x10, -kPointerSize, PostIndex)); |
| 1730 __ Push(x12); |
| 1731 __ Cmp(x10, x11); // Compare before moving to next argument. |
| 1732 __ B(ne, ©); |
| 1733 |
| 1734 // Fill the remaining expected arguments with undefined. |
| 1735 // x0: actual number of arguments |
| 1736 // x1: function |
| 1737 // x2: expected number of arguments |
| 1738 // x3: code entry to call |
| 1739 __ LoadRoot(x10, Heap::kUndefinedValueRootIndex); |
| 1740 __ Sub(x11, fp, Operand(x2, LSL, kPointerSizeLog2)); |
| 1741 // Adjust for the arguments adaptor frame and already pushed receiver. |
| 1742 __ Sub(x11, x11, 4 * kPointerSize); |
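| // With this end address, exactly (expected - actual) undefined values are
| // pushed, so the callee sees 'expected' arguments plus the receiver.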
| 1743 |
| 1744 // TODO(all): Optimize this to use ldp? |
| 1745 Label fill; |
| 1746 __ Bind(&fill); |
| 1747 __ Push(x10); |
| 1748 __ Cmp(jssp, x11); |
| 1749 __ B(ne, &fill); |
| 1750 } |
| 1751 |
| 1752 // Arguments have been adapted. Now call the entry point. |
| 1753 __ Bind(&invoke); |
| 1754 __ Call(x3); |
| 1755 |
| 1756 // Store offset of return address for deoptimizer. |
| 1757 masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset()); |
| 1758 |
| 1759 // Exit frame and return. |
| 1760 LeaveArgumentsAdaptorFrame(masm); |
| 1761 __ Ret(); |
| 1762 |
| 1763 // Call the entry point without adapting the arguments. |
| 1764 __ Bind(&dont_adapt_arguments); |
| 1765 __ Jump(x3); |
| 1766 } |
| 1767 |
| 1768 |
| 1769 #undef __ |
| 1770 |
| 1771 } } // namespace v8::internal |
| 1772 |
| 1773 #endif  // V8_TARGET_ARCH_A64