| OLD | NEW |
| (Empty) | |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are |
| 4 // met: |
| 5 // |
| 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. |
| 12 // * Neither the name of Google Inc. nor the names of its |
| 13 // contributors may be used to endorse or promote products derived |
| 14 // from this software without specific prior written permission. |
| 15 // |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 |
| 28 #include "v8.h" |
| 29 |
| 30 #if defined(V8_TARGET_ARCH_A64) |
| 31 |
| 32 #include "bootstrapper.h" |
| 33 #include "code-stubs.h" |
| 34 #include "regexp-macro-assembler.h" |
| 35 #include "stub-cache.h" |
| 36 |
| 37 namespace v8 { |
| 38 namespace internal { |
| 39 |
| 40 |
| 41 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor( |
| 42 Isolate* isolate, |
| 43 CodeStubInterfaceDescriptor* descriptor) { |
| 44 // x3: array literals array |
| 45 // x2: array literal index |
| 46 // x1: constant elements |
| 47 static Register registers[] = { x3, x2, x1 }; |
| 48 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); |
| 49 descriptor->register_params_ = registers; |
| 50 descriptor->deoptimization_handler_ = |
| 51 Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry; |
| 52 } |
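| // Editorial note: every InitializeInterfaceDescriptor below follows the same
| // pattern: list the stub's parameter registers, derive the count with the
| // classic C array-length idiom, and record the handler to call if the stub
| // misses or deoptimizes. A minimal sketch of the idiom:
| //
| //   static Register registers[] = { x3, x2, x1 };
| //   int count = sizeof(registers) / sizeof(registers[0]);  // count == 3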
| 53 |
| 54 |
| 55 void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( |
| 56 Isolate* isolate, |
| 57 CodeStubInterfaceDescriptor* descriptor) { |
| 58 // x3: object literals array |
| 59 // x2: object literal index |
| 60 // x1: constant properties |
| 61 // x0: object literal flags |
| 62 static Register registers[] = { x3, x2, x1, x0 }; |
| 63 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); |
| 64 descriptor->register_params_ = registers; |
| 65 descriptor->deoptimization_handler_ = |
| 66 Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry; |
| 67 } |
| 68 |
| 69 |
| 70 void KeyedLoadFastElementStub::InitializeInterfaceDescriptor( |
| 71 Isolate* isolate, |
| 72 CodeStubInterfaceDescriptor* descriptor) { |
| 73 // x1: receiver |
| 74 // x0: key |
| 75 static Register registers[] = { x1, x0 }; |
| 76 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); |
| 77 descriptor->register_params_ = registers; |
| 78 descriptor->deoptimization_handler_ = |
| 79 FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure); |
| 80 } |
| 81 |
| 82 |
| 83 void LoadFieldStub::InitializeInterfaceDescriptor( |
| 84 Isolate* isolate, |
| 85 CodeStubInterfaceDescriptor* descriptor) { |
| 86 // x0: receiver |
| 87 static Register registers[] = { x0 }; |
| 88 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); |
| 89 descriptor->register_params_ = registers; |
| 90 descriptor->deoptimization_handler_ = NULL; |
| 91 } |
| 92 |
| 93 |
| 94 void KeyedLoadFieldStub::InitializeInterfaceDescriptor( |
| 95 Isolate* isolate, |
| 96 CodeStubInterfaceDescriptor* descriptor) { |
| 97 // x1: receiver |
| 98 static Register registers[] = { x1 }; |
| 99 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); |
| 100 descriptor->register_params_ = registers; |
| 101 descriptor->deoptimization_handler_ = NULL; |
| 102 } |
| 103 |
| 104 |
| 105 void KeyedStoreFastElementStub::InitializeInterfaceDescriptor( |
| 106 Isolate* isolate, |
| 107 CodeStubInterfaceDescriptor* descriptor) { |
| 108 // x2: receiver |
| 109 // x1: key |
| 110 // x0: value |
| 111 static Register registers[] = { x2, x1, x0 }; |
| 112 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); |
| 113 descriptor->register_params_ = registers; |
| 114 descriptor->deoptimization_handler_ = |
| 115 FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure); |
| 116 } |
| 117 |
| 118 |
| 119 void TransitionElementsKindStub::InitializeInterfaceDescriptor( |
| 120 Isolate* isolate, |
| 121 CodeStubInterfaceDescriptor* descriptor) { |
| 122 // x0: value (js_array) |
| 123 // x1: to_map |
| 124 static Register registers[] = { x0, x1 }; |
| 125 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); |
| 126 descriptor->register_params_ = registers; |
| 127 Address entry = |
| 128 Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry; |
| 129 descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry); |
| 130 } |
| 131 |
| 132 |
| 133 void CompareNilICStub::InitializeInterfaceDescriptor( |
| 134 Isolate* isolate, |
| 135 CodeStubInterfaceDescriptor* descriptor) { |
| 136 // x0: value to compare |
| 137 static Register registers[] = { x0 }; |
| 138 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); |
| 139 descriptor->register_params_ = registers; |
| 140 descriptor->deoptimization_handler_ = |
| 141 FUNCTION_ADDR(CompareNilIC_Miss); |
| 142 descriptor->SetMissHandler( |
| 143 ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate)); |
| 144 } |
| 145 |
| 146 |
| 147 static void InitializeArrayConstructorDescriptor( |
| 148 Isolate* isolate, |
| 149 CodeStubInterfaceDescriptor* descriptor, |
| 150 int constant_stack_parameter_count) { |
| 151 // x1: function |
| 152 // x2: type info cell with elements kind |
| 153 static Register registers[] = { x1, x2 }; |
| 154 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); |
| 155 if (constant_stack_parameter_count != 0) { |
| 156 // A stack parameter count is needed (constructor pointer plus a single argument).
| 157 // x0: number of arguments to the constructor function |
| 158 descriptor->stack_parameter_count_ = &x0; |
| 159 } |
| 160 descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; |
| 161 descriptor->register_params_ = registers; |
| 162 descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; |
| 163 descriptor->deoptimization_handler_ = |
| 164 Runtime::FunctionForId(Runtime::kArrayConstructor)->entry; |
| 165 } |
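| // Editorial note: constant_stack_parameter_count selects the arity the Array
| // constructor stubs below are specialized for: 0 (no arguments), 1 (a single
| // argument) or -1 (count only known at runtime). In the non-zero cases the
| // actual argument count travels in x0, which is why stack_parameter_count_
| // is pointed at x0 above.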
| 166 |
| 167 |
| 168 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( |
| 169 Isolate* isolate, |
| 170 CodeStubInterfaceDescriptor* descriptor) { |
| 171 InitializeArrayConstructorDescriptor(isolate, descriptor, 0); |
| 172 } |
| 173 |
| 174 |
| 175 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( |
| 176 Isolate* isolate, |
| 177 CodeStubInterfaceDescriptor* descriptor) { |
| 178 InitializeArrayConstructorDescriptor(isolate, descriptor, 1); |
| 179 } |
| 180 |
| 181 |
| 182 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( |
| 183 Isolate* isolate, |
| 184 CodeStubInterfaceDescriptor* descriptor) { |
| 185 InitializeArrayConstructorDescriptor(isolate, descriptor, -1); |
| 186 } |
| 187 |
| 188 |
| 189 static void InitializeInternalArrayConstructorDescriptor( |
| 190 Isolate* isolate, |
| 191 CodeStubInterfaceDescriptor* descriptor, |
| 192 int constant_stack_parameter_count) { |
| 193 // x1: constructor function |
| 194 static Register registers[] = { x1 }; |
| 195 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); |
| 196 if (constant_stack_parameter_count != 0) { |
| 197 // A stack parameter count is needed (constructor pointer plus a single argument).
| 198 // x0: number of arguments to the constructor function |
| 199 descriptor->stack_parameter_count_ = &x0; |
| 200 } |
| 201 descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; |
| 202 descriptor->register_params_ = registers; |
| 203 descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; |
| 204 descriptor->deoptimization_handler_ = |
| 205 Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry; |
| 206 } |
| 207 |
| 208 |
| 209 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( |
| 210 Isolate* isolate, |
| 211 CodeStubInterfaceDescriptor* descriptor) { |
| 212 InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0); |
| 213 } |
| 214 |
| 215 |
| 216 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( |
| 217 Isolate* isolate, |
| 218 CodeStubInterfaceDescriptor* descriptor) { |
| 219 InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1); |
| 220 } |
| 221 |
| 222 |
| 223 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( |
| 224 Isolate* isolate, |
| 225 CodeStubInterfaceDescriptor* descriptor) { |
| 226 InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1); |
| 227 } |
| 228 |
| 229 |
| 230 void ToBooleanStub::InitializeInterfaceDescriptor( |
| 231 Isolate* isolate, |
| 232 CodeStubInterfaceDescriptor* descriptor) { |
| 233 // x0: value |
| 234 static Register registers[] = { x0 }; |
| 235 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); |
| 236 descriptor->register_params_ = registers; |
| 237 descriptor->deoptimization_handler_ = FUNCTION_ADDR(ToBooleanIC_Miss); |
| 238 descriptor->SetMissHandler( |
| 239 ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate)); |
| 240 } |
| 241 |
| 242 |
| 243 #define __ ACCESS_MASM(masm) |
| 244 |
| 245 |
| 246 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { |
| 247 // Update the static counter each time a new code stub is generated. |
| 248 Isolate* isolate = masm->isolate(); |
| 249 isolate->counters()->code_stubs()->Increment(); |
| 250 |
| 251 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate); |
| 252 int param_count = descriptor->register_param_count_; |
| 253 { |
| 254 // Call the runtime system in a fresh internal frame. |
| 255 FrameScope scope(masm, StackFrame::INTERNAL); |
| 256 ASSERT((descriptor->register_param_count_ == 0) || |
| 257 x0.Is(descriptor->register_params_[param_count - 1])); |
| 258 // Push arguments |
| 259 // TODO(jbramley): Try to push these in blocks. |
| 260 for (int i = 0; i < param_count; ++i) { |
| 261 __ Push(descriptor->register_params_[i]); |
| 262 } |
| 263 ExternalReference miss = descriptor->miss_handler(); |
| 264 __ CallExternalReference(miss, descriptor->register_param_count_); |
| 265 } |
| 266 |
| 267 __ Ret(); |
| 268 } |
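| // Editorial note: a "lightweight miss" simply forwards the stub's register
| // parameters to its C++ miss handler. In pseudo-C, under the internal frame
| // set up above:
| //
| //   for (int i = 0; i < param_count; i++) Push(register_params_[i]);
| //   CallExternalReference(miss_handler, param_count);  // Result in x0.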
| 269 |
| 270 |
| 271 // Input: |
| 272 // x0: object to convert. |
| 273 // Output: |
| 274 // x0: result number. |
| 275 void ToNumberStub::Generate(MacroAssembler* masm) { |
| 276 // See ECMA-262 section 9.3. |
| 277 |
| 278 // If it is a Smi or a HeapNumber, just return the value. |
| 279 Label done; |
| 280 __ JumpIfSmi(x0, &done); |
| 281 __ JumpIfHeapNumber(x0, &done); |
| 282 |
| 283 // Inline checks for specific values that we can easily convert. |
| 284 Label return_zero, return_one; |
| 285 |
| 286 // Check for 'true', 'false', and 'null'. |
| 287 __ JumpIfRoot(x0, Heap::kTrueValueRootIndex, &return_one); |
| 288 __ JumpIfRoot(x0, Heap::kFalseValueRootIndex, &return_zero); |
| 289 __ JumpIfRoot(x0, Heap::kNullValueRootIndex, &return_zero); |
| 290 |
| 291 // Call a builtin to do the job. |
| 292 __ Push(x0); |
| 293 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION); |
| 294 |
| 295 // We never fall through here. |
| 296 if (FLAG_debug_code) { |
| 297 __ Abort("We should never reach this code."); |
| 298 } |
| 299 |
| 300 __ Bind(&return_zero); |
| 301 __ Mov(x0, Operand(Smi::FromInt(0))); |
| 302 __ Ret(); |
| 303 |
| 304 __ Bind(&return_one); |
| 305 __ Mov(x0, Operand(Smi::FromInt(1))); |
| 306 __ Bind(&done); |
| 307 __ Ret(); |
| 308 } |
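| // Editorial note: the fast paths above cover the easy rows of the ECMA-262
| // section 9.3 ToNumber table: smis and heap numbers are returned unchanged,
| // true maps to 1, false and null map to 0. Everything else (strings,
| // undefined, other objects) is handled by the TO_NUMBER builtin.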
| 309 |
| 310 |
| 311 void FastNewClosureStub::Generate(MacroAssembler* masm) { |
| 312 // Create a new closure from the given function info in new space. Set the |
| 313 // context to the current context in cp. |
| 314 Register new_fn = x0; |
| 315 Register function = x1; |
| 316 |
| 317 Counters* counters = masm->isolate()->counters(); |
| 318 |
| 319 Label gc; |
| 320 |
| 321 // Pop the function info from the stack. |
| 322 __ Pop(function); |
| 323 |
| 324 // Attempt to allocate new JSFunction in new space. |
| 325 __ Allocate(JSFunction::kSize, new_fn, x6, x7, &gc, TAG_OBJECT); |
| 326 |
| 327 __ IncrementCounter(counters->fast_new_closure_total(), 1, x6, x7); |
| 328 |
| 329 int map_index = Context::FunctionMapIndex(language_mode_, is_generator_); |
| 330 |
| 331 // Compute the function map in the current native context and set that as the |
| 332 // map of the allocated object. |
| 333 Register global_object = x2; |
| 334 Register global_ctx = x5; |
| 335 Register global_fn_map = x2; |
| 336 __ Ldr(global_object, GlobalObjectMemOperand()); |
| 337 __ Ldr(global_ctx, FieldMemOperand(global_object, |
| 338 GlobalObject::kNativeContextOffset)); |
| 339 __ Ldr(global_fn_map, ContextMemOperand(global_ctx, map_index)); |
| 340 __ Str(global_fn_map, FieldMemOperand(new_fn, HeapObject::kMapOffset)); |
| 341 |
| 342 // Initialize the rest of the function. We don't have to update the write |
| 343 // barrier because the allocated object is in new space. |
| 344 Register empty_array = x2; |
| 345 Register the_hole = x3; |
| 346 __ LoadRoot(empty_array, Heap::kEmptyFixedArrayRootIndex); |
| 347 __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex); |
| 348 |
| 349 __ Str(empty_array, FieldMemOperand(new_fn, JSObject::kPropertiesOffset)); |
| 350 __ Str(empty_array, FieldMemOperand(new_fn, JSObject::kElementsOffset)); |
| 351 __ Str(the_hole, FieldMemOperand(new_fn, |
| 352 JSFunction::kPrototypeOrInitialMapOffset)); |
| 353 __ Str(function, FieldMemOperand(new_fn, |
| 354 JSFunction::kSharedFunctionInfoOffset)); |
| 355 __ Str(cp, FieldMemOperand(new_fn, JSFunction::kContextOffset)); |
| 356 __ Str(empty_array, FieldMemOperand(new_fn, JSFunction::kLiteralsOffset)); |
| 357 |
| 358 // Initialize the code pointer in the new function to be the one found in the |
| 359 // shared function info object. |
| 360 // But first check if there is an optimized version for our context. |
| 361 Label check_optimized; |
| 362 Label install_unoptimized; |
| 363 Register opt_code_map = x4; |
| 364 if (FLAG_cache_optimized_code) { |
| 365 __ Ldr(opt_code_map, |
| 366 FieldMemOperand(function, |
| 367 SharedFunctionInfo::kOptimizedCodeMapOffset)); |
| 368 __ Cbnz(opt_code_map, &check_optimized); |
| 369 } |
| 370 |
| 371 __ Bind(&install_unoptimized); |
| 372 Register undef = x4; |
| 373 __ LoadRoot(undef, Heap::kUndefinedValueRootIndex); |
| 374 __ Str(undef, FieldMemOperand(new_fn, JSFunction::kNextFunctionLinkOffset)); |
| 375 |
| 376 Register fn_code = x2; |
| 377 __ Ldr(fn_code, FieldMemOperand(function, SharedFunctionInfo::kCodeOffset)); |
| 378 __ Add(fn_code, fn_code, Code::kHeaderSize - kHeapObjectTag); |
| 379 __ Str(fn_code, FieldMemOperand(new_fn, JSFunction::kCodeEntryOffset)); |
| 380 |
| 381 // Return result. The argument function info has been popped already. |
| 382 __ Ret(); |
| 383 |
| 384 // This code is never reached if FLAG_cache_optimized_code is false. |
| 385 __ Bind(&check_optimized); |
| 386 |
| 387 __ IncrementCounter(counters->fast_new_closure_try_optimized(), 1, x6, x7); |
| 388 |
| 389 // x4 opt_code_map pointer to optimized code map |
| 390 // x5 global_ctx pointer to global context |
| 391 |
| 392 // The optimized code map must never be empty, so check its first entry.
| 393 Label install_optimized; |
| 394 // Speculatively move code object into opt_code. |
| 395 Register opt_code = x11; |
| 396 Register opt_code_ctx = x12; |
| 397 __ Ldr(opt_code, FieldMemOperand(opt_code_map, |
| 398 SharedFunctionInfo::kFirstCodeSlot)); |
| 399 __ Ldr(opt_code_ctx, FieldMemOperand(opt_code_map, |
| 400 SharedFunctionInfo::kFirstContextSlot)); |
| 401 __ Cmp(opt_code_ctx, global_ctx); |
| 402 __ B(eq, &install_optimized); |
| 403 |
| 404 // Iterate through the rest of the map backwards. |
| 405 Label loop; |
| 406 Register index = x10; |
| 407 Register array_base = x13; |
| 408 Register entry = x14; |
| 409 __ Ldrsw(index, UntagSmiFieldMemOperand(opt_code_map, |
| 410 FixedArray::kLengthOffset)); |
| 411 __ Add(array_base, opt_code_map, FixedArray::kHeaderSize - kHeapObjectTag); |
| 412 __ Bind(&loop); |
| 413 |
| 414 // Do not double-check the first entry.
| 415 __ Cmp(index, SharedFunctionInfo::kSecondEntryIndex); |
| 416 __ B(eq, &install_unoptimized); |
| 417 // TODO(all): Optimise this to use an addressing mode to update the pointer.
| 418 __ Sub(index, index, SharedFunctionInfo::kEntryLength); |
| 419 __ Add(entry, array_base, Operand(index, LSL, kPointerSizeLog2)); |
| 420 __ Ldr(opt_code_ctx, MemOperand(entry)); |
| 421 __ Cmp(global_ctx, opt_code_ctx); |
| 422 __ B(ne, &loop); |
| 423 |
| 424 // Hit: fetch the optimized code. Register entry already contains pointer to |
| 425 // the first element (context) of the triple. |
| 426 __ Ldr(opt_code, MemOperand(entry, kPointerSize)); |
| 427 |
| 428 __ Bind(&install_optimized); |
| 429 __ IncrementCounter(counters->fast_new_closure_install_optimized(), |
| 430 1, x6, x7); |
| 431 |
| 432 Register opt_code_entry = x10; |
| 433 __ Add(opt_code_entry, opt_code, Code::kHeaderSize - kHeapObjectTag); |
| 434 __ Str(opt_code_entry, FieldMemOperand(new_fn, JSFunction::kCodeEntryOffset)); |
| 435 |
| 436 // Now link the function into the context's list of optimized functions.
| 437 Register opt_fn_list = x10; |
| 438 __ Ldr(opt_fn_list, ContextMemOperand(global_ctx, |
| 439 Context::OPTIMIZED_FUNCTIONS_LIST)); |
| 440 __ Str(opt_fn_list, FieldMemOperand(new_fn, |
| 441 JSFunction::kNextFunctionLinkOffset)); |
| 442 // No write barrier is needed here, as the JSFunction is in new space.
| 443 |
| 444 // Store the JSFunction before issuing the write barrier, as the barrier
| 445 // clobbers all of the registers passed to it.
| 446 __ Str(new_fn, ContextMemOperand(global_ctx, |
| 447 Context::OPTIMIZED_FUNCTIONS_LIST)); |
| 448 |
| 449 // Move value to a temporary, to prevent RecordWriteContextSlot() |
| 450 // corrupting the return value. |
| 451 __ Mov(x4, new_fn); |
| 452 __ RecordWriteContextSlot( |
| 453 global_ctx, |
| 454 Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST), |
| 455 x4, |
| 456 x1, |
| 457 kLRHasNotBeenSaved, |
| 458 kDontSaveFPRegs); |
| 459 |
| 460 // Return result. The argument function info has been popped already. |
| 461 __ Ret(); |
| 462 |
| 463 // Create a new closure through the slower runtime call. |
| 464 __ Bind(&gc); |
| 465 Register false_val = x2; |
| 466 __ LoadRoot(false_val, Heap::kFalseValueRootIndex); |
| 467 __ Push(cp, function, false_val); |
| 468 __ TailCallRuntime(Runtime::kNewClosure, 3, 1); |
| 469 } |
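| // Editorial note: the lookup above treats the shared function info's
| // optimized code map as a FixedArray of fixed-size entries (each
| // SharedFunctionInfo::kEntryLength slots, starting with a context and a code
| // object), scanned backwards until an entry's context matches the native
| // context. On a hit the cached optimized code is installed in place of the
| // generic code from SharedFunctionInfo::kCodeOffset.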
| 470 |
| 471 |
| 472 void FastNewContextStub::Generate(MacroAssembler* masm) { |
| 473 Register function = x0; |
| 474 Register allocated = x1; |
| 475 Label gc; |
| 476 |
| 477 // Pop the function from the stack. |
| 478 __ Pop(function); |
| 479 |
| 480 // Attempt to allocate the context in new space. |
| 481 int context_length = slots_ + Context::MIN_CONTEXT_SLOTS; |
| 482 __ Allocate(FixedArray::SizeFor(context_length), allocated, x6, x7, &gc, |
| 483 TAG_OBJECT); |
| 484 |
| 485 // Set up the object header. |
| 486 Register map = x2; |
| 487 Register length = x2; |
| 488 __ LoadRoot(map, Heap::kFunctionContextMapRootIndex); |
| 489 __ Str(map, FieldMemOperand(allocated, HeapObject::kMapOffset)); |
| 490 __ Mov(length, Operand(Smi::FromInt(context_length))); |
| 491 __ Str(length, FieldMemOperand(allocated, FixedArray::kLengthOffset)); |
| 492 |
| 493 // Set up the fixed slots. |
| 494 Register extension = x2; |
| 495 __ Mov(extension, Operand(Smi::FromInt(0))); |
| 496 __ Str(function, ContextMemOperand(allocated, Context::CLOSURE_INDEX)); |
| 497 __ Str(cp, ContextMemOperand(allocated, Context::PREVIOUS_INDEX)); |
| 498 __ Str(extension, ContextMemOperand(allocated, Context::EXTENSION_INDEX)); |
| 499 |
| 500 // Copy the global object from the previous context. |
| 501 Register global_object = x2; |
| 502 __ Ldr(global_object, GlobalObjectMemOperand()); |
| 503 __ Str(global_object, ContextMemOperand(allocated, |
| 504 Context::GLOBAL_OBJECT_INDEX)); |
| 505 |
| 506 // Initialize the rest of the slots to undefined. |
| 507 Register undef_val = x2; |
| 508 __ LoadRoot(undef_val, Heap::kUndefinedValueRootIndex); |
| 509 for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; i++) { |
| 510 __ Str(undef_val, ContextMemOperand(allocated, i)); |
| 511 } |
| 512 |
| 513 // Install new context and return. |
| 514 __ Mov(cp, allocated); |
| 515 __ Ret(); |
| 516 |
| 517 // Need to collect. Call into runtime system. |
| 518 __ Bind(&gc); |
| 519 __ Push(function); |
| 520 __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1); |
| 521 } |
| 522 |
| 523 |
| 524 void FastNewBlockContextStub::Generate(MacroAssembler* masm) { |
| 525 // Stack on entry: |
| 526 // jssp[0]: function. |
| 527 // jssp[8]: serialized scope info. |
| 528 |
| 529 // Try to allocate the context in new space. |
| 530 Register context = x10; |
| 531 Register function = x11; |
| 532 Register scope = x12; |
| 533 Register global_obj = x13; |
| 534 Label gc; |
| 535 int length = slots_ + Context::MIN_CONTEXT_SLOTS; |
| 536 __ Allocate(FixedArray::SizeFor(length), context, x6, x7, &gc, TAG_OBJECT); |
| 537 |
| 538 // Load the global object. |
| 539 __ Ldr(global_obj, GlobalObjectMemOperand()); |
| 540 |
| 541 // Pop the function and scope from the stack. |
| 542 __ Pop(function, scope); |
| 543 |
| 544 // Set up the object header. |
| 545 Register map = x14; |
| 546 Register obj_length = x15; |
| 547 __ LoadRoot(map, Heap::kBlockContextMapRootIndex); |
| 548 __ Mov(obj_length, Operand(Smi::FromInt(length))); |
| 549 __ Str(map, FieldMemOperand(context, HeapObject::kMapOffset)); |
| 550 __ Str(obj_length, FieldMemOperand(context, FixedArray::kLengthOffset)); |
| 551 |
| 552 // If this block context is nested in the native context we get a smi |
| 553 // sentinel instead of a function. The block context should get the |
| 554 // canonical empty function of the native context as its closure which we |
| 555 // still have to look up. |
| 556 Label after_sentinel; |
| 557 __ JumpIfNotSmi(function, &after_sentinel); |
| 558 if (FLAG_debug_code) { |
| 559 __ Cmp(function, 0); |
| 560 __ Assert(eq, "Expected 0 as a Smi sentinel"); |
| 561 } |
| 562 |
| 563 Register global_ctx = x14; |
| 564 __ Ldr(global_ctx, FieldMemOperand(global_obj, |
| 565 GlobalObject::kNativeContextOffset)); |
| 566 __ Ldr(function, ContextMemOperand(global_ctx, Context::CLOSURE_INDEX)); |
| 567 __ Bind(&after_sentinel); |
| 568 |
| 569 // Store the global object from the previous context, and set up the fixed |
| 570 // slots. |
| 571 __ Str(global_obj, ContextMemOperand(context, |
| 572 Context::GLOBAL_OBJECT_INDEX)); |
| 573 __ Str(function, ContextMemOperand(context, Context::CLOSURE_INDEX)); |
| 574 __ Str(cp, ContextMemOperand(context, Context::PREVIOUS_INDEX)); |
| 575 __ Str(scope, ContextMemOperand(context, Context::EXTENSION_INDEX)); |
| 576 |
| 577 // Initialize the rest of the slots to the hole value. |
| 578 __ LoadRoot(x7, Heap::kTheHoleValueRootIndex); |
| 579 for (int i = 0; i < slots_; i++) { |
| 580 __ Str(x7, ContextMemOperand(context, i + Context::MIN_CONTEXT_SLOTS)); |
| 581 } |
| 582 |
| 583 // Remove the on-stack argument and return. |
| 584 __ Mov(cp, context); |
| 585 __ Ret(); |
| 586 |
| 587 // Need to collect. Call into runtime system. |
| 588 __ Bind(&gc); |
| 589 // The arguments (function and scope) should still be on the stack. |
| 590 __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1); |
| 591 } |
| 592 |
| 593 |
| 594 // See call site for description. |
| 595 static void EmitIdenticalObjectComparison(MacroAssembler* masm, |
| 596 Register left, |
| 597 Register right, |
| 598 Register scratch, |
| 599 FPRegister double_scratch, |
| 600 Label* slow, |
| 601 Condition cond) { |
| 602 ASSERT(!AreAliased(left, right, scratch)); |
| 603 Label not_identical, return_equal, heap_number; |
| 604 Register result = x0; |
| 605 |
| 606 __ Cmp(right, left); |
| 607 __ B(ne, &not_identical);
| 608 |
| 609 // Test for NaN. Sadly, we can't just compare against factory::nan_value(),
| 610 // so we do the next best thing and test for it ourselves.
| 611 // The operands are identical and they are not both smis, so neither of
| 612 // them is a smi. If the operand is not a heap number, return equal.
| 613 if ((cond == lt) || (cond == gt)) { |
| 614 __ JumpIfObjectType(right, scratch, scratch, FIRST_SPEC_OBJECT_TYPE, slow, |
| 615 ge); |
| 616 } else { |
| 617 Register right_type = scratch; |
| 618 __ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE, |
| 619 &heap_number); |
| 620 // Comparing JS objects with <=, >= is complicated. |
| 621 if (cond != eq) { |
| 622 __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE); |
| 623 __ B(ge, slow); |
| 624 // Normally here we fall through to return_equal, but undefined is |
| 625 // special: (undefined == undefined) == true, but |
| 626 // (undefined <= undefined) == false! See ECMAScript 11.8.5. |
| 627 if ((cond == le) || (cond == ge)) { |
| 628 __ Cmp(right_type, ODDBALL_TYPE); |
| 629 __ B(ne, &return_equal); |
| 630 __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal); |
| 631 if (cond == le) { |
| 632 // undefined <= undefined should fail. |
| 633 __ Mov(result, GREATER); |
| 634 } else { |
| 635 // undefined >= undefined should fail. |
| 636 __ Mov(result, LESS); |
| 637 } |
| 638 __ Ret(); |
| 639 } |
| 640 } |
| 641 } |
| 642 |
| 643 __ Bind(&return_equal); |
| 644 if (cond == lt) { |
| 645 __ Mov(result, GREATER); // Things aren't less than themselves. |
| 646 } else if (cond == gt) { |
| 647 __ Mov(result, LESS); // Things aren't greater than themselves. |
| 648 } else { |
| 649 __ Mov(result, EQUAL); // Things are <=, >=, ==, === themselves. |
| 650 } |
| 651 __ Ret(); |
| 652 |
| 653 // Cases lt and gt have been handled earlier, and case ne is never seen, as |
| 654 // it is handled in the parser (see Parser::ParseBinaryExpression). We are |
| 655 // only concerned with cases ge, le and eq here. |
| 656 if ((cond != lt) && (cond != gt)) { |
| 657 ASSERT((cond == ge) || (cond == le) || (cond == eq)); |
| 658 __ Bind(&heap_number); |
| 659 // Left and right are identical pointers to a heap number object. Return |
| 660 // non-equal if the heap number is a NaN, and equal otherwise. Comparing |
| 661 // the number to itself will set the overflow flag iff the number is NaN. |
| 662 __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset)); |
| 663 __ Fcmp(double_scratch, double_scratch); |
| 664 __ B(vc, &return_equal); // Not NaN, so treat as normal heap number. |
| 665 |
| 666 if (cond == le) { |
| 667 __ Mov(result, GREATER); |
| 668 } else { |
| 669 __ Mov(result, LESS); |
| 670 } |
| 671 __ Ret(); |
| 672 } |
| 673 |
| 674 // No fall through here. |
| 675 if (FLAG_debug_code) { |
| 676 __ Abort("We should never reach this code."); |
| 677 } |
| 678 |
| 679 __ Bind(&not_identical);
| 680 } |
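| // Editorial note: the result convention used above (and throughout these
| // compare stubs) is that x0 holds LESS (-1), EQUAL (0) or GREATER (1).
| // Returning GREATER for "x < x" and LESS for "x > x" makes every ordered
| // comparison of a value with itself come out false, which is exactly what
| // the NaN and "undefined <= undefined" cases require.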
| 681 |
| 682 |
| 683 // See call site for description. |
| 684 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
| 685 Register left, |
| 686 Register right, |
| 687 Register left_type, |
| 688 Register right_type, |
| 689 Register scratch) { |
| 690 ASSERT(!AreAliased(left, right, left_type, right_type, scratch)); |
| 691 |
| 692 // If either operand is a JS object or an oddball value, then they are not |
| 693 // equal since their pointers are different. |
| 694 // There is no test for undetectability in strict equality. |
| 695 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); |
| 696 Label right_non_object; |
| 697 |
| 698 __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE); |
| 699 __ B(lt, &right_non_object); |
| 700 |
| 701 // Return non-zero - x0 already contains a non-zero pointer. |
| 702 ASSERT(left.is(x0) || right.is(x0)); |
| 703 Label return_not_equal; |
| 704 __ Bind(&return_not_equal); |
| 705 __ Ret(); |
| 706 |
| 707 __ Bind(&right_non_object); |
| 708 |
| 709 // Check for oddballs: true, false, null, undefined. |
| 710 __ Cmp(right_type, ODDBALL_TYPE); |
| 711 |
| 712 // If right is not ODDBALL, test left. Otherwise, set eq condition. |
| 713 __ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne); |
| 714 |
| 715 // If right or left is not ODDBALL, test left >= FIRST_SPEC_OBJECT_TYPE. |
| 716 // Otherwise, right or left is ODDBALL, so set a ge condition. |
| 717 __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NVFlag, ne); |
| 718 |
| 719 __ B(ge, &return_not_equal); |
| 720 |
| 721 // Check for internalized-internalized comparison. Ensure that no non-strings |
| 722 // have the internalized bit set. |
| 723 STATIC_ASSERT(LAST_TYPE < (kNotStringTag + kIsInternalizedMask)); |
| 724 STATIC_ASSERT(kInternalizedTag != 0); |
| 725 __ And(scratch, right_type, left_type); |
| 726 __ Tbnz(scratch, MaskToBit(kIsInternalizedMask), &return_not_equal); |
| 727 } |
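| // Editorial note: the Cmp/Ccmp chain above is a branchless three-way test.
| // Ccmp performs its comparison only when the input condition holds, and
| // otherwise loads the immediate NZCV flags. A sketch of the equivalent
| // logic in C:
| //
| //   if (right_type == ODDBALL_TYPE || left_type == ODDBALL_TYPE ||
| //       left_type >= FIRST_SPEC_OBJECT_TYPE) goto return_not_equal;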
| 728 |
| 729 |
| 730 // See call site for description. |
| 731 static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
| 732 Register left, |
| 733 Register right, |
| 734 FPRegister left_d, |
| 735 FPRegister right_d, |
| 736 Register scratch, |
| 737 Label* slow, |
| 738 bool strict) { |
| 739 ASSERT(!AreAliased(left, right, scratch)); |
| 740 ASSERT(!AreAliased(left_d, right_d)); |
| 741 ASSERT((left.is(x0) && right.is(x1)) || |
| 742 (right.is(x0) && left.is(x1))); |
| 743 Register result = x0; |
| 744 |
| 745 Label right_is_smi, done; |
| 746 __ JumpIfSmi(right, &right_is_smi); |
| 747 |
| 748 // Left is the smi. Check whether right is a heap number. |
| 749 if (strict) { |
| 750 // If right is not a number and left is a smi, then strict equality cannot |
| 751 // succeed. Return non-equal. |
| 752 Label is_heap_number; |
| 753 __ JumpIfObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE, |
| 754 &is_heap_number); |
| 755 // Register right is a non-zero pointer, which is a valid NOT_EQUAL result. |
| 756 if (!right.is(result)) { |
| 757 __ Mov(result, NOT_EQUAL); |
| 758 } |
| 759 __ Ret(); |
| 760 __ Bind(&is_heap_number); |
| 761 } else { |
| 762 // Smi compared non-strictly with a non-smi, non-heap-number. Call the |
| 763 // runtime. |
| 764 __ JumpIfNotObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE, slow); |
| 765 } |
| 766 |
| 767 // Left is the smi. Right is a heap number. Load right value into right_d, and |
| 768 // convert left smi into double in left_d. |
| 769 __ Ldr(right_d, FieldMemOperand(right, HeapNumber::kValueOffset)); |
| 770 __ SmiUntagToDouble(left_d, left); |
| 771 __ B(&done); |
| 772 |
| 773 __ Bind(&right_is_smi); |
| 774 // Right is a smi. Check whether the non-smi left is a heap number. |
| 775 if (strict) { |
| 776 // If left is not a number and right is a smi then strict equality cannot |
| 777 // succeed. Return non-equal. |
| 778 Label is_heap_number; |
| 779 __ JumpIfObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE, |
| 780 &is_heap_number); |
| 781 // Register left is a non-zero pointer, which is a valid NOT_EQUAL result. |
| 782 if (!left.is(result)) { |
| 783 __ Mov(result, NOT_EQUAL); |
| 784 } |
| 785 __ Ret(); |
| 786 __ Bind(&is_heap_number); |
| 787 } else { |
| 788 // Smi compared non-strictly with a non-smi, non-heap-number. Call the |
| 789 // runtime. |
| 790 __ JumpIfNotObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE, slow); |
| 791 } |
| 792 |
| 793 // Right is the smi. Left is a heap number. Load left value into left_d, and |
| 794 // convert right smi into double in right_d. |
| 795 __ Ldr(left_d, FieldMemOperand(left, HeapNumber::kValueOffset)); |
| 796 __ SmiUntagToDouble(right_d, right); |
| 797 |
| 798 // Fall through to both_loaded_as_doubles. |
| 799 __ Bind(&done); |
| 800 } |
| 801 |
| 802 |
| 803 // Fast negative check for internalized-to-internalized equality. |
| 804 // See call site for description. |
| 805 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, |
| 806 Register left, |
| 807 Register right, |
| 808 Register left_map, |
| 809 Register right_map, |
| 810 Register left_type, |
| 811 Register right_type, |
| 812 Label* possible_strings, |
| 813 Label* not_both_strings) { |
| 814 ASSERT(!AreAliased(left, right, left_map, right_map, left_type, right_type)); |
| 815 Register result = x0; |
| 816 |
| 817 // Ensure that no non-strings have the internalized bit set. |
| 818 Label object_test; |
| 819 STATIC_ASSERT(kStringTag == 0); |
| 820 STATIC_ASSERT(kInternalizedTag != 0); |
| 821 // TODO(all): reexamine this branch sequence for optimisation wrt branch |
| 822 // prediction. |
| 823 __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test); |
| 824 __ Tbz(right_type, MaskToBit(kIsInternalizedMask), possible_strings); |
| 825 __ Tbnz(left_type, MaskToBit(kIsNotStringMask), not_both_strings); |
| 826 __ Tbz(left_type, MaskToBit(kIsInternalizedMask), possible_strings); |
| 827 |
| 828 // Both are internalized. We already checked that they weren't the same |
| 829 // pointer, so they are not equal. |
| 830 __ Mov(result, NOT_EQUAL); |
| 831 __ Ret(); |
| 832 |
| 833 __ Bind(&object_test); |
| 834 |
| 835 __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE); |
| 836 |
| 837 // If right >= FIRST_SPEC_OBJECT_TYPE, test left. |
| 838 // Otherwise, right < FIRST_SPEC_OBJECT_TYPE, so set lt condition. |
| 839 __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NFlag, ge); |
| 840 |
| 841 __ B(lt, not_both_strings); |
| 842 |
| 843 // If both objects are undetectable, they are equal. Otherwise, they are not |
| 844 // equal, since they are different objects and an object is not equal to |
| 845 // undefined. |
| 846 |
| 847 // Returning here, so we can corrupt right_type and left_type. |
| 848 Register right_bitfield = right_type; |
| 849 Register left_bitfield = left_type; |
| 850 __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset)); |
| 851 __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset)); |
| 852 __ And(result, right_bitfield, left_bitfield); |
| 853 __ And(result, result, 1 << Map::kIsUndetectable); |
| 854 __ Eor(result, result, 1 << Map::kIsUndetectable); |
| 855 __ Ret(); |
| 856 } |
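| // Editorial note: the final And/Eor sequence computes the answer
| // arithmetically. With u = 1 << Map::kIsUndetectable, it evaluates
| //
| //   result = ((right_bits & left_bits) & u) ^ u
| //
| // which is 0 (EQUAL) iff both maps have the undetectable bit set, and
| // non-zero (not equal) otherwise.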
| 857 |
| 858 |
| 859 static void ICCompareStub_CheckInputType(MacroAssembler* masm, |
| 860 Register input, |
| 861 Register scratch, |
| 862 CompareIC::State expected, |
| 863 Label* fail) { |
| 864 Label ok; |
| 865 if (expected == CompareIC::SMI) { |
| 866 __ JumpIfNotSmi(input, fail); |
| 867 } else if (expected == CompareIC::NUMBER) { |
| 868 __ JumpIfSmi(input, &ok); |
| 869 __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail, |
| 870 DONT_DO_SMI_CHECK); |
| 871 } |
| 872 // We could be strict about internalized/non-internalized here, but as long as |
| 873 // hydrogen doesn't care, the stub doesn't have to care either. |
| 874 __ Bind(&ok); |
| 875 } |
| 876 |
| 877 |
| 878 void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { |
| 879 Register lhs = x1; |
| 880 Register rhs = x0; |
| 881 Register result = x0; |
| 882 Condition cond = GetCondition(); |
| 883 |
| 884 Label miss; |
| 885 ICCompareStub_CheckInputType(masm, lhs, x2, left_, &miss); |
| 886 ICCompareStub_CheckInputType(masm, rhs, x3, right_, &miss); |
| 887 |
| 888 Label slow; // Call builtin. |
| 889 Label not_smis, both_loaded_as_doubles; |
| 890 Label not_two_smis, smi_done; |
| 891 __ JumpIfEitherNotSmi(lhs, rhs, &not_two_smis);
| 892 __ SmiUntag(lhs); |
| 893 __ Sub(result, lhs, Operand::UntagSmi(rhs)); |
| 894 __ Ret(); |
| 895 |
| 896 __ Bind(&not_two_smis);
| 897 |
| 898 // NOTICE! This code is only reached after a smi-fast-case check, so it is |
| 899 // certain that at least one operand isn't a smi. |
| 900 |
| 901 // Handle the case where the objects are identical. Either returns the answer |
| 902 // or goes to slow. Only falls through if the objects were not identical. |
| 903 EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond); |
| 904 |
| 905 // If either is a smi (we know that at least one is not a smi), then they can |
| 906 // only be strictly equal if the other is a HeapNumber. |
| 907 __ JumpIfBothNotSmi(lhs, rhs, &not_smis);
| 908 |
| 909 // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that |
| 910 // can: |
| 911 // 1) Return the answer. |
| 912 // 2) Branch to the slow case. |
| 913 // 3) Fall through to both_loaded_as_doubles. |
| 914 // In case 3, we have found out that we were dealing with a number-number
| 915 // comparison. The double values of the numbers have been loaded: right
| 916 // into rhs_d, left into lhs_d.
| 917 FPRegister rhs_d = d0; |
| 918 FPRegister lhs_d = d1; |
| 919 EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, x10, &slow, strict()); |
| 920 |
| 921 __ Bind(&both_loaded_as_doubles); |
| 922 // The arguments have been converted to doubles and stored in rhs_d and |
| 923 // lhs_d. |
| 924 Label nan; |
| 925 __ Fcmp(lhs_d, rhs_d); |
| 926 __ B(vs, &nan); // Overflow flag set if either is NaN. |
| 927 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1)); |
| 928 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL). |
| 929 __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0. |
| 930 __ Ret(); |
| 931 |
| 932 __ Bind(&nan); |
| 933 // Left and/or right is a NaN. Load the result register with whatever makes |
| 934 // the comparison fail, since comparisons with NaN always fail (except ne, |
| 935 // which is filtered out at a higher level.) |
| 936 ASSERT(cond != ne); |
| 937 if ((cond == lt) || (cond == le)) { |
| 938 __ Mov(result, GREATER); |
| 939 } else { |
| 940 __ Mov(result, LESS); |
| 941 } |
| 942 __ Ret(); |
| 943 |
| 944 __ Bind(&not_smis);
| 945 // At this point we know we are dealing with two different objects, and |
| 946 // neither of them is a smi. The objects are in rhs_ and lhs_. |
| 947 |
| 948 // Load the maps and types of the objects. |
| 949 Register rhs_map = x10; |
| 950 Register rhs_type = x11; |
| 951 Register lhs_map = x12; |
| 952 Register lhs_type = x13; |
| 953 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset)); |
| 954 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset)); |
| 955 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset)); |
| 956 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset)); |
| 957 |
| 958 if (strict()) { |
| 959 // This emits a non-equal return sequence for some object types, or falls |
| 960 // through if it was not lucky. |
| 961 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs, lhs_type, rhs_type, x14); |
| 962 } |
| 963 |
| 964 Label check_for_internalized_strings; |
| 965 Label flat_string_check; |
| 966 // Check for heap number comparison. Branch to earlier double comparison code |
| 967 // if they are heap numbers, otherwise, branch to internalized string check. |
| 968 __ Cmp(rhs_type, HEAP_NUMBER_TYPE); |
| 969 __ B(ne, &check_for_internalized_strings); |
| 970 __ Cmp(lhs_map, rhs_map); |
| 971 |
| 972 // If maps aren't equal, lhs_ and rhs_ are not heap numbers. Branch to flat |
| 973 // string check. |
| 974 __ B(ne, &flat_string_check); |
| 975 |
| 976 // Both lhs_ and rhs_ are heap numbers. Load them and branch to the double |
| 977 // comparison code. |
| 978 __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
| 979 __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
| 980 __ B(&both_loaded_as_doubles); |
| 981 |
| 982 __ Bind(&check_for_internalized_strings); |
| 983 // In the strict case, the EmitStrictTwoHeapObjectCompare already took care |
| 984 // of internalized strings. |
| 985 if ((cond == eq) && !strict()) { |
| 986 // Returns an answer for two internalized strings or two detectable objects. |
| 987 // Otherwise branches to the string case or not both strings case. |
| 988 EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, lhs_map, rhs_map, |
| 989 lhs_type, rhs_type, |
| 990 &flat_string_check, &slow); |
| 991 } |
| 992 |
| 993 // Check for both being sequential ASCII strings, and inline if that is the |
| 994 // case. |
| 995 __ Bind(&flat_string_check); |
| 996 __ JumpIfBothInstanceTypesAreNotSequentialAscii(lhs_type, rhs_type, x14, |
| 997 x15, &slow); |
| 998 |
| 999 Isolate* isolate = masm->isolate(); |
| 1000 __ IncrementCounter(isolate->counters()->string_compare_native(), 1, x10, |
| 1001 x11); |
| 1002 if (cond == eq) { |
| 1003 StringCompareStub::GenerateFlatAsciiStringEquals(masm, lhs, rhs, |
| 1004 x10, x11, x12); |
| 1005 } else { |
| 1006 StringCompareStub::GenerateCompareFlatAsciiStrings(masm, lhs, rhs, |
| 1007 x10, x11, x12, x13); |
| 1008 } |
| 1009 |
| 1010 // Never fall through to here. |
| 1011 if (FLAG_debug_code) { |
| 1012 __ Abort("We should never reach this code."); |
| 1013 } |
| 1014 |
| 1015 __ Bind(&slow); |
| 1016 |
| 1017 __ Push(lhs, rhs); |
| 1018 // Figure out which native to call and set up the arguments.
| 1019 Builtins::JavaScript native; |
| 1020 if (cond == eq) { |
| 1021 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; |
| 1022 } else { |
| 1023 native = Builtins::COMPARE; |
| 1024 int ncr; // NaN compare result |
| 1025 if ((cond == lt) || (cond == le)) { |
| 1026 ncr = GREATER; |
| 1027 } else { |
| 1028 ASSERT((cond == gt) || (cond == ge)); // remaining cases |
| 1029 ncr = LESS; |
| 1030 } |
| 1031 __ Mov(x10, Operand(Smi::FromInt(ncr))); |
| 1032 __ Push(x10); |
| 1033 } |
| 1034 |
| 1035 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) |
| 1036 // tagged as a small integer. |
| 1037 __ InvokeBuiltin(native, JUMP_FUNCTION); |
| 1038 |
| 1039 __ Bind(&miss); |
| 1040 GenerateMiss(masm); |
| 1041 } |
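| // Editorial note: the smi fast path at the top of GenerateGeneric returns
| // the raw untagged difference lhs - rhs, so callers rely only on the sign
| // and zero-ness of x0; the slower paths normalize the result to LESS (-1),
| // EQUAL (0) or GREATER (1).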
| 1042 |
| 1043 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { |
| 1044 // Preserve caller-saved registers x0-x7 and x10-x15. We don't care if x8, x9, |
| 1045 // ip0 and ip1 are corrupted by the call into C. |
| 1046 CPURegList saved_regs = kCallerSaved; |
| 1047 saved_regs.Remove(ip0); |
| 1048 saved_regs.Remove(ip1); |
| 1049 saved_regs.Remove(x8); |
| 1050 saved_regs.Remove(x9); |
| 1051 |
| 1052 // We don't allow a GC during a store buffer overflow so there is no need to |
| 1053 // store the registers in any particular way, but we do have to store and |
| 1054 // restore them. |
| 1055 __ PushCPURegList(saved_regs); |
| 1056 if (save_doubles_ == kSaveFPRegs) { |
| 1057 __ PushCPURegList(kCallerSavedFP); |
| 1058 } |
| 1059 |
| 1060 AllowExternalCallThatCantCauseGC scope(masm); |
| 1061 __ Mov(x0, Operand(ExternalReference::isolate_address(masm->isolate()))); |
| 1062 __ CallCFunction( |
| 1063 ExternalReference::store_buffer_overflow_function(masm->isolate()), |
| 1064 1, 0); |
| 1065 |
| 1066 if (save_doubles_ == kSaveFPRegs) { |
| 1067 __ PopCPURegList(kCallerSavedFP); |
| 1068 } |
| 1069 __ PopCPURegList(saved_regs); |
| 1070 __ Ret(); |
| 1071 } |
| 1072 |
| 1073 |
| 1074 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( |
| 1075 Isolate* isolate) { |
| 1076 StoreBufferOverflowStub stub1(kDontSaveFPRegs); |
| 1077 stub1.GetCode(isolate)->set_is_pregenerated(true); |
| 1078 StoreBufferOverflowStub stub2(kSaveFPRegs); |
| 1079 stub2.GetCode(isolate)->set_is_pregenerated(true); |
| 1080 } |
| 1081 |
| 1082 |
| 1083 void UnaryOpStub::PrintName(StringStream* stream) { |
| 1084 const char* op_name = Token::Name(op_); |
| 1085 const char* overwrite_name = NULL; |
| 1086 switch (mode_) { |
| 1087 case UNARY_NO_OVERWRITE: |
| 1088 overwrite_name = "Alloc"; |
| 1089 break; |
| 1090 case UNARY_OVERWRITE: |
| 1091 overwrite_name = "Overwrite"; |
| 1092 break; |
| 1093 default: |
| 1094 UNREACHABLE(); |
| 1095 } |
| 1096 stream->Add("UnaryOpStub_%s_%s_%s", |
| 1097 op_name, |
| 1098 overwrite_name, |
| 1099 UnaryOpIC::GetName(operand_type_)); |
| 1100 } |
| 1101 |
| 1102 |
| 1103 void UnaryOpStub::Generate(MacroAssembler* masm) { |
| 1104 switch (operand_type_) { |
| 1105 case UnaryOpIC::UNINITIALIZED: |
| 1106 GenerateTypeTransition(masm); |
| 1107 break; |
| 1108 case UnaryOpIC::SMI: |
| 1109 GenerateSmiStub(masm); |
| 1110 break; |
| 1111 case UnaryOpIC::NUMBER: |
| 1112 GenerateNumberStub(masm); |
| 1113 break; |
| 1114 case UnaryOpIC::GENERIC: |
| 1115 GenerateGenericStub(masm); |
| 1116 break; |
| 1117 } |
| 1118 } |
| 1119 |
| 1120 |
| 1121 void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
| 1122 __ Mov(x1, Operand(Smi::FromInt(op_))); |
| 1123 __ Mov(x2, Operand(Smi::FromInt(mode_))); |
| 1124 __ Mov(x3, Operand(Smi::FromInt(operand_type_))); |
| 1125 // x0 contains the operand |
| 1126 __ Push(x0, x1, x2, x3); |
| 1127 |
| 1128 __ TailCallExternalReference( |
| 1129 ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1); |
| 1130 } |
| 1131 |
| 1132 |
| 1133 void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
| 1134 switch (op_) { |
| 1135 case Token::SUB: |
| 1136 GenerateSmiStubSub(masm); |
| 1137 break; |
| 1138 case Token::BIT_NOT: |
| 1139 GenerateSmiStubBitNot(masm); |
| 1140 break; |
| 1141 default: |
| 1142 UNREACHABLE(); |
| 1143 } |
| 1144 } |
| 1145 |
| 1146 |
| 1147 void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) { |
| 1148 Label non_smi, slow; |
| 1149 GenerateSmiCodeSub(masm, &non_smi, &slow); |
| 1150 __ Bind(&non_smi); |
| 1151 __ Bind(&slow); |
| 1152 GenerateTypeTransition(masm); |
| 1153 } |
| 1154 |
| 1155 |
| 1156 void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) { |
| 1157 Label non_smi; |
| 1158 GenerateSmiCodeBitNot(masm, &non_smi); |
| 1159 __ Bind(&non_smi); |
| 1160 GenerateTypeTransition(masm); |
| 1161 } |
| 1162 |
| 1163 |
| 1164 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm, |
| 1165 Label* non_smi, |
| 1166 Label* slow) { |
| 1167 __ JumpIfNotSmi(x0, non_smi); |
| 1168 |
| 1169 // The result of negating zero or the smallest negative smi is not a smi. |
| 1170 __ Ands(x1, x0, 0x7fffffff00000000UL); |
| 1171 __ B(eq, slow); |
| 1172 |
| 1173 __ Neg(x0, x0); |
| 1174 __ Ret(); |
| 1175 } |
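| // Editorial note: with 32-bit payloads tagged into the upper word
| // (kSmiShift == 32), exactly two inputs have a negation that is not a smi:
| //
| //   0x0000000000000000  (negating 0 would produce -0, not a smi)
| //   0x8000000000000000  (negating the smallest smi overflows)
| //
| // Both AND to zero against the 0x7fffffff00000000 mask tested above.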
| 1176 |
| 1177 |
| 1178 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm, |
| 1179 Label* non_smi) { |
| 1180 __ JumpIfNotSmi(x0, non_smi); |
| 1181 |
| 1182 // Eor the top 32 bits with 0xffffffff to invert. |
| 1183 __ Eor(x0, x0, 0xffffffff00000000UL); |
| 1184 __ Ret(); |
| 1185 } |
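| // Editorial note: for a smi stored as (value << 32), flipping just the top
| // 32 bits is JavaScript's ~value, already re-tagged. For example, value 5
| // is tagged as 0x0000000500000000; the Eor yields 0xfffffffa00000000,
| // which is -6 << 32, and ~5 == -6.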
| 1186 |
| 1187 |
| 1188 void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) { |
| 1189 switch (op_) { |
| 1190 case Token::SUB: |
| 1191 GenerateNumberStubSub(masm); |
| 1192 break; |
| 1193 case Token::BIT_NOT: |
| 1194 GenerateNumberStubBitNot(masm); |
| 1195 break; |
| 1196 default: |
| 1197 UNREACHABLE(); |
| 1198 } |
| 1199 } |
| 1200 |
| 1201 |
| 1202 void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) { |
| 1203 Label non_smi, slow, call_builtin; |
| 1204 GenerateSmiCodeSub(masm, &non_smi, &call_builtin); |
| 1205 __ Bind(&non_smi); |
| 1206 GenerateHeapNumberCodeSub(masm, &slow); |
| 1207 __ Bind(&slow); |
| 1208 GenerateTypeTransition(masm); |
| 1209 __ Bind(&call_builtin); |
| 1210 __ Push(x0); |
| 1211 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION); |
| 1212 } |
| 1213 |
| 1214 |
| 1215 void UnaryOpStub::GenerateNumberStubBitNot(MacroAssembler* masm) { |
| 1216 Label non_smi, slow; |
| 1217 GenerateSmiCodeBitNot(masm, &non_smi); |
| 1218 __ Bind(&non_smi); |
| 1219 GenerateHeapNumberCodeBitNot(masm, &slow); |
| 1220 __ Bind(&slow); |
| 1221 GenerateTypeTransition(masm); |
| 1222 } |
| 1223 |
| 1224 |
| 1225 void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, |
| 1226 Label* slow) { |
| 1227 Register heap_num = x0; |
| 1228 Register heap_num_map = x1; |
| 1229 |
| 1230 __ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex); |
| 1231 __ JumpIfNotHeapNumber(heap_num, slow, heap_num_map); |
| 1232 |
| 1233 if (mode_ == UNARY_OVERWRITE) { |
| 1234 Register exponent = w2; |
| 1235 |
| 1236 // Flip the sign bit of the existing heap number. |
| 1237 __ Ldr(exponent, FieldMemOperand(heap_num, HeapNumber::kExponentOffset)); |
| 1238 __ Eor(exponent, exponent, HeapNumber::kSignMask); |
| 1239 __ Str(exponent, FieldMemOperand(heap_num, HeapNumber::kExponentOffset)); |
| 1240 } else { |
| 1241 Register allocated_num = x0; |
| 1242 Register double_bits = x2; |
| 1243 Register heap_num_orig = x3; |
| 1244 |
| 1245 __ Mov(heap_num_orig, heap_num); |
| 1246 |
| 1247 // Create a new heap number. |
| 1248 Label slow_allocate_heapnumber, heapnumber_allocated; |
| 1249 __ AllocateHeapNumber(allocated_num, &slow_allocate_heapnumber, x6, x7, |
| 1250 heap_num_map); |
| 1251 __ B(&heapnumber_allocated); |
| 1252 |
| 1253 __ Bind(&slow_allocate_heapnumber); |
| 1254 { |
| 1255 FrameScope scope(masm, StackFrame::INTERNAL); |
| 1256 __ Push(heap_num_orig); |
| 1257 __ CallRuntime(Runtime::kNumberAlloc, 0); |
| 1258 __ Pop(heap_num_orig); |
| 1259 // allocated_num is x0, so contains the result of the runtime allocation. |
| 1260 } |
| 1261 |
| 1262 __ Bind(&heapnumber_allocated); |
| 1263 // Load the original heap number as a double precision float, and flip the |
| 1264 // sign bit. |
| 1265 STATIC_ASSERT(HeapNumber::kExponentOffset == |
| 1266 (HeapNumber::kMantissaOffset + 4)); |
| 1267 __ Ldr(double_bits, FieldMemOperand(heap_num_orig, |
| 1268 HeapNumber::kMantissaOffset)); |
| 1269 __ Eor(double_bits, double_bits, Double::kSignMask); |
| 1270 |
| 1271 // Store the negated double to the newly allocated heap number. |
| 1272 __ Str(double_bits, FieldMemOperand(allocated_num, |
| 1273 HeapNumber::kValueOffset)); |
| 1274 } |
| 1275 __ Ret(); |
| 1276 } |
| 1277 |
| 1278 |
| 1279 void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm, |
| 1280 Label* slow) { |
| 1281 Register heap_num = x0; |
| 1282 Register smi_num = x0; |
| 1283 |
| 1284 __ JumpIfNotHeapNumber(heap_num, slow); |
| 1285 |
| 1286 // Convert the heap number to a smi. |
| 1287 __ HeapNumberECMA262ToInt32(smi_num, heap_num, x6, x7, d0, |
| 1288 MacroAssembler::SMI); |
| 1289 |
| 1290 // Eor the top 32 bits with 0xffffffff to invert. |
| 1291 __ Eor(x0, smi_num, 0xffffffff00000000UL); |
| 1292 __ Ret(); |
| 1293 } |
| 1294 |
| 1295 |
| 1296 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) { |
| 1297 switch (op_) { |
| 1298 case Token::SUB: { |
| 1299 Label non_smi, slow; |
| 1300 GenerateSmiCodeSub(masm, &non_smi, &slow); |
| 1301 __ Bind(&non_smi); |
| 1302 GenerateHeapNumberCodeSub(masm, &slow); |
| 1303 __ Bind(&slow); |
| 1304 __ Push(x0); |
| 1305 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION); |
| 1306 break; |
| 1307 } |
| 1308 case Token::BIT_NOT: { |
| 1309 Label non_smi, slow; |
| 1310 GenerateSmiCodeBitNot(masm, &non_smi); |
| 1311 __ Bind(&non_smi); |
| 1312 GenerateHeapNumberCodeBitNot(masm, &slow); |
| 1313 __ Bind(&slow); |
| 1314 __ Push(x0); |
| 1315 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); |
| 1316 break; |
| 1317 } |
| 1318 default: |
| 1319 UNREACHABLE(); |
| 1320 } |
| 1321 } |
| 1322 |
| 1323 |
| 1324 void BinaryOpStub::Initialize() { |
| 1325 // Nothing to do here. |
| 1326 } |
| 1327 |
| 1328 |
| 1329 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
| 1330 ASM_LOCATION("BinaryOpStub::GenerateTypeTransition"); |
| 1331 Label get_result; |
| 1332 |
| 1333 __ Mov(x12, Operand(Smi::FromInt(MinorKey()))); |
| 1334 __ Push(x1, x0, x12); |
| 1335 |
| 1336 __ TailCallExternalReference( |
| 1337 ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()), |
| 1338 3, |
| 1339 1); |
| 1340 } |
| 1341 |
| 1342 |
| 1343 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( |
| 1344 MacroAssembler* masm) { |
| 1345 UNIMPLEMENTED(); |
| 1346 } |
| 1347 |
| 1348 |
| 1349 void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, |
| 1350 Token::Value op) { |
| 1351 ASM_LOCATION("BinaryOpStub_GenerateSmiSmiOperation"); |
| 1352 Register left = x1; |
| 1353 Register right = x0; |
| 1354 Register scratch1 = x10; |
| 1355 Register scratch2 = x11; |
| 1356 // Note that 'result' aliases 'right'. The code below must take care not to
| 1357 // overwrite 'right' before it is certain it won't be needed. |
| 1358 Register result = x0; |
| 1359 |
| 1360 // Adapt the code below if that does not hold. |
| 1361 STATIC_ASSERT(kSmiTag == 0); |
| 1362 STATIC_ASSERT(kSmiShift == 32); |
| 1363 |
| 1364 // TODO(alexandre): The code below mostly uses 64-bit operations, knowing
| 1365 // that the inputs are smis.
| 1366 // The use of 32-bit instructions should be investigated; for example, it
| 1367 // might improve speed or power consumption.
| 1368 |
| 1369 Label overflow, not_smi_result; |
| 1370 switch (op) { |
| 1371 case Token::ADD: |
| 1372 __ Adds(result, left, right); // Add optimistically. |
| 1373 __ B(vs, &overflow); |
| 1374 __ Ret(); |
| 1375 __ Bind(&overflow); |
| 1376 // Revert optimistic add. |
| 1377 __ Sub(right, result, left); |
| 1378 break; |
| 1379 |
| 1380 case Token::SUB: |
| 1381 // Subtract optimistically. |
| 1382 __ Subs(result, left, right); |
| 1383 __ B(vs, &overflow); |
| 1384 __ Ret(); |
| 1385 __ Bind(&overflow); |
| 1386 // Revert optimistic subtract. |
| 1387 __ Sub(right, left, result); |
| 1388 break; |
| 1389 |
| 1390 case Token::MUL: { |
| 1391 Label not_minus_zero; |
| 1392 |
| 1393 // Use smulh to avoid shifting right the inputs. |
| 1394 // scratch1 = bits<127:64> of left * right. |
| 1395 __ Smulh(scratch1, left, right); |
| 1396 |
| 1397 // If the product is non-zero, it cannot be minus zero.
| 1398 __ Cbnz(scratch1, &not_minus_zero);
| 1399 |
| 1400 // Check for minus zero. |
| 1401 // Exclusive or the arguments and check the sign bit of the result. |
| 1402 __ Eor(scratch2, left, right); |
| 1403 __ Tbnz(scratch2, kXSignBit, &not_smi_result);
| 1404 |
| 1405 // At this point, the result is zero, which needs no smi conversion. |
| 1406 STATIC_ASSERT(kSmiTag == 0); |
| 1407 __ Mov(result, scratch1); |
| 1408 __ Ret(); |
| 1409 |
| 1410 __ Bind(&not_minus_zero);
| 1411 // Check if the result fits in a signed 32-bit value: it does if bits
| 1412 // 63 down to 31 are all sign bits.
| 1413 __ Cls(scratch2, scratch1); |
| 1414 __ Cmp(scratch2, kXRegSize - kSmiShift); |
| 1415 __ B(lt, &not_smi_result);
| 1416 |
| 1417 // Tag the result. |
| 1418 __ SmiTag(result, scratch1); |
| 1419 __ Ret(); |
| 1420 break; |
| 1421 } |
| 1422 |
| 1423 case Token::DIV: { |
| 1424 // Check for division by zero. |
| 1425 __ Cbz(right, &not_smi_result);
| 1426 // Try integer division. |
| 1427 // If the remainder is not zero, the result is not a Smi and we bail out.
| 1428 __ Sdiv(scratch1, left, right); |
| 1429 // scratch2 = quotient * right. |
| 1430 __ Mul(scratch2, scratch1, right); |
| 1431 __ Cmp(scratch2, left); |
| 1432 __ B(ne, &not_smi_result);
| 1433 // Check for -0 (result is zero and right is negative). |
| 1434 Label not_minus_zero; |
| 1435 __ Cbnz(scratch1, &not_minus_zero);
| 1436 __ Tbnz(right, kXSignBit, &not_smi_result);
| 1437 __ Bind(&not_minus_zero);
| 1438 // Check for kMinInt / -1.
| 1439 __ Eor(scratch2, scratch1, 1L << 31); |
| 1440 __ Cbz(scratch2, &not_smi_result);
| 1441 // Tag the result and return. |
| 1442 __ SmiTag(result, scratch1); |
| 1443 __ Ret(); |
| 1444 break; |
| 1445 } |
| 1446 |
| 1447 case Token::MOD: { |
| 1448 Label not_minus_zero; |
| 1449 // Check for division by zero. |
| 1450 __ Cbz(right, &not_smi_result);
| 1451 // Compute: |
| 1452 // modulo = left - quotient * right |
| 1453 __ Sdiv(scratch1, left, right); |
| 1454 __ Msub(scratch1, scratch1, right, left); |
| 1455 __ Cbnz(scratch1, &not_minus_zero);
| 1456 // Check if the result should be minus zero.
| 1457 __ Tbnz(left, kXSignBit, &not_smi_result);
| 1458 __ Bind(&not_minus_zero);
| 1459 __ Mov(result, scratch1); |
| 1460 __ Ret(); |
| 1461 break; |
| 1462 } |
| 1463 |
| 1464 case Token::BIT_OR: |
| 1465 __ Orr(result, left, right); |
| 1466 __ Ret(); |
| 1467 break; |
| 1468 |
| 1469 case Token::BIT_AND: |
| 1470 __ And(result, left, right); |
| 1471 __ Ret(); |
| 1472 break; |
| 1473 |
| 1474 case Token::BIT_XOR: |
| 1475 __ Eor(result, left, right); |
| 1476 __ Ret(); |
| 1477 break; |
| 1478 |
| 1479 // For shift operations, only the 5 least significant bits of the rhs |
| 1480 // are used (see ECMA-262 11.7.1 and following). |
| 1481 // We would like to use the implicit masking operation performed by the |
| 1482 // shift instructions, but that would require using W registers and thus |
| 1483 // untagging. |
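| // For example, in JavaScript (8 >> 35) === (8 >> 3) === 1, because only
| // the low five bits of the shift count are used (35 & 0x1f == 3).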
| 1484 case Token::SAR: |
| 1485 __ Ubfx(right, right, kSmiShift, 5); |
| 1486 __ Asr(result, left, right); |
| 1487 __ Bic(result, result, kSmiShiftMask); |
| 1488 __ Ret(); |
| 1489 break; |
| 1490 |
| 1491 case Token::SHR: { |
| 1492 __ Ubfx(scratch1, right, kSmiShift, 5); |
| 1493 // SHR must not yield a negative value. This can only happen if left is |
| 1494 // negative and we shift right by zero. |
| 1495 Label right_not_zero; |
| 1496 __ Cbnz(scratch1, &right_not_zero); |
| 1497 __ Tbnz(left, kXSignBit, &not_smi_result);
| 1498 __ Bind(&right_not_zero); |
| 1499 __ Lsr(result, left, scratch1); |
| 1500 __ Bic(result, result, kSmiShiftMask); |
| 1501 __ Ret(); |
| 1502 break; |
| 1503 } |
| 1504 |
| 1505 case Token::SHL: |
| 1506 __ Ubfx(scratch1, right, kSmiShift, 5); |
| 1507 __ Lsl(result, left, scratch1); |
| 1508 __ Ret(); |
| 1509 break; |
| 1510 |
| 1511 default: |
| 1512 UNREACHABLE(); |
| 1513 } |
| 1514 |
| 1515 __ Bind(&not_smi_result);
| 1516 } |
| 1517 |
| 1518 |
| 1519 void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, |
| 1520 Register result, |
| 1521 Register heap_number_map, |
| 1522 Register scratch1, |
| 1523 Register scratch2, |
| 1524 Label* gc_required, |
| 1525 OverwriteMode mode); |
| 1526 |
| 1527 |
| 1528 void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, |
| 1529 BinaryOpIC::TypeInfo left_type, |
| 1530 BinaryOpIC::TypeInfo right_type, |
| 1531 bool smi_operands, |
| 1532 Label* not_numbers, |
| 1533 Label* gc_required, |
| 1534 Label* miss, |
| 1535 Token::Value op, |
| 1536 OverwriteMode mode) { |
| 1537 ASM_LOCATION("BinaryOpStub_GenerateFPOperation"); |
| 1538 |
| 1539 Register result = x0; |
| 1540 FPRegister result_d = d0; |
| 1541 Register right = x0; |
| 1542 Register left = x1; |
| 1543 Register heap_result = x3; |
| 1544 |
| 1545 ASSERT(smi_operands || (not_numbers != NULL)); |
| 1546 if (smi_operands) { |
| 1547 __ AssertSmi(left); |
| 1548 __ AssertSmi(right); |
| 1549 } |
| 1550 if (left_type == BinaryOpIC::SMI) { |
| 1551 __ JumpIfNotSmi(left, miss); |
| 1552 } |
| 1553 if (right_type == BinaryOpIC::SMI) { |
| 1554 __ JumpIfNotSmi(right, miss); |
| 1555 } |
| 1556 |
| 1557 Register heap_number_map = x2; |
| 1558 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 1559 |
| 1560 switch (op) { |
| 1561 case Token::ADD: |
| 1562 case Token::SUB: |
| 1563 case Token::MUL: |
| 1564 case Token::DIV: |
| 1565 case Token::MOD: { |
| 1566 FPRegister right_d = d0; |
| 1567 FPRegister left_d = d1; |
| 1568 Label do_operation; |
| 1569 |
| 1570 __ SmiUntagToDouble(left_d, left, kSpeculativeUntag); |
| 1571 __ SmiUntagToDouble(right_d, right, kSpeculativeUntag); |
| 1572 |
| 1573 if (!smi_operands) { |
| 1574 if (left_type != BinaryOpIC::SMI) { |
| 1575 Label left_done; |
| 1576 Label* left_not_heap = |
| 1577 (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers; |
| 1578 __ JumpIfSmi(left, &left_done); |
| 1579 |
| 1580 // Left not smi: load if heap number. |
| 1581 __ JumpIfNotHeapNumber(left, left_not_heap, heap_number_map); |
| 1582 __ Ldr(left_d, FieldMemOperand(left, HeapNumber::kValueOffset)); |
| 1583 __ Bind(&left_done); |
| 1584 } |
| 1585 |
| 1586 if (right_type != BinaryOpIC::SMI) { |
| 1587 Label* right_not_heap = |
| 1588 (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers; |
| 1589 __ JumpIfSmi(right, &do_operation); |
| 1590 |
| 1591 // Right not smi: load if heap number. |
| 1592 __ JumpIfNotHeapNumber(right, right_not_heap, heap_number_map); |
| 1593 __ Ldr(right_d, FieldMemOperand(right, HeapNumber::kValueOffset)); |
| 1594 } |
| 1595 } |
| 1596 |
| 1597 // Left and right are doubles in left_d and right_d. Calculate the result. |
| 1598 __ Bind(&do_operation); |
| 1599 switch (op) { |
| 1600 case Token::ADD: __ Fadd(result_d, left_d, right_d); break; |
| 1601 case Token::SUB: __ Fsub(result_d, left_d, right_d); break; |
| 1602 case Token::MUL: __ Fmul(result_d, left_d, right_d); break; |
| 1603 case Token::DIV: __ Fdiv(result_d, left_d, right_d); break; |
| 1604 case Token::MOD: |
| 1605 ASM_UNIMPLEMENTED("Implement HeapNumber modulo"); |
| 1606 __ B(miss); |
| 1607 break; |
| 1608 default: UNREACHABLE(); |
| 1609 } |
| 1610 |
| 1611 BinaryOpStub_GenerateHeapResultAllocation( |
| 1612 masm, heap_result, heap_number_map, x10, x11, gc_required, mode); |
| 1613 |
| 1614 __ Str(result_d, FieldMemOperand(heap_result, HeapNumber::kValueOffset)); |
| 1615 __ Mov(result, heap_result); |
| 1616 __ Ret(); |
| 1617 break; |
| 1618 } |
| 1619 |
| 1620 case Token::BIT_OR: |
| 1621 case Token::BIT_XOR: |
| 1622 case Token::BIT_AND: |
| 1623 case Token::SAR: |
| 1624 case Token::SHR: |
| 1625 case Token::SHL: { |
| 1626 Label do_operation, result_not_smi; |
| 1627 |
| 1628 if (!smi_operands) { |
| 1629 Label left_is_smi; |
| 1630 // Convert heap number operands to smis. |
| 1631 if (left_type != BinaryOpIC::SMI) { |
| 1632 __ JumpIfSmi(left, &left_is_smi); |
| 1633 __ JumpIfNotHeapNumber(left, not_numbers, heap_number_map); |
| 1634 __ HeapNumberECMA262ToInt32(left, left, x10, x11, d0, |
| 1635 MacroAssembler::SMI); |
| 1636 } |
| 1637 __ Bind(&left_is_smi); |
| 1638 if (right_type != BinaryOpIC::SMI) { |
| 1639 __ JumpIfSmi(right, &do_operation); |
| 1640 __ JumpIfNotHeapNumber(right, not_numbers, heap_number_map); |
| 1641 __ HeapNumberECMA262ToInt32(right, right, x10, x11, d0, |
| 1642 MacroAssembler::SMI); |
| 1643 } |
| 1644 } |
| 1645 |
| 1646 // Left and right are smis. Calculate the result. |
| 1647 __ Bind(&do_operation); |
| 1648 switch (op) { |
| 1649 case Token::BIT_OR: __ Orr(result, left, right); break; |
| 1650 case Token::BIT_XOR: __ Eor(result, left, right); break; |
| 1651 case Token::BIT_AND: __ And(result, left, right); break; |
| 1652 |
| 1653 // For shift operations, only the 5 least significant bits of the rhs |
| 1654 // are used (see ECMA-262 11.7.1 and following). |
| 1655 // We would like to use the implicit masking operation performed by the |
| 1656 // shift instructions, but that would require using W registers and thus |
| 1657 // untagging. |
| 1658 case Token::SAR: |
| 1659 __ Ubfx(right, right, kSmiShift, 5); |
| 1660 __ Asr(result, left, right); |
| 1661 // Clear bits shifted right. |
| 1662 __ Bic(result, result, kSmiShiftMask); |
| 1663 break; |
| 1664 case Token::SHL: |
| 1665 __ Ubfx(right, right, kSmiShift, 5); |
| 1666 __ Lsl(result, left, right); |
| 1667 break; |
| 1668 case Token::SHR: { |
| 1669 Label ok; |
| 1670 // SHR must always yield a positive result. |
| 1671 // This is a problem if right is zero and left is negative. |
| 1672 __ Ubfx(right, right, kSmiShift, 5); |
| 1673 __ Cbnz(right, &ok); |
| 1674 __ Cmp(left, 0); |
| 1675 __ B(mi, &result_not_smi); |
| 1676 __ Bind(&ok); |
| 1677 __ Lsr(result, left, right); |
| 1678 // Clear bits shifted right. |
| 1679 __ Bic(result, result, kSmiShiftMask); |
| 1680 break; |
| 1681 } |
| 1682 default: UNREACHABLE(); |
| 1683 } |
| 1684 __ Ret(); |
| 1685 |
| 1686 __ Bind(&result_not_smi); |
| 1687 // We know the operation was shift right, the left operand is negative, |
| 1688 // and the right is zero. The result will be the left operand cast to a |
| 1689 // positive value, as a heap number. |
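| // Ucvtf with fbits == kSmiShift treats the tagged value as an unsigned
| // fixed-point number, computing bits(left) / 2^32. For example, tagged -1
| // (0xffffffff00000000) becomes 4294967295.0, matching (-1 >>> 0) in
| // JavaScript.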
| 1690 __ Ucvtf(result_d, left, kSmiShift); |
| 1691 if (smi_operands) { |
| 1692 __ AllocateHeapNumber(heap_result, gc_required, x10, x11, |
| 1693 heap_number_map); |
| 1694 } else { |
| 1695 BinaryOpStub_GenerateHeapResultAllocation(masm, heap_result, |
| 1696 heap_number_map, x10, x11, |
| 1697 gc_required, mode); |
| 1698 } |
| 1699 |
| 1700 // Nothing can go wrong now, so move the heap number to the result |
| 1701 // register. |
| 1702 __ Mov(result, heap_result); |
| 1703 |
| 1704 // Now store the double result into the allocated heap number, and return. |
| 1705 __ Str(result_d, FieldMemOperand(result, HeapNumber::kValueOffset)); |
| 1706 __ Ret(); |
| 1707 break; |
| 1708 } |
| 1709 default: |
| 1710 UNREACHABLE(); |
| 1711 } |
| 1712 } |
| 1713 |
| 1714 |
| 1715 // Generate the smi code. If the operation on smis is successful, a return
| 1716 // is generated from that code. If the result is not a smi and heap number
| 1717 // allocation is not requested, the code falls through. If allocation is
| 1718 // requested but fails, the code jumps to the gc_required label.
| 1719 void BinaryOpStub_GenerateSmiCode( |
| 1720 MacroAssembler* masm, |
| 1721 Label* use_runtime, |
| 1722 Label* gc_required, |
| 1723 Token::Value op, |
| 1724 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, |
| 1725 OverwriteMode mode) { |
| 1726 ASM_LOCATION("BinaryOpStub_GenerateSmiCode"); |
| 1727 Label not_smis; |
| 1728 |
| 1729 Register left = x1; |
| 1730 Register right = x0; |
| 1731 |
| 1732 // Perform combined smi check on both operands. |
| 1733 __ JumpIfEitherNotSmi(left, right, &not_smis);
| 1734 |
| 1735 // If the smi-smi operation results in a smi, the result is returned from the |
| 1736 // code generated for the operation. Otherwise, execution falls through to |
| 1737 // the following code. |
| 1738 BinaryOpStub_GenerateSmiSmiOperation(masm, op); |
| 1739 |
| 1740 // If heap number results are allowed, generate the result in an allocated |
| 1741 // heap number. |
| 1742 if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { |
| 1743 BinaryOpStub_GenerateFPOperation(masm, BinaryOpIC::UNINITIALIZED, |
| 1744 BinaryOpIC::UNINITIALIZED, true, |
| 1745 use_runtime, gc_required, &not_smis, op,
| 1746 mode); |
| 1747 } |
| 1748 |
| 1749 __ Bind(&not_smis);
| 1750 } |
| 1751 |
| 1752 |
| 1753 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
| 1754 ASM_LOCATION("BinaryOpStub::GenerateSmiStub"); |
| 1755 Label right_arg_changed, call_runtime; |
| 1756 |
| 1757 if ((op_ == Token::MOD) && has_fixed_right_arg_) { |
| 1758 // It is guaranteed that the value will fit into a Smi, because if it |
| 1759 // didn't, we wouldn't be here; see BinaryOp_Patch.
| 1760 __ CompareAndBranch(x0, Operand(Smi::FromInt(fixed_right_arg_value())), ne, |
| 1761 &right_arg_changed); |
| 1762 } |
| 1763 |
| 1764 #ifdef DEBUG |
| 1765 Register saved_left = x18; |
| 1766 Register saved_right = x19; |
| 1767 if (masm->emit_debug_code()) { |
| 1768 __ Mov(saved_left, x1); |
| 1769 __ Mov(saved_right, x0); |
| 1770 } |
| 1771 #endif |
| 1772 |
| 1773 if (result_type_ == BinaryOpIC::UNINITIALIZED || |
| 1774 result_type_ == BinaryOpIC::SMI) { |
| 1775 // Only allow smi results. No allocation should take place, so we don't need |
| 1776 // a label for gc. |
| 1777 BinaryOpStub_GenerateSmiCode(masm, &call_runtime, NULL, op_, |
| 1778 NO_HEAPNUMBER_RESULTS, mode_); |
| 1779 } else { |
| 1780 // Allow heap number result and don't make a transition if a heap number |
| 1781 // cannot be allocated. |
| 1782 BinaryOpStub_GenerateSmiCode(masm, &call_runtime, &call_runtime, op_, |
| 1783 ALLOW_HEAPNUMBER_RESULTS, mode_); |
| 1784 } |
| 1785 |
| 1786 // Code falls through if the result is not returned as either a smi or heap |
| 1787 // number. |
| 1788 __ Bind(&right_arg_changed); |
| 1789 GenerateTypeTransition(masm); |
| 1790 |
| 1791 __ Bind(&call_runtime); |
| 1792 #ifdef DEBUG |
| 1793 if (masm->emit_debug_code()) { |
| 1794 __ Cmp(saved_left, x1); |
| 1795 __ Assert(eq, "lhs has been clobbered."); |
| 1796 __ Cmp(saved_right, x0); |
| 1797 __ Assert(eq, "rhs has been clobbered.");
| 1798 } |
| 1799 #endif |
| 1800 { |
| 1801 FrameScope scope(masm, StackFrame::INTERNAL); |
| 1802 GenerateRegisterArgsPush(masm); |
| 1803 GenerateCallRuntime(masm); |
| 1804 } |
| 1805 __ Ret(); |
| 1806 } |
| 1807 |
| 1808 |
| 1809 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { |
| 1810 ASM_LOCATION("BinaryOpStub::GenerateBothStringStub"); |
| 1811 ASSERT((left_type_ == BinaryOpIC::STRING) && |
| 1812 (right_type_ == BinaryOpIC::STRING)); |
| 1813 ASSERT(op_ == Token::ADD); |
| 1814 Label call_transition; |
| 1815 |
| 1816 // If both arguments are strings, call the string add stub. Otherwise, do a |
| 1817 // transition. |
| 1818 |
| 1819 Register left = x1; |
| 1820 Register right = x0; |
| 1821 |
| 1822 // Test if left operand is a smi or string. |
| 1823 __ JumpIfSmi(left, &call_transition); |
| 1824 __ JumpIfObjectType(left, x2, x2, FIRST_NONSTRING_TYPE, &call_transition, ge); |
| 1825 |
| 1826 // Test if right operand is a smi or string. |
| 1827 __ JumpIfSmi(right, &call_transition); |
| 1828 __ JumpIfObjectType(right, x2, x2, FIRST_NONSTRING_TYPE, &call_transition, |
| 1829 ge); |
| 1830 |
| 1831 StringAddStub string_add_stub( |
| 1832 static_cast<StringAddFlags>(ERECT_FRAME | NO_STRING_CHECK_IN_STUB)); |
| 1833 GenerateRegisterArgsPush(masm); |
| 1834 __ TailCallStub(&string_add_stub); |
| 1835 |
| 1836 __ Bind(&call_transition); |
| 1837 GenerateTypeTransition(masm); |
| 1838 } |
| 1839 |
| 1840 |
| 1841 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
| 1842 // On A64 the smis are 32 bits, so we should never see the INT32 type.
| 1843 UNREACHABLE(); |
| 1844 } |
| 1845 |
| 1846 |
| 1847 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { |
| 1848 ASM_LOCATION("BinaryOpStub::GenerateOddballStub"); |
| 1849 Register right = x0; |
| 1850 Register left = x1; |
| 1851 |
| 1852 if (op_ == Token::ADD) { |
| 1853 // Handle string addition here, because it is the only operation that does |
| 1854 // not do a ToNumber conversion on the operands. |
| 1855 GenerateAddStrings(masm); |
| 1856 } |
| 1857 |
| 1858 // Convert oddball arguments to numbers. |
| 1859 Label check, done; |
| 1860 __ JumpIfNotRoot(left, Heap::kUndefinedValueRootIndex, &check); |
| 1861 if (Token::IsBitOp(op_)) { |
| 1862 __ Mov(left, 0); |
| 1863 } else { |
| 1864 __ LoadRoot(left, Heap::kNanValueRootIndex); |
| 1865 } |
| 1866 __ B(&done); |
| 1867 |
| 1868 __ Bind(&check); |
| 1869 __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &done); |
| 1870 if (Token::IsBitOp(op_)) { |
| 1871 __ Mov(right, 0); |
| 1872 } else { |
| 1873 __ LoadRoot(right, Heap::kNanValueRootIndex); |
| 1874 } |
| 1875 |
| 1876 __ Bind(&done); |
| 1877 |
| 1878 GenerateNumberStub(masm); |
| 1879 } |
| 1880 |
| 1881 |
| 1882 void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { |
| 1883 ASM_LOCATION("BinaryOpStub::GenerateNumberStub"); |
| 1884 Label call_runtime, transition; |
| 1885 |
| 1886 BinaryOpStub_GenerateFPOperation(masm, left_type_, right_type_, false, |
| 1887 &transition, &call_runtime, &transition, |
| 1888 op_, mode_); |
| 1889 |
| 1890 __ Bind(&transition); |
| 1891 GenerateTypeTransition(masm); |
| 1892 |
| 1893 __ Bind(&call_runtime); |
| 1894 { |
| 1895 FrameScope scope(masm, StackFrame::INTERNAL); |
| 1896 GenerateRegisterArgsPush(masm); |
| 1897 GenerateCallRuntime(masm); |
| 1898 } |
| 1899 __ Ret(); |
| 1900 } |
| 1901 |
| 1902 |
| 1903 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
| 1904 ASM_LOCATION("BinaryOpStub::GenerateGeneric"); |
| 1905 Label call_runtime, call_string_add_or_runtime, transition; |
| 1906 |
| 1907 BinaryOpStub_GenerateSmiCode(masm, &call_runtime, &call_runtime, op_, |
| 1908 ALLOW_HEAPNUMBER_RESULTS, mode_); |
| 1909 |
| 1910 BinaryOpStub_GenerateFPOperation(masm, left_type_, right_type_, false, |
| 1911 &call_string_add_or_runtime, &call_runtime, |
| 1912 &transition, op_, mode_); |
| 1913 |
| 1914 __ Bind(&transition); |
| 1915 GenerateTypeTransition(masm); |
| 1916 |
| 1917 __ Bind(&call_string_add_or_runtime); |
| 1918 if (op_ == Token::ADD) { |
| 1919 GenerateAddStrings(masm); |
| 1920 } |
| 1921 |
| 1922 __ Bind(&call_runtime); |
| 1923 { |
| 1924 FrameScope scope(masm, StackFrame::INTERNAL); |
| 1925 GenerateRegisterArgsPush(masm); |
| 1926 GenerateCallRuntime(masm); |
| 1927 } |
| 1928 __ Ret(); |
| 1929 } |
| 1930 |
| 1931 |
| 1932 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { |
| 1933 ASM_LOCATION("BinaryOpStub::GenerateAddStrings"); |
| 1934 ASSERT(op_ == Token::ADD); |
| 1935 Label left_not_string, call_runtime; |
| 1936 |
| 1937 Register left = x1; |
| 1938 Register right = x0; |
| 1939 |
| 1940 // Check if left argument is a string. |
| 1941 __ JumpIfSmi(left, &left_not_string); |
| 1942 __ JumpIfObjectType(left, x2, x2, FIRST_NONSTRING_TYPE, &left_not_string, ge); |
| 1943 |
| 1944 StringAddStub string_add_left_stub( |
| 1945 static_cast<StringAddFlags>(ERECT_FRAME | NO_STRING_CHECK_LEFT_IN_STUB)); |
| 1946 GenerateRegisterArgsPush(masm); |
| 1947 __ TailCallStub(&string_add_left_stub); |
| 1948 |
| 1949 // Left operand is not a string, test right. |
| 1950 __ Bind(&left_not_string); |
| 1951 __ JumpIfSmi(right, &call_runtime); |
| 1952 __ JumpIfObjectType(right, x2, x2, FIRST_NONSTRING_TYPE, &call_runtime, ge); |
| 1953 |
| 1954 StringAddStub string_add_right_stub( |
| 1955 static_cast<StringAddFlags>(ERECT_FRAME | NO_STRING_CHECK_RIGHT_IN_STUB)); |
| 1956 GenerateRegisterArgsPush(masm); |
| 1957 __ TailCallStub(&string_add_right_stub); |
| 1958 |
| 1959 // Neither argument is a string. |
| 1960 __ Bind(&call_runtime); |
| 1961 } |
| 1962 |
| 1963 |
| 1964 void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, |
| 1965 Register result, |
| 1966 Register heap_number_map, |
| 1967 Register scratch1, |
| 1968 Register scratch2, |
| 1969 Label* gc_required, |
| 1970 OverwriteMode mode) { |
| 1971 ASM_LOCATION("BinaryOpStub_GenerateHeapResultAllocation");
| 1972 ASSERT(!AreAliased(result, heap_number_map, scratch1, scratch2)); |
| 1973 |
| 1974 if ((mode == OVERWRITE_LEFT) || (mode == OVERWRITE_RIGHT)) { |
| 1975 Label skip_allocation, allocated; |
| 1976 Register overwritable_operand = (mode == OVERWRITE_LEFT) ? x1 : x0; |
| 1977 if (masm->emit_debug_code()) { |
| 1978 // Check that the overwritable operand is a Smi or a HeapNumber. |
| 1979 Label ok; |
| 1980 __ JumpIfSmi(overwritable_operand, &ok); |
| 1981 __ JumpIfHeapNumber(overwritable_operand, &ok); |
| 1982 __ Abort("The overwritable operand should be a HeapNumber"); |
| 1983 __ Bind(&ok); |
| 1984 } |
| 1985 // If the overwritable operand is already a HeapNumber, we can skip |
| 1986 // allocation of a heap number. |
| 1987 __ JumpIfNotSmi(overwritable_operand, &skip_allocation); |
| 1988 // Allocate a heap number for the result. |
| 1989 __ AllocateHeapNumber(result, gc_required, scratch1, scratch2, |
| 1990 heap_number_map); |
| 1991 __ B(&allocated); |
| 1992 __ Bind(&skip_allocation); |
| 1993 // Use object holding the overwritable operand for result. |
| 1994 __ Mov(result, overwritable_operand); |
| 1995 __ Bind(&allocated); |
| 1996 } else { |
| 1997 ASSERT(mode == NO_OVERWRITE); |
| 1998 __ AllocateHeapNumber(result, gc_required, scratch1, scratch2, |
| 1999 heap_number_map); |
| 2000 } |
| 2001 } |
| 2002 |
| 2003 |
| 2004 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { |
| 2005 __ Push(x1, x0); |
| 2006 } |
| 2007 |
| 2008 |
| 2009 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
| 2010 // Untagged case: |
| 2011 // Input: double in d0 |
| 2012 // Result: double in d0 |
| 2013 // |
| 2014 // Tagged case: |
| 2015 // Input: tagged value in jssp[0] |
| 2016 // Result: tagged value in x0 |
| 2017 |
| 2018 const bool tagged = (argument_type_ == TAGGED); |
| 2019 |
| 2020 Label calculate; |
| 2021 Label invalid_cache; |
| 2022 Register scratch0 = x10; |
| 2023 Register scratch1 = x11; |
| 2024 Register cache_entry = x12; |
| 2025 Register hash = x13; |
| 2026 Register hash_w = hash.W(); |
| 2027 Register input_double_bits = x14; |
| 2028 Register input_tagged = x15; |
| 2029 Register result_tagged = x0; |
| 2030 FPRegister result_double = d0; |
| 2031 FPRegister input_double = d0; |
| 2032 |
| 2033 // First, get the input as a double, in an integer register (so we can |
| 2034 // calculate a hash). |
| 2035 if (tagged) { |
| 2036 Label input_not_smi, loaded; |
| 2037 // Load argument and check if it is a smi. |
| 2038 __ Pop(input_tagged); |
| 2039 __ JumpIfNotSmi(input_tagged, &input_not_smi); |
| 2040 |
| 2041 // Input is a smi, so convert it to a double. |
| 2042 __ SmiUntagToDouble(input_double, input_tagged); |
| 2043 __ Fmov(input_double_bits, input_double); |
| 2044 __ B(&loaded); |
| 2045 |
| 2046 __ Bind(&input_not_smi); |
| 2047 // Check if input is a HeapNumber. |
| 2048 __ JumpIfNotHeapNumber(input_tagged, &calculate); |
| 2049 // The input is a HeapNumber. Load it into input_double_bits. |
| 2050 __ Ldr(input_double_bits, |
| 2051 FieldMemOperand(input_tagged, HeapNumber::kValueOffset)); |
| 2052 |
| 2053 __ Bind(&loaded); |
| 2054 } else { |
| 2055 // Get the integer representation of the double. |
| 2056 __ Fmov(input_double_bits, input_double); |
| 2057 } |
| 2058 |
| 2059 // Compute hash (the shifts are arithmetic): |
| 2060 // h = (input_double_bits[31:0] ^ input_double_bits[63:32]); |
| 2061 // h ^= h >> 16; |
| 2062 // h ^= h >> 8; |
| 2063 // h = h % cacheSize; |
| 2064 __ Eor(hash, input_double_bits, Operand(input_double_bits, LSR, 32)); |
| 2065 __ Eor(hash_w, hash_w, Operand(hash_w, ASR, 16)); |
| 2066 __ Eor(hash_w, hash_w, Operand(hash_w, ASR, 8)); |
| 2067 __ And(hash_w, hash_w, TranscendentalCache::SubCache::kCacheSize - 1); |
| 2068 STATIC_ASSERT(IS_POWER_OF_TWO(TranscendentalCache::SubCache::kCacheSize)); |
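| // A C++ sketch of the hash above, where bits is input_double_bits (this
| // mirrors the assembly; the masking relies on kCacheSize being a power of
| // two, as the STATIC_ASSERT checks):
| // uint32_t h = static_cast<uint32_t>(bits ^ (bits >> 32));
| // h ^= static_cast<uint32_t>(static_cast<int32_t>(h) >> 16);
| // h ^= static_cast<uint32_t>(static_cast<int32_t>(h) >> 8);
| // h &= TranscendentalCache::SubCache::kCacheSize - 1;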
| 2069 |
| 2070 // d0 input_double Double input value (if UNTAGGED). |
| 2071 // x13(w13) hash(_w) TranscendentalCache::hash(input). |
| 2072 // x14 input_double_bits Input value as double bits. |
| 2073 // x15 input_tagged Tagged input value (if TAGGED). |
| 2074 Isolate* isolate = masm->isolate(); |
| 2075 ExternalReference cache_array = |
| 2076 ExternalReference::transcendental_cache_array_address(isolate); |
| 2077 int cache_array_index = |
| 2078 type_ * sizeof(isolate->transcendental_cache()->caches_[0]); |
| 2079 |
| 2080 __ Mov(cache_entry, Operand(cache_array)); |
| 2081 __ Ldr(cache_entry, MemOperand(cache_entry, cache_array_index)); |
| 2082 |
| 2083 // x12 cache_entry The address of the cache for type_. |
| 2084 // If NULL, the cache hasn't been initialized yet, so go through runtime. |
| 2085 __ Cbz(cache_entry, &invalid_cache); |
| 2086 |
| 2087 #ifdef DEBUG |
| 2088 // Check that the layout of cache elements matches expectations.
| 2089 { TranscendentalCache::SubCache::Element test_elem[2]; |
| 2090 uintptr_t elem_start = reinterpret_cast<uintptr_t>(&test_elem[0]); |
| 2091 uintptr_t elem2_start = reinterpret_cast<uintptr_t>(&test_elem[1]); |
| 2092 uintptr_t elem_in0 = reinterpret_cast<uintptr_t>(&(test_elem[0].in[0])); |
| 2093 uintptr_t elem_in1 = reinterpret_cast<uintptr_t>(&(test_elem[0].in[1])); |
| 2094 uintptr_t elem_out = reinterpret_cast<uintptr_t>(&(test_elem[0].output)); |
| 2095 CHECK_EQ(16, elem2_start - elem_start); // Two uint32_ts and a pointer.
| 2096 CHECK_EQ(0, elem_in0 - elem_start); |
| 2097 CHECK_EQ(kIntSize, elem_in1 - elem_start); |
| 2098 CHECK_EQ(2 * kIntSize, elem_out - elem_start); |
| 2099 } |
| 2100 #endif |
| 2101 |
| 2102 // The (candidate) cached element is at cache[hash*16]. |
| 2103 __ Add(cache_entry, cache_entry, Operand(hash, LSL, 4)); |
| 2104 __ Ldp(scratch0, result_tagged, MemOperand(cache_entry)); |
| 2105 __ Cmp(scratch0, input_double_bits); |
| 2106 __ B(&calculate, ne); |
| 2107 |
| 2108 // Cache hit: Load the result and return. |
| 2109 |
| 2110 __ IncrementCounter(isolate->counters()->transcendental_cache_hit(), 1, |
| 2111 scratch0, scratch1); |
| 2112 if (!tagged) { |
| 2113 // result_tagged already holds the tagged result from the cache, but the
| 2114 // untagged case must return the double value, so load it into result_double.
| 2115 __ Ldr(result_double, FieldMemOperand(result_tagged, |
| 2116 HeapNumber::kValueOffset)); |
| 2117 } |
| 2118 __ Ret(); |
| 2119 |
| 2120 // Cache miss: Calculate the result. |
| 2121 |
| 2122 __ Bind(&calculate); |
| 2123 __ IncrementCounter(isolate->counters()->transcendental_cache_miss(), 1, |
| 2124 scratch0, scratch1); |
| 2125 if (tagged) { |
| 2126 __ Bind(&invalid_cache); |
| 2127 __ Push(input_tagged); |
| 2128 ExternalReference runtime_function = ExternalReference(RuntimeFunction(), |
| 2129 masm->isolate()); |
| 2130 __ TailCallExternalReference(runtime_function, 1, 1); |
| 2131 } else { |
| 2132 Label gc_required; |
| 2133 Label calculation_and_gc_required; |
| 2134 |
| 2135 // Call a C function to calculate the result, then update the cache. |
| 2136 // The following caller-saved registers need to be preserved for the call: |
| 2137 // x12 cache_entry The address of the cache for type_. |
| 2138 // x14 input_double_bits The bit representation of the input. |
| 2139 // lr The return address of the stub. |
| 2140 __ Push(cache_entry, input_double_bits, lr); |
| 2141 ASSERT(input_double.Is(d0)); |
| 2142 { AllowExternalCallThatCantCauseGC scope(masm); |
| 2143 __ CallCFunction(CFunction(isolate), 0, 1); |
| 2144 } |
| 2145 ASSERT(result_double.Is(d0)); |
| 2146 __ Pop(lr, input_double_bits, cache_entry); |
| 2147 |
| 2148 // Try to update the cache. |
| 2149 __ AllocateHeapNumber(result_tagged, &gc_required, scratch0, scratch1); |
| 2150 __ Str(result_double, FieldMemOperand(result_tagged, |
| 2151 HeapNumber::kValueOffset)); |
| 2152 __ Stp(input_double_bits, result_tagged, MemOperand(cache_entry)); |
| 2153 __ Ret(); |
| 2154 |
| 2155 |
| 2156 __ Bind(&invalid_cache); |
| 2157 // Handle an invalid (uninitialized) cache by calling the runtime. |
| 2158 // d0 input_double Double input value (if UNTAGGED). |
| 2159 __ AllocateHeapNumber(result_tagged, &calculation_and_gc_required, |
| 2160 scratch0, scratch1); |
| 2161 __ Str(input_double, FieldMemOperand(result_tagged, |
| 2162 HeapNumber::kValueOffset)); |
| 2163 { FrameScope scope(masm, StackFrame::INTERNAL); |
| 2164 __ Push(result_tagged); |
| 2165 __ CallRuntime(RuntimeFunction(), 1); |
| 2166 } |
| 2167 __ Ldr(result_double, FieldMemOperand(result_tagged, |
| 2168 HeapNumber::kValueOffset)); |
| 2169 __ Ret(); |
| 2170 |
| 2171 |
| 2172 __ Bind(&calculation_and_gc_required); |
| 2173 // Call the C function to calculate the result and return it directly,
| 2174 // without updating the cache.
| 2175 ASSERT(input_double.Is(d0)); |
| 2176 { AllowExternalCallThatCantCauseGC scope(masm); |
| 2177 __ CallCFunction(CFunction(isolate), 0, 1); |
| 2178 } |
| 2179 ASSERT(result_double.Is(d0)); |
| 2180 |
| 2181 |
| 2182 // We got here because an allocation failed. Trigger a scavenging GC so that |
| 2183 // future allocations will succeed. |
| 2184 __ Bind(&gc_required); |
| 2185 __ Push(result_double); |
| 2186 { FrameScope scope(masm, StackFrame::INTERNAL); |
| 2187 // Allocate an aligned object larger than a HeapNumber. |
| 2188 int alloc_size = 2 * kPointerSize; |
| 2189 ASSERT(alloc_size >= HeapNumber::kSize); |
| 2190 __ Mov(scratch0, Operand(Smi::FromInt(alloc_size))); |
| 2191 __ Push(scratch0); |
| 2192 __ CallRuntime(Runtime::kAllocateInNewSpace, 1); |
| 2193 } |
| 2194 __ Pop(result_double); |
| 2195 __ Ret(); |
| 2196 } |
| 2197 } |
| 2198 |
| 2199 |
| 2200 ExternalReference TranscendentalCacheStub::CFunction(Isolate* isolate) { |
| 2201 switch (type_) { |
| 2202 // Add more cases when necessary. |
| 2203 default: |
| 2204 // There's no NULL ExternalReference, so fall into an existing case to |
| 2205 // avoid compiler warnings about not having a return value. |
| 2206 UNIMPLEMENTED(); |
| 2207 case TranscendentalCache::SIN: |
| 2208 return ExternalReference::math_sin_double_function(isolate); |
| 2209 case TranscendentalCache::COS: |
| 2210 return ExternalReference::math_cos_double_function(isolate); |
| 2211 case TranscendentalCache::TAN: |
| 2212 return ExternalReference::math_tan_double_function(isolate); |
| 2213 case TranscendentalCache::LOG: |
| 2214 return ExternalReference::math_log_double_function(isolate); |
| 2215 } |
| 2216 } |
| 2217 |
| 2218 |
| 2219 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { |
| 2220 switch (type_) { |
| 2221 // Add more cases when necessary. |
| 2222 case TranscendentalCache::SIN: return Runtime::kMath_sin; |
| 2223 case TranscendentalCache::COS: return Runtime::kMath_cos; |
| 2224 case TranscendentalCache::TAN: return Runtime::kMath_tan; |
| 2225 case TranscendentalCache::LOG: return Runtime::kMath_log; |
| 2226 default: |
| 2227 UNIMPLEMENTED(); |
| 2228 return Runtime::kAbort; |
| 2229 } |
| 2230 } |
| 2231 |
| 2232 |
| 2233 void StackCheckStub::Generate(MacroAssembler* masm) { |
| 2234 __ TailCallRuntime(Runtime::kStackGuard, 0, 1); |
| 2235 } |
| 2236 |
| 2237 |
| 2238 void InterruptStub::Generate(MacroAssembler* masm) { |
| 2239 __ TailCallRuntime(Runtime::kInterrupt, 0, 1); |
| 2240 } |
| 2241 |
| 2242 |
| 2243 void MathPowStub::Generate(MacroAssembler* masm) { |
| 2244 // Stack on entry: |
| 2245 // jssp[0]: Exponent (as a tagged value). |
| 2246 // jssp[1]: Base (as a tagged value). |
| 2247 // |
| 2248 // The (tagged) result will be returned in x0, as a heap number. |
| 2249 |
| 2250 Register result_tagged = x0; |
| 2251 Register base_tagged = x10; |
| 2252 Register exponent_tagged = x11; |
| 2253 Register exponent_integer = x12; |
| 2254 Register scratch1 = x14; |
| 2255 Register scratch0 = x15; |
| 2256 FPRegister result_double = d0; |
| 2257 FPRegister base_double = d1; |
| 2258 FPRegister exponent_double = d2; |
| 2259 FPRegister scratch1_double = d6; |
| 2260 FPRegister scratch0_double = d7; |
| 2261 |
| 2262 // A fast-path for integer exponents. |
| 2263 Label exponent_is_smi, exponent_is_integer; |
| 2264 // Bail out to runtime. |
| 2265 Label call_runtime; |
| 2266 // Allocate a heap number for the result, and return it. |
| 2267 Label done; |
| 2268 |
| 2269 // TODO(all): Cases other than ON_STACK are only used by Lithium, and we do |
| 2270 // not yet support them. |
| 2271 ASSERT(exponent_type_ == ON_STACK); |
| 2272 |
| 2273 // Unpack the inputs. |
| 2274 if (exponent_type_ == ON_STACK) { |
| 2275 Label base_is_smi; |
| 2276 Label unpack_exponent; |
| 2277 |
| 2278 __ Pop(exponent_tagged, base_tagged); |
| 2279 |
| 2280 __ JumpIfSmi(base_tagged, &base_is_smi); |
| 2281 __ JumpIfNotHeapNumber(base_tagged, &call_runtime); |
| 2282 // base_tagged is a heap number, so load its double value. |
| 2283 __ Ldr(base_double, FieldMemOperand(base_tagged, HeapNumber::kValueOffset)); |
| 2284 __ B(&unpack_exponent); |
| 2285 __ Bind(&base_is_smi); |
| 2286 // base_tagged is a SMI, so untag it and convert it to a double. |
| 2287 __ SmiUntagToDouble(base_double, base_tagged); |
| 2288 |
| 2289 __ Bind(&unpack_exponent); |
| 2290 // x10 base_tagged The tagged base (input). |
| 2291 // x11 exponent_tagged The tagged exponent (input). |
| 2292 // d1 base_double The base as a double. |
| 2293 __ JumpIfSmi(exponent_tagged, &exponent_is_smi); |
| 2294 __ JumpIfNotHeapNumber(exponent_tagged, &call_runtime); |
| 2295 // exponent_tagged is a heap number, so load its double value. |
| 2296 __ Ldr(exponent_double, |
| 2297 FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset)); |
| 2298 } else { |
| 2299 UNIMPLEMENTED_M("MathPowStub types other than ON_STACK are unimplemented."); |
| 2300 } |
| 2301 |
| 2302 // Handle double (heap number) exponents. |
| 2303 if (exponent_type_ != INTEGER) { |
| 2304 // Detect integer exponents stored as doubles and handle those in the |
| 2305 // integer fast-path. |
| 2306 __ TryConvertDoubleToInt64(exponent_integer, exponent_double, |
| 2307 scratch0_double, &exponent_is_integer); |
| 2308 |
| 2309 if (exponent_type_ == ON_STACK) { |
| 2310 FPRegister half_double = d3; |
| 2311 FPRegister minus_half_double = d4; |
| 2312 FPRegister zero_double = d5; |
| 2313 // Detect square root case. Crankshaft detects constant +/-0.5 at compile |
| 2314 // time and uses DoMathPowHalf instead. We then skip this check for |
| 2315 // non-constant cases of +/-0.5 as these hardly occur. |
| 2316 |
| 2317 __ Fmov(minus_half_double, -0.5); |
| 2318 __ Fmov(half_double, 0.5); |
| 2319 __ Fcmp(minus_half_double, exponent_double); |
| 2320 __ Fccmp(half_double, exponent_double, NZFlag, ne); |
| 2321 // Condition flags at this point: |
| 2322 // 0.5: nZCv // Identified by eq && pl
| 2323 // -0.5: NZcv // Identified by eq && mi |
| 2324 // other: ?z?? // Identified by ne |
| 2325 __ B(ne, &call_runtime); |
| 2326 |
| 2327 // The exponent is 0.5 or -0.5. |
| 2328 |
| 2329 // Given that exponent is known to be either 0.5 or -0.5, the following |
| 2330 // special cases could apply (according to ECMA-262 15.8.2.13): |
| 2331 // |
| 2332 // base.isNaN(): The result is NaN. |
| 2333 // (base == +INFINITY) || (base == -INFINITY) |
| 2334 // exponent == 0.5: The result is +INFINITY. |
| 2335 // exponent == -0.5: The result is +0. |
| 2336 // (base == +0) || (base == -0) |
| 2337 // exponent == 0.5: The result is +0. |
| 2338 // exponent == -0.5: The result is +INFINITY. |
| 2339 // (base < 0) && base.isFinite(): The result is NaN. |
| 2340 // |
| 2341 // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except |
| 2342 // where base is -INFINITY or -0. |
| 2343 |
| 2344 // Add +0 to base. This has no effect other than turning -0 into +0. |
| 2345 __ Fmov(zero_double, 0.0); |
| 2346 __ Fadd(base_double, base_double, zero_double); |
| 2347 // The operation -0+0 results in +0 in all cases except where the |
| 2348 // FPCR rounding mode is 'round towards minus infinity' (RM). The |
| 2349 // A64 simulator does not currently simulate FPCR (where the rounding |
| 2350 // mode is set), so test the operation with some debug code. |
| 2351 if (masm->emit_debug_code()) { |
| 2352 Register temp = masm->Tmp1(); |
| 2353 // d5 zero_double The value +0.0 as a double. |
| 2354 __ Fneg(scratch0_double, zero_double); |
| 2355 // Verify that we correctly generated +0.0 and -0.0. |
| 2356 // bits(+0.0) = 0x0000000000000000 |
| 2357 // bits(-0.0) = 0x8000000000000000 |
| 2358 __ Fmov(temp, zero_double); |
| 2359 __ CheckRegisterIsClear(temp, "Could not generate +0.0."); |
| 2360 __ Fmov(temp, scratch0_double); |
| 2361 __ Eor(temp, temp, kDSignMask); |
| 2362 __ CheckRegisterIsClear(temp, "Could not generate -0.0."); |
| 2363 // Check that -0.0 + 0.0 == +0.0. |
| 2364 __ Fadd(scratch0_double, scratch0_double, zero_double); |
| 2365 __ Fmov(temp, scratch0_double); |
| 2366 __ CheckRegisterIsClear(temp, "-0.0 + 0.0 did not produce +0.0."); |
| 2367 } |
| 2368 |
| 2369 // If base is -INFINITY, make it +INFINITY. |
| 2370 // * Calculate base - base: All infinities will become NaNs since both |
| 2371 // -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in A64. |
| 2372 // * If the result is NaN, calculate abs(base). |
| 2373 __ Fsub(scratch0_double, base_double, base_double); |
| 2374 __ Fcmp(scratch0_double, 0.0); |
| 2375 __ Fabs(scratch1_double, base_double); |
| 2376 __ Fcsel(base_double, scratch1_double, base_double, vs); |
| 2377 |
| 2378 // Calculate the square root of base. |
| 2379 __ Fsqrt(result_double, base_double); |
| 2380 __ Fcmp(exponent_double, 0.0); |
| 2381 __ B(ge, &done); // Finish now for exponents of 0.5. |
| 2382 // Find the inverse for exponents of -0.5. |
| 2383 __ Fmov(scratch0_double, 1.0); |
| 2384 __ Fdiv(result_double, scratch0_double, result_double); |
| 2385 __ B(&done); |
| 2386 } else { |
| 2387 UNIMPLEMENTED_M( |
| 2388 "MathPowStub types other than ON_STACK are unimplemented."); |
| 2389 } |
| 2390 |
| 2391 // TODO(all): From here, call the C power function for non-ON_STACK types. |
| 2392 // ON_STACK types should not be able to reach this point. |
| 2393 ASM_UNIMPLEMENTED_BREAK( |
| 2394 "MathPowStub types other than ON_STACK are unimplemented."); |
| 2395 } else { |
| 2396 UNIMPLEMENTED_M("MathPowStub types other than ON_STACK are unimplemented."); |
| 2397 } |
| 2398 |
| 2399 // Handle integer (and SMI) exponents. |
| 2400 __ Bind(&exponent_is_smi); |
| 2401 // x10 base_tagged The tagged base (input). |
| 2402 // x11 exponent_tagged The tagged exponent (input). |
| 2403 // d1 base_double The base as a double. |
| 2404 __ SmiUntag(exponent_integer, exponent_tagged); |
| 2405 __ Bind(&exponent_is_integer); |
| 2406 // x10 base_tagged The tagged base (input). |
| 2407 // x11 exponent_tagged The tagged exponent (input). |
| 2408 // x12 exponent_integer The exponent as an integer. |
| 2409 // d1 base_double The base as a double. |
| 2410 |
| 2411 // Find abs(exponent). For negative exponents, we can find the inverse later. |
| 2412 Register exponent_abs = x13; |
| 2413 __ Cmp(exponent_integer, 0); |
| 2414 __ Cneg(exponent_abs, exponent_integer, mi); |
| 2415 // x13 exponent_abs The value of abs(exponent_integer). |
| 2416 |
| 2417 // Repeatedly multiply to calculate the power. |
| 2418 // result = 1.0; |
| 2419 // For each bit n (exponent_integer{n}) { |
| 2420 // if (exponent_integer{n}) { |
| 2421 // result *= base; |
| 2422 // } |
| 2423 // base *= base; |
| 2424 // if (remaining bits in exponent_integer are all zero) { |
| 2425 // break; |
| 2426 // } |
| 2427 // } |
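| // For example, with exponent_abs == 5 (binary 101), the loop multiplies
| // result by base (bit 0), squares base twice, then multiplies result by
| // base^4 (bit 2), producing base^5.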
| 2428 Label power_loop, power_loop_entry, power_loop_exit; |
| 2429 __ Fmov(scratch1_double, base_double); |
| 2430 __ Fmov(result_double, 1.0); |
| 2431 __ B(&power_loop_entry); |
| 2432 |
| 2433 __ Bind(&power_loop); |
| 2434 __ Fmul(scratch1_double, scratch1_double, scratch1_double); |
| 2435 __ Lsr(exponent_abs, exponent_abs, 1); |
| 2436 __ Cbz(exponent_abs, &power_loop_exit); |
| 2437 |
| 2438 __ Bind(&power_loop_entry); |
| 2439 __ Tbz(exponent_abs, 0, &power_loop); |
| 2440 __ Fmul(result_double, result_double, scratch1_double); |
| 2441 __ B(&power_loop); |
| 2442 |
| 2443 __ Bind(&power_loop_exit); |
| 2444 |
| 2445 // If the exponent was positive, result_double holds the result. |
| 2446 __ Tbz(exponent_integer, kXSignBit, &done); |
| 2447 |
| 2448 // The exponent was negative, so find the inverse. |
| 2449 __ Fmov(scratch0_double, 1.0); |
| 2450 __ Fdiv(result_double, scratch0_double, result_double); |
| 2451 // ECMA-262 only requires Math.pow to return an 'implementation-dependent |
| 2452 // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow |
| 2453 // to calculate the subnormal value 2^-1074. This method of calculating |
| 2454 // negative powers doesn't work because 2^1074 overflows to infinity. To |
| 2455 // catch this corner-case, we bail out if the result was 0. (This can only |
| 2456 // occur if the divisor is infinity or the base is zero.) |
| 2457 __ Fcmp(result_double, 0.0); |
| 2458 __ B(&done, ne); |
| 2459 |
| 2460 if (exponent_type_ == ON_STACK) { |
| 2461 // Bail out to runtime code. |
| 2462 __ Bind(&call_runtime); |
| 2463 // Put the arguments back on the stack. |
| 2464 __ Push(base_tagged, exponent_tagged); |
| 2465 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); |
| 2466 |
| 2467 // Return. |
| 2468 __ Bind(&done); |
| 2469 __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1); |
| 2470 __ Str(result_double, |
| 2471 FieldMemOperand(result_tagged, HeapNumber::kValueOffset)); |
| 2472 ASSERT(result_tagged.is(x0)); |
| 2473 __ IncrementCounter( |
| 2474 masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1); |
| 2475 __ Ret(); |
| 2476 } else { |
| 2477 UNIMPLEMENTED_M("MathPowStub types other than ON_STACK are unimplemented."); |
| 2478 } |
| 2479 } |
| 2480 |
| 2481 |
| 2482 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { |
| 2483 // It is important that the following stubs are generated in this order |
| 2484 // because pregenerated stubs can only call other pregenerated stubs. |
| 2485 // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses |
| 2486 // CEntryStub. |
| 2487 CEntryStub::GenerateAheadOfTime(isolate); |
| 2488 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); |
| 2489 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); |
| 2490 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); |
| 2491 |
| 2492 if (FLAG_optimize_constructed_arrays) { |
| 2493 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); |
| 2494 } |
| 2495 } |
| 2496 |
| 2497 |
| 2498 void CodeStub::GenerateFPStubs(Isolate* isolate) { |
| 2499 // Floating-point code doesn't get special handling in A64, so there's |
| 2500 // nothing to do here. |
| 2501 USE(isolate); |
| 2502 } |
| 2503 |
| 2504 |
| 2505 static void JumpIfOOM(MacroAssembler* masm, |
| 2506 Register value, |
| 2507 Register scratch, |
| 2508 Label* oom_label) { |
| 2509 STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3); |
| 2510 STATIC_ASSERT(kFailureTag == 3); |
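| // An OOM failure therefore has 0b11 (the failure tag) in its low two bits
| // and 0b11 (the OUT_OF_MEMORY_EXCEPTION type) in the two bits above, so
| // its low nibble is 0b1111, which the mask-and-compare below detects.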
| 2511 __ And(scratch, value, 0xf); |
| 2512 __ Cmp(scratch, 0xf); |
| 2513 __ B(eq, oom_label); |
| 2514 } |
| 2515 |
| 2516 |
| 2517 bool CEntryStub::NeedsImmovableCode() { |
| 2518 // CEntryStub stores the return address on the stack before calling into |
| 2519 // C++ code. In some cases, the VM accesses this address, but it is not used |
| 2520 // when the C++ code returns to the stub because LR holds the return address |
| 2521 // in AAPCS64. If the stub is moved (perhaps during a GC), we could end up |
| 2522 // returning to dead code. |
| 2523 // TODO(jbramley): Whilst this is the only analysis that makes sense, I can't |
| 2524 // find any comment to confirm this, and I don't hit any crashes whatever |
| 2525 // this function returns. The analysis should be properly confirmed.
| 2526 return true; |
| 2527 } |
| 2528 |
| 2529 |
| 2530 bool CEntryStub::IsPregenerated() { |
| 2531 // TODO(jbramley): We should pregenerate kSaveFPRegs too, once we support it. |
| 2532 return (save_doubles_ == kDontSaveFPRegs) && (result_size_ == 1); |
| 2533 } |
| 2534 |
| 2535 |
| 2536 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { |
| 2537 CEntryStub stub(1, kDontSaveFPRegs); |
| 2538 stub.GetCode(isolate)->set_is_pregenerated(true); |
| 2539 // TODO(jbramley): We should generate kSaveFPRegs here too, but it is not yet |
| 2540 // implemented by CEntryStub because it is only used by Lithium. |
| 2541 } |
| 2542 |
| 2543 |
| 2544 void CEntryStub::GenerateCore(MacroAssembler* masm, |
| 2545 Label* throw_normal, |
| 2546 Label* throw_termination, |
| 2547 Label* throw_out_of_memory, |
| 2548 bool do_gc, |
| 2549 bool always_allocate) { |
| 2550 // x0 : Result parameter for PerformGC, if do_gc is true. |
| 2551 // x21 : argv |
| 2552 // x22 : argc |
| 2553 // x23 : target |
| 2554 // |
| 2555 // The stack (on entry) holds the arguments and the receiver, with the |
| 2556 // receiver at the highest address: |
| 2557 // |
| 2558 // argv[8]: receiver |
| 2559 // argv -> argv[0]: arg[argc-2] |
| 2560 // ... ... |
| 2561 // argv[...]: arg[1] |
| 2562 // argv[...]: arg[0] |
| 2563 // |
| 2564 // Immediately below (after) this is the exit frame, as constructed by |
| 2565 // EnterExitFrame: |
| 2566 // fp[8]: CallerPC (lr) |
| 2567 // fp -> fp[0]: CallerFP (old fp) |
| 2568 // fp[-8]: Space reserved for SPOffset. |
| 2569 // fp[-16]: CodeObject() |
| 2570 // csp[...]: Saved doubles, if saved_doubles is true. |
| 2571 // csp[32]: Alignment padding, if necessary. |
| 2572 // csp[24]: Preserved x23 (used for target). |
| 2573 // csp[16]: Preserved x22 (used for argc). |
| 2574 // csp[8]: Preserved x21 (used for argv). |
| 2575 // csp -> csp[0]: Space reserved for the return address. |
| 2576 // |
| 2577 // After a successful call, the exit frame, preserved registers (x21-x23) and |
| 2578 // the arguments (including the receiver) are dropped or popped as |
| 2579 // appropriate. The stub then returns. |
| 2580 // |
| 2581 // After an unsuccessful call, the exit frame and suchlike are left |
| 2582 // untouched, and the stub either throws an exception by jumping to one of |
| 2583 // the provided throw_ labels, or it falls through. The failure details are |
| 2584 // passed through in x0. |
| 2585 ASSERT(csp.Is(__ StackPointer())); |
| 2586 |
| 2587 Isolate* isolate = masm->isolate(); |
| 2588 |
| 2589 const Register& argv = x21; |
| 2590 const Register& argc = x22; |
| 2591 const Register& target = x23; |
| 2592 |
| 2593 if (do_gc) { |
| 2594 // Call Runtime::PerformGC, passing x0 (the result parameter for |
| 2595 // PerformGC). |
| 2596 __ CallCFunction( |
| 2597 ExternalReference::perform_gc_function(isolate), 1, 0); |
| 2598 } |
| 2599 |
| 2600 ExternalReference scope_depth = |
| 2601 ExternalReference::heap_always_allocate_scope_depth(isolate); |
| 2602 if (always_allocate) { |
| 2603 __ Mov(x10, Operand(scope_depth)); |
| 2604 __ Ldr(x11, MemOperand(x10)); |
| 2605 __ Add(x11, x11, 1); |
| 2606 __ Str(x11, MemOperand(x10)); |
| 2607 } |
| 2608 |
| 2609 // Prepare AAPCS64 arguments to pass to the builtin. |
| 2610 __ Mov(x0, argc); |
| 2611 __ Mov(x1, argv); |
| 2612 __ Mov(x2, Operand(ExternalReference::isolate_address(isolate))); |
| 2613 |
| 2614 // Store the return address on the stack, in the space previously allocated |
| 2615 // by EnterExitFrame. The return address is queried by |
| 2616 // ExitFrame::GetStateForFramePointer. |
| 2617 Label return_location; |
| 2618 __ Adr(x12, &return_location); |
| 2619 __ Poke(x12, 0); |
| 2620 if (__ emit_debug_code()) { |
| 2621 // Verify that the word one slot below the SP saved at fp[kSPOffset]
| 2622 // holds the return location (currently in x12).
| 2623 Register temp = masm->Tmp1(); |
| 2624 __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
| 2625 __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSizeInBytes))); |
| 2626 __ Cmp(temp, x12); |
| 2627 __ Check(eq, "fp[kSPOffset]-8 does not hold the return address."); |
| 2628 } |
| 2629 |
| 2630 // Call the builtin. |
| 2631 __ Blr(target); |
| 2632 __ Bind(&return_location); |
| 2633 const Register& result = x0; |
| 2634 |
| 2635 if (always_allocate) { |
| 2636 __ Mov(x10, Operand(scope_depth)); |
| 2637 __ Ldr(x11, MemOperand(x10)); |
| 2638 __ Sub(x11, x11, 1); |
| 2639 __ Str(x11, MemOperand(x10)); |
| 2640 } |
| 2641 |
| 2642 // x0 result The return code from the call. |
| 2643 // x21 argv |
| 2644 // x22 argc |
| 2645 // x23 target |
| 2646 // |
| 2647 // If all of the result bits matching kFailureTagMask are '1', the result is |
| 2648 // a failure. Otherwise, it's an ordinary tagged object and the call was a |
| 2649 // success. |
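| // (In V8's tagging scheme, only failures have both low bits set: smis end
| // in 0b00 and ordinary heap pointers in 0b01.)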
| 2650 Label failure; |
| 2651 __ And(x10, result, kFailureTagMask); |
| 2652 __ Cmp(x10, kFailureTagMask); |
| 2653 __ B(&failure, eq); |
| 2654 |
| 2655 // The call succeeded, so unwind the stack and return. |
| 2656 |
| 2657 // Restore callee-saved registers x21-x23. |
| 2658 __ Mov(x11, argc); |
| 2659 |
| 2660 __ Peek(argv, 1 * kPointerSize); |
| 2661 __ Peek(argc, 2 * kPointerSize); |
| 2662 __ Peek(target, 3 * kPointerSize); |
| 2663 |
| 2664 __ LeaveExitFrame(save_doubles_, x10); |
| 2665 ASSERT(jssp.Is(__ StackPointer())); |
| 2666 // Pop or drop the remaining stack slots and return from the stub. |
| 2667 // jssp[24]: Arguments array (of size argc), including receiver. |
| 2668 // jssp[16]: Preserved x23 (used for target). |
| 2669 // jssp[8]: Preserved x22 (used for argc). |
| 2670 // jssp[0]: Preserved x21 (used for argv). |
| 2671 __ Drop(x11); |
| 2672 __ Ret(); |
| 2673 |
| 2674 // The stack pointer is still csp if we aren't returning, and the frame |
| 2675 // hasn't changed (except for the return address). |
| 2676 __ SetStackPointer(csp); |
| 2677 |
| 2678 __ Bind(&failure); |
| 2679 // The call failed, so check if we need to throw an exception, and fall |
| 2680 // through (to retry) otherwise. |
| 2681 |
| 2682 Label retry; |
| 2683 // x0 result The return code from the call, including the failure |
| 2684 // code and details. |
| 2685 // x21 argv |
| 2686 // x22 argc |
| 2687 // x23 target |
| 2688 // Refer to the Failure class for details of the bit layout. |
| 2689 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); |
| 2690 __ Tst(result, kFailureTypeTagMask << kFailureTagSize); |
| 2691 __ B(eq, &retry); // RETRY_AFTER_GC |
| 2692 |
| 2693 // Special handling of out-of-memory exceptions: Pass the failure result, |
| 2694 // rather than the exception descriptor. |
| 2695 JumpIfOOM(masm, result, x10, throw_out_of_memory); |
| 2696 |
| 2697 // Retrieve the pending exception. |
| 2698 const Register& exception = result; |
| 2699 const Register& exception_address = x11; |
| 2700 __ Mov(exception_address, |
| 2701 Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
| 2702 isolate))); |
| 2703 __ Ldr(exception, MemOperand(exception_address)); |
| 2704 |
| 2705 // See if we just retrieved an OOM exception. |
| 2706 JumpIfOOM(masm, exception, x10, throw_out_of_memory); |
| 2707 |
| 2708 // Clear the pending exception. |
| 2709 __ Mov(x10, Operand(isolate->factory()->the_hole_value())); |
| 2710 __ Str(x10, MemOperand(exception_address)); |
| 2711 |
| 2712 // x0 exception The exception descriptor. |
| 2713 // x21 argv |
| 2714 // x22 argc |
| 2715 // x23 target |
| 2716 |
| 2717 // Special handling of termination exceptions, which are uncatchable by |
| 2718 // JavaScript code. |
| 2719 __ Cmp(exception, Operand(isolate->factory()->termination_exception())); |
| 2720 __ B(eq, throw_termination); |
| 2721 |
| 2722 // Handle normal exception. |
| 2723 __ B(throw_normal); |
| 2724 |
| 2725 __ Bind(&retry); |
| 2726 // The result (x0) is passed through as the next PerformGC parameter. |
| 2727 } |
| 2728 |
| 2729 |
| 2730 void CEntryStub::Generate(MacroAssembler* masm) { |
| 2731 // The Abort mechanism relies on CallRuntime, which in turn relies on |
| 2732 // CEntryStub, so until this stub has been generated, we have to use a |
| 2733 // fall-back Abort mechanism. |
| 2734 // |
| 2735 // Note that this stub must be generated before any use of Abort. |
| 2736 masm->set_use_real_aborts(false); |
| 2737 |
| 2738 ASM_LOCATION("CEntryStub::Generate entry"); |
| 2739 // Register parameters: |
| 2740 // x0: argc (including receiver, untagged) |
| 2741 // x1: target |
| 2742 // |
| 2743 // The stack on entry holds the arguments and the receiver, with the receiver |
| 2744 // at the highest address: |
| 2745 // |
| 2746 // jssp[argc-1]: receiver
| 2747 // jssp[argc-2]: arg[argc-2] |
| 2748 // ... ... |
| 2749 // jssp[1]: arg[1] |
| 2750 // jssp[0]: arg[0] |
| 2751 // |
| 2752 // The arguments are in reverse order, so that arg[argc-2] is actually the |
| 2753 // first argument to the target function and arg[0] is the last. |
| 2754 ASSERT(jssp.Is(__ StackPointer())); |
| 2755 const Register& argc_input = x0; |
| 2756 const Register& target_input = x1; |
| 2757 |
| 2758 // Calculate argv, argc and the target address, and store them in |
| 2759 // callee-saved registers so we can retry the call without having to reload |
| 2760 // these arguments. |
| 2761 // TODO(jbramley): If the first call attempt succeeds in the common case (as |
| 2762 // it should), then we might be better off putting these parameters directly |
| 2763 // into their argument registers, rather than using callee-saved registers and |
| 2764 // preserving them on the stack. |
| 2765 const Register& argv = x21; |
| 2766 const Register& argc = x22; |
| 2767 const Register& target = x23; |
| 2768 |
| 2769 // Derive argv from the stack pointer so that it points to the first argument |
| 2770 // (arg[argc-2]), or just below the receiver in case there are no arguments. |
| 2771 // - Adjust for the arg[] array. |
| 2772 Register temp_argv = x11; |
| 2773 __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2)); |
| 2774 // - Adjust for the receiver. |
| 2775 __ Sub(temp_argv, temp_argv, 1 * kPointerSize); |
| 2776 |
| 2777 // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved |
| 2778 // registers. |
| 2779 FrameScope scope(masm, StackFrame::MANUAL); |
| 2780 __ EnterExitFrame(save_doubles_, x10, 3); |
| 2781 ASSERT(csp.Is(__ StackPointer())); |
| 2782 |
| 2783 // Poke callee-saved registers into reserved space. |
| 2784 __ Poke(argv, 1 * kPointerSize); |
| 2785 __ Poke(argc, 2 * kPointerSize); |
| 2786 __ Poke(target, 3 * kPointerSize); |
| 2787 |
| 2788 // We normally only keep tagged values in callee-saved registers, as they |
| 2789 // could be pushed onto the stack by called stubs and functions, and on the |
| 2790 // stack they can confuse the GC. However, we're only calling C functions |
| 2791 // which can push arbitrary data onto the stack anyway, and so the GC won't |
| 2792 // examine that part of the stack. |
| 2793 __ Mov(argc, argc_input); |
| 2794 __ Mov(target, target_input); |
| 2795 __ Mov(argv, temp_argv); |
| 2796 |
| 2797 Label throw_normal; |
| 2798 Label throw_termination; |
| 2799 Label throw_out_of_memory; |
| 2800 |
| 2801 // Call the runtime function. |
| 2802 GenerateCore(masm, |
| 2803 &throw_normal, |
| 2804 &throw_termination, |
| 2805 &throw_out_of_memory, |
| 2806 false, |
| 2807 false); |
| 2808 |
| 2809 // If successful, the previous GenerateCore will have returned to the |
| 2810 // calling code. Otherwise, we fall through into the following. |
| 2811 |
| 2812 // Do space-specific GC and retry runtime call. |
| 2813 GenerateCore(masm, |
| 2814 &throw_normal, |
| 2815 &throw_termination, |
| 2816 &throw_out_of_memory, |
| 2817 true, |
| 2818 false); |
| 2819 |
| 2820 // Do full GC and retry runtime call one final time. |
| 2821 __ Mov(x0, reinterpret_cast<uint64_t>(Failure::InternalError())); |
| 2822 GenerateCore(masm, |
| 2823 &throw_normal, |
| 2824 &throw_termination, |
| 2825 &throw_out_of_memory, |
| 2826 true, |
| 2827 true); |
| 2828 |
| 2829 // We didn't execute a return case, so the stack frame hasn't been updated |
| 2830 // (except for the return address slot). However, we don't need to initialize |
| 2831 // jssp because the throw method will immediately overwrite it when it |
| 2832 // unwinds the stack. |
| 2833 if (__ emit_debug_code()) { |
| 2834 __ Mov(jssp, kDebugZapValue); |
| 2835 } |
| 2836 __ SetStackPointer(jssp); |
| 2837 |
| 2838 // Throw exceptions. |
| 2839 // If we throw an exception, we can end up re-entering CEntryStub before we |
| 2840 // pop the exit frame, so need to ensure that x21-x23 contain GC-safe values |
| 2841 // here. |
| 2842 __ Bind(&throw_out_of_memory); |
| 2843 ASM_LOCATION("Throw out of memory"); |
| 2844 __ Mov(argv, 0); |
| 2845 __ Mov(argc, 0); |
| 2846 __ Mov(target, 0); |
| 2847 // Set external caught exception to false. |
| 2848 Isolate* isolate = masm->isolate(); |
| 2849 __ Mov(x2, Operand(ExternalReference(Isolate::kExternalCaughtExceptionAddress, |
| 2850 isolate))); |
| 2851 __ Str(xzr, MemOperand(x2)); |
| 2852 |
| 2853 // Set pending exception and x0 to out of memory exception. |
| 2854 Label already_have_failure; |
| 2855 JumpIfOOM(masm, x0, x10, &already_have_failure); |
| 2856 Failure* out_of_memory = Failure::OutOfMemoryException(0x1); |
| 2857 __ Mov(x0, Operand(reinterpret_cast<uint64_t>(out_of_memory))); |
| 2858 __ Bind(&already_have_failure); |
| 2859 __ Mov(x2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
| 2860 isolate))); |
| 2861 __ Str(x0, MemOperand(x2)); |
| 2862 // Fall through to the next label. |
| 2863 |
| 2864 __ Bind(&throw_termination); |
| 2865 ASM_LOCATION("Throw termination"); |
| 2866 __ Mov(argv, 0); |
| 2867 __ Mov(argc, 0); |
| 2868 __ Mov(target, 0); |
| 2869 __ ThrowUncatchable(x0, x10, x11, x12, x13); |
| 2870 |
| 2871 __ Bind(&throw_normal); |
| 2872 ASM_LOCATION("Throw normal"); |
| 2873 __ Mov(argv, 0); |
| 2874 __ Mov(argc, 0); |
| 2875 __ Mov(target, 0); |
| 2876 __ Throw(x0, x10, x11, x12, x13); |
| 2877 |
| 2878 masm->set_use_real_aborts(true); |
| 2879 } |
| 2880 |
| 2881 |
| 2882 // This is the entry point from C++. 5 arguments are provided in x0-x4. |
| 2883 // See use of the CALL_GENERATED_CODE macro for example in src/execution.cc. |
| 2884 // Input: |
| 2885 // x0: code entry. |
| 2886 // x1: function. |
| 2887 // x2: receiver. |
| 2888 // x3: argc. |
| 2889 // x4: argv. |
| 2890 // Output: |
| 2891 // x0: result. |
| 2892 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { |
| 2893 ASSERT(jssp.Is(__ StackPointer())); |
| 2894 Register code_entry = x0; |
| 2895 |
| 2896 // TODO(all): We shouldn't emit debug instructions unconditionally since they |
| 2897 // will not work outside the simulator. We need to rethink how these commands |
| 2898 // interact with --trace-sim. For now, though, this turns on instruction |
| 2899 // tracing _if_ --trace-sim is specified. |
| 2900 __ Debug("TRACE ENTRY", 0, TRACE_ENABLE | LOG_ALL); |
| 2901 |
| 2902 // Enable instruction instrumentation. This only works on the simulator, and |
| 2903 // will have no effect on the model or real hardware. |
| 2904 __ EnableInstrumentation(); |
| 2905 |
| 2906 Label invoke, handler_entry, exit; |
| 2907 |
| 2908 // Push callee-saved registers and synchronize the system stack pointer (csp) |
| 2909 // and the JavaScript stack pointer (jssp). |
| 2910 // |
| 2911 // We must not write to jssp until after the PushCalleeSavedRegisters() |
| 2912 // call, since jssp is itself a callee-saved register. |
| 2913 __ SetStackPointer(csp); |
| 2914 __ PushCalleeSavedRegisters(); |
| 2915 __ Mov(jssp, csp); |
| 2916 __ SetStackPointer(jssp); |
| 2917 |
| 2918 // Build an entry frame (see layout below). |
| 2919 Isolate* isolate = masm->isolate(); |
| 2920 |
| 2921 // Build an entry frame. |
| 2922 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; |
| 2923 int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used. |
| 2924 __ Mov(x13, bad_frame_pointer); |
| 2925 __ Mov(x12, Operand(Smi::FromInt(marker))); |
| 2926 __ Mov(x11, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); |
| 2927 __ Ldr(x10, MemOperand(x11)); |
| 2928 |
| 2929 // TODO(all): Pushing the marker twice seems unnecessary. |
| 2930 // In this case perhaps we could push xzr in the slot for the context |
| 2931 // (see MAsm::EnterFrame). |
| 2932 __ Push(x13, x12, x12, x10); |
| 2933 // Set up fp. |
| 2934 __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset); |
| 2935 |
| 2936 // Push the JS entry frame marker. Also set js_entry_sp if this is the |
| 2937 // outermost JS call. |
| 2938 Label non_outermost_js, done; |
| 2939 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); |
| 2940 __ Mov(x10, Operand(ExternalReference(js_entry_sp))); |
| 2941 __ Ldr(x11, MemOperand(x10)); |
| 2942 __ Cbnz(x11, &non_outermost_js); |
| 2943 __ Str(fp, MemOperand(x10)); |
| 2944 __ Mov(x12, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); |
| 2945 __ Push(x12); |
| 2946 __ B(&done); |
| 2947 __ Bind(&non_outermost_js); |
| 2948 // We save one instruction by pushing xzr, since the marker is 0. |
| 2949 ASSERT(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL); |
| 2950 __ Push(xzr); |
| 2951 __ Bind(&done); |
| 2952 |
| 2953 // The frame setup looks like this: |
| 2954 // jssp[0] : JS entry frame marker. |
| 2955 // jssp[1] : C entry FP. |
| 2956 // jssp[2] : stack frame marker. |
| 2957 // jssp[3] : stack frame marker. |
| 2958 // jssp[4] : bad frame pointer 0xfff...ff <- fp points here. |
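
A hedged sketch of this five-slot layout as a C++ struct (purely illustrative: the field names are ours, V8 defines no such type, and the slots are listed from jssp[0] upwards):

```cpp
#include <cstdint>

// Illustrative mirror of the entry frame built by the pushes above.
struct EntryFrameSketch {
  uint64_t js_entry_frame_marker;  // jssp[0]: OUTERMOST_JSENTRY_FRAME or 0.
  uint64_t c_entry_fp;             // jssp[1]: saved Isolate c_entry_fp.
  uint64_t frame_type_marker_a;    // jssp[2]: smi-tagged StackFrame type.
  uint64_t frame_type_marker_b;    // jssp[3]: the same marker, pushed twice.
  uint64_t bad_frame_pointer;      // jssp[4]: 0xff...ff; fp points here.
};
```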
| 2959 |
| 2960 |
| 2961 // Jump to a faked try block that does the invoke, with a faked catch |
| 2962 // block that sets the pending exception. |
| 2963 __ B(&invoke); |
| 2964 |
| 2965 // Prevent the constant pool from being emitted between the record of the |
| 2966 // handler_entry position and the first instruction of the sequence here. |
| 2967 // There is no risk because Assembler::Emit() emits the instruction before |
| 2968 // checking for constant pool emission, but we do not want to depend on |
| 2969 // that. |
| 2970 { |
| 2971 Assembler::BlockConstPoolScope block_const_pool(masm); |
| 2972 __ bind(&handler_entry); |
| 2973 handler_offset_ = handler_entry.pos(); |
| 2974 // Caught exception: Store result (exception) in the pending exception |
| 2975 // field in the JSEnv and return a failure sentinel. Coming in here the |
| 2976 // fp will be invalid because the PushTryHandler below sets it to 0 to |
| 2977 // signal the existence of the JSEntry frame. |
| 2978 // TODO(jbramley): Do this in the Assembler. |
| 2979 __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
| 2980 isolate))); |
| 2981 } |
| 2982 __ Str(code_entry, MemOperand(x10)); |
| 2983 __ Mov(x0, Operand(reinterpret_cast<int64_t>(Failure::Exception()))); |
| 2984 __ B(&exit); |
| 2985 |
| 2986 // Invoke: Link this frame into the handler chain. There's only one |
| 2987 // handler block in this code object, so its index is 0. |
| 2988 __ Bind(&invoke); |
| 2989 __ PushTryHandler(StackHandler::JS_ENTRY, 0); |
| 2990 // If an exception not caught by another handler occurs, this handler |
| 2991 // returns control to the code after the B(&invoke) above, which |
| 2992 // restores all callee-saved registers (including cp and fp) to their |
| 2993 // saved values before returning a failure to C. |
| 2994 |
| 2995 // Clear any pending exceptions. |
| 2996 __ Mov(x10, Operand(isolate->factory()->the_hole_value())); |
| 2997 __ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
| 2998 isolate))); |
| 2999 __ Str(x10, MemOperand(x11)); |
| 3000 |
| 3001 // Invoke the function by calling through the JS entry trampoline builtin. |
| 3002 // Notice that we cannot store a reference to the trampoline code directly in |
| 3003 // this stub, because runtime stubs are not traversed when doing GC. |
| 3004 |
| 3005 // Registers expected by Builtins::JSEntryTrampoline: |
| 3006 // x0: code entry. |
| 3007 // x1: function. |
| 3008 // x2: receiver. |
| 3009 // x3: argc. |
| 3010 // x4: argv. |
| 3011 // TODO(jbramley): The latest ARM code checks is_construct and conditionally |
| 3012 // uses construct_entry. We probably need to do the same here. |
| 3013 ExternalReference entry(is_construct ? Builtins::kJSConstructEntryTrampoline |
| 3014 : Builtins::kJSEntryTrampoline, |
| 3015 isolate); |
| 3016 __ Mov(x10, Operand(entry)); |
| 3017 |
| 3018 // Call the JSEntryTrampoline. |
| 3019 __ Ldr(x11, MemOperand(x10)); // Dereference the address. |
| 3020 __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag); |
| 3021 __ Blr(x12); |
| 3022 |
| 3023 // Unlink this frame from the handler chain. |
| 3024 __ PopTryHandler(); |
| 3025 |
| 3026 |
| 3027 __ Bind(&exit); |
| 3028 // x0 holds the result. |
| 3029 // The stack pointer points to the top of the entry frame pushed on entry from |
| 3030 // C++ (at the beginning of this stub): |
| 3031 // jssp[0] : JS entry frame marker. |
| 3032 // jssp[1] : C entry FP. |
| 3033 // jssp[2] : stack frame marker. |
| 3034 // jssp[3] : stack frame marker. |
| 3035 // jssp[4] : bad frame pointer 0xfff...ff <- fp points here. |
| 3036 |
| 3037 // Check if the current stack frame is marked as the outermost JS frame. |
| 3038 Label non_outermost_js_2; |
| 3039 __ Pop(x10); |
| 3040 __ Cmp(x10, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); |
| 3041 __ B(ne, &non_outermost_js_2); |
| 3042 __ Mov(x11, Operand(ExternalReference(js_entry_sp))); |
| 3043 __ Str(xzr, MemOperand(x11)); |
| 3044 __ Bind(&non_outermost_js_2); |
| 3045 |
| 3046 // Restore the top frame descriptors from the stack. |
| 3047 __ Pop(x10); |
| 3048 __ Mov(x11, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); |
| 3049 __ Str(x10, MemOperand(x11)); |
| 3050 |
| 3051 // Reset the stack to the callee-saved registers. |
| 3052 __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes); |
| 3053 // Restore the callee-saved registers and return. |
| 3054 ASSERT(jssp.Is(__ StackPointer())); |
| 3055 __ Mov(csp, jssp); |
| 3056 __ SetStackPointer(csp); |
| 3057 __ PopCalleeSavedRegisters(); |
| 3058 // After this point, we must not modify jssp because it is a callee-saved |
| 3059 // register which we have just restored. |
| 3060 __ Ret(); |
| 3061 } |
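
For orientation, the C++ side reaches this stub through a function pointer whose shape matches the register comments above; a sketch based on the contemporary src/execution.cc (the exact declaration may differ):

```cpp
// Hedged sketch of the caller's side: the five arguments land in x0-x4
// under AAPCS64, in the order documented above.
typedef Object* (*JSEntryFunction)(byte* entry, Object* function,
                                   Object* receiver, int argc,
                                   Object*** argv);

// Schematic usage (see CALL_GENERATED_CODE in src/execution.cc):
//   Object* result = CALL_GENERATED_CODE(stub_entry, function_entry,
//                                        *func, *recv, argc, argv);
```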
| 3062 |
| 3063 |
| 3064 void FunctionPrototypeStub::Generate(MacroAssembler* masm) { |
| 3065 Label miss; |
| 3066 Register receiver; |
| 3067 if (kind() == Code::KEYED_LOAD_IC) { |
| 3068 // ----------- S t a t e ------------- |
| 3069 // -- lr : return address |
| 3070 // -- x1 : receiver |
| 3071 // -- x0 : key |
| 3072 // ----------------------------------- |
| 3073 Register key = x0; |
| 3074 receiver = x1; |
| 3075 __ Cmp(key, Operand(masm->isolate()->factory()->prototype_string())); |
| 3076 __ B(ne, &miss); |
| 3077 } else { |
| 3078 ASSERT(kind() == Code::LOAD_IC); |
| 3079 // ----------- S t a t e ------------- |
| 3080 // -- lr : return address |
| 3081 // -- x2 : name |
| 3082 // -- x0 : receiver |
| 3083 // -- sp[0] : receiver |
| 3084 // ----------------------------------- |
| 3085 receiver = x0; |
| 3086 } |
| 3087 |
| 3088 StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10, x11, &miss); |
| 3089 |
| 3090 __ Bind(&miss); |
| 3091 StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); |
| 3092 } |
| 3093 |
| 3094 |
| 3095 void StringLengthStub::Generate(MacroAssembler* masm) { |
| 3096 Label miss; |
| 3097 Register receiver; |
| 3098 if (kind() == Code::KEYED_LOAD_IC) { |
| 3099 // ----------- S t a t e ------------- |
| 3100 // -- lr : return address |
| 3101 // -- x1 : receiver |
| 3102 // -- x0 : key |
| 3103 // ----------------------------------- |
| 3104 Register key = x0; |
| 3105 receiver = x1; |
| 3106 __ Cmp(key, Operand(masm->isolate()->factory()->length_string())); |
| 3107 __ B(ne, &miss); |
| 3108 } else { |
| 3109 ASSERT(kind() == Code::LOAD_IC); |
| 3110 // ----------- S t a t e ------------- |
| 3111 // -- lr : return address |
| 3112 // -- x2 : name |
| 3113 // -- x0 : receiver |
| 3114 // -- sp[0] : receiver |
| 3115 // ----------------------------------- |
| 3116 receiver = x0; |
| 3117 } |
| 3118 |
| 3119 StubCompiler::GenerateLoadStringLength(masm, receiver, x10, x11, &miss, |
| 3120 support_wrapper_); |
| 3121 |
| 3122 __ Bind(&miss); |
| 3123 StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); |
| 3124 } |
| 3125 |
| 3126 |
| 3127 void StoreArrayLengthStub::Generate(MacroAssembler* masm) { |
| 3128 ASM_LOCATION("StoreArrayLengthStub::Generate"); |
| 3129 // This accepts as a receiver anything JSArray::SetElementsLength accepts |
| 3130 // (currently anything except for external arrays, which means anything with |
| 3131 // elements of FixedArray type). Value must be a number; only smis are |
| 3132 // accepted, as they are the most common case. |
| 3133 Label miss; |
| 3134 |
| 3135 Register receiver; |
| 3136 Register value; |
| 3137 if (kind() == Code::KEYED_STORE_IC) { |
| 3138 // ----------- S t a t e ------------- |
| 3139 // -- lr : return address |
| 3140 // -- x2 : receiver |
| 3141 // -- x1 : key |
| 3142 // -- x0 : value |
| 3143 // ----------------------------------- |
| 3144 Register key = x1; |
| 3145 receiver = x2; |
| 3146 value = x0; |
| 3147 __ Cmp(key, Operand(masm->isolate()->factory()->length_string())); |
| 3148 __ B(ne, &miss); |
| 3149 } else { |
| 3150 ASSERT(kind() == Code::STORE_IC); |
| 3151 // ----------- S t a t e ------------- |
| 3152 // -- lr : return address |
| 3153 // -- x2 : key |
| 3154 // -- x1 : receiver |
| 3155 // -- x0 : value |
| 3156 // ----------------------------------- |
| 3157 receiver = x1; |
| 3158 value = x0; |
| 3159 } |
| 3160 |
| 3161 // Check that the receiver isn't a smi. |
| 3162 __ JumpIfSmi(receiver, &miss); |
| 3163 |
| 3164 // Check that the object is a JS array. |
| 3165 __ CompareObjectType(receiver, x10, x11, JS_ARRAY_TYPE); |
| 3166 __ B(ne, &miss); |
| 3167 |
| 3168 // Check that elements are FixedArray. |
| 3169 // We rely on StoreIC_ArrayLength below to deal with all types of |
| 3170 // fast elements (including COW). |
| 3171 __ Ldr(x10, FieldMemOperand(receiver, JSArray::kElementsOffset)); |
| 3172 __ CompareObjectType(x10, x11, x12, FIXED_ARRAY_TYPE); |
| 3173 __ B(ne, &miss); |
| 3174 |
| 3175 // Check that the array has fast properties, otherwise the length |
| 3176 // property might have been redefined. |
| 3177 __ Ldr(x10, FieldMemOperand(receiver, JSArray::kPropertiesOffset)); |
| 3178 __ Ldr(x10, FieldMemOperand(x10, FixedArray::kMapOffset)); |
| 3179 __ CompareRoot(x10, Heap::kHashTableMapRootIndex); |
| 3180 __ B(eq, &miss); |
| 3181 |
| 3182 // Check that value is a smi. |
| 3183 __ JumpIfNotSmi(value, &miss); |
| 3184 |
| 3185 // Prepare tail call to StoreIC_ArrayLength. |
| 3186 __ Push(receiver, value); |
| 3187 |
| 3188 ExternalReference ref = |
| 3189 ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate()); |
| 3190 __ TailCallExternalReference(ref, 2, 1); |
| 3191 |
| 3192 __ Bind(&miss); |
| 3193 StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); |
| 3194 } |
| 3195 |
| 3196 |
| 3197 void InstanceofStub::Generate(MacroAssembler* masm) { |
| 3198 // Stack on entry: |
| 3199 // jssp[0]: function. |
| 3200 // jssp[8]: object. |
| 3201 // |
| 3202 // Returns result in x0: smi 0 indicates instanceof, smi 1 indicates not |
| 3203 // instanceof. |
| 3204 |
| 3205 // Instanceof supports the kArgsInRegisters flag but not the others, i.e. |
| 3206 // no call site inlining and |
| 3207 // no return of true/false objects. |
| 3208 ASSERT((flags_ == kNoFlags) || (flags_ == kArgsInRegisters)); |
| 3209 |
| 3210 Register result = x0; |
| 3211 Register function = right(); |
| 3212 Register object = left(); |
| 3213 Label not_js_object, slow; |
| 3214 |
| 3215 if (!HasArgsInRegisters()) { |
| 3216 __ Pop(function, object); |
| 3217 } |
| 3218 |
| 3219 // Check that the left hand side is a JS object and load its map as a side |
| 3220 // effect. |
| 3221 Register map = x12; |
| 3222 __ JumpIfSmi(object, ¬_js_object); |
| 3223 __ IsObjectJSObjectType(object, map, x7, ¬_js_object); |
| 3224 |
| 3225 // If there is a call site cache, don't look in the global cache, but do the |
| 3226 // real lookup and update the call site cache. |
| 3227 if (!HasCallSiteInlineCheck()) { |
| 3228 Label miss; |
| 3229 __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex, &miss); |
| 3230 __ JumpIfNotRoot(map, Heap::kInstanceofCacheMapRootIndex, &miss); |
| 3231 __ LoadRoot(result, Heap::kInstanceofCacheAnswerRootIndex); |
| 3232 __ Ret(); |
| 3233 __ Bind(&miss); |
| 3234 } |
| 3235 |
| 3236 // Get the prototype of the function. |
| 3237 Register prototype = x13; |
| 3238 __ TryGetFunctionPrototype(function, prototype, x7, &slow, |
| 3239 MacroAssembler::kMissOnBoundFunction); |
| 3240 |
| 3241 // Check that the function prototype is a JS object. |
| 3242 __ JumpIfSmi(prototype, &slow); |
| 3243 __ IsObjectJSObjectType(prototype, x6, x7, &slow); |
| 3244 |
| 3245 // Update the global instanceof or call site inlined cache with the current |
| 3246 // map and function. The cached answer will be set when it is known below. |
| 3247 if (!HasCallSiteInlineCheck()) { |
| 3248 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); |
| 3249 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); |
| 3250 } else { |
| 3251 ASM_UNIMPLEMENTED("InstanceofStub inline patching"); |
| 3252 } |
| 3253 |
| 3254 Label return_result; |
| 3255 { |
| 3256 // Loop through the prototype chain looking for the function prototype. |
| 3257 Register chain_map = x1; |
| 3258 Register chain_prototype = x14; |
| 3259 Register null_value = x15; |
| 3260 Label loop; |
| 3261 __ Ldr(chain_prototype, FieldMemOperand(map, Map::kPrototypeOffset)); |
| 3262 __ LoadRoot(null_value, Heap::kNullValueRootIndex); |
| 3263 // Speculatively set a result. |
| 3264 __ Mov(result, Operand(Smi::FromInt(1))); |
| 3265 |
| 3266 __ Bind(&loop); |
| 3267 |
| 3268 // If the chain prototype is the object prototype, return smi(0). |
| 3269 __ Cmp(chain_prototype, prototype); |
| 3270 ASSERT(Smi::FromInt(0) == 0UL); |
| 3271 __ CzeroX(result, eq); |
| 3272 __ B(eq, &return_result); |
| 3273 |
| 3274 // If the chain prototype is null, we've reached the end of the chain, so |
| 3275 // return smi(1). |
| 3276 __ Cmp(chain_prototype, null_value); |
| 3277 __ B(eq, &return_result); |
| 3278 |
| 3279 // Otherwise, load the next prototype in the chain, and loop. |
| 3280 __ Ldr(chain_map, FieldMemOperand(chain_prototype, HeapObject::kMapOffset)); |
| 3281 __ Ldr(chain_prototype, FieldMemOperand(chain_map, Map::kPrototypeOffset)); |
| 3282 __ B(&loop); |
| 3283 } |
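
The scoped block above is the standard instanceof walk. A minimal C++ sketch of the same control flow, with a placeholder type ("Obj" is ours; real objects reach their prototype via their map):

```cpp
// Returns 0 for "is an instance" and 1 for "is not", mirroring the smi
// results the stub leaves in x0.
struct Obj { const Obj* prototype; };

int InstanceOfWalk(const Obj* start, const Obj* target, const Obj* null_v) {
  for (const Obj* p = start; ; p = p->prototype) {
    if (p == target) return 0;  // found the function's prototype: true
    if (p == null_v) return 1;  // end of the chain: false
  }
}
```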
| 3284 |
| 3285 // Return sequence when no arguments are on the stack. |
| 3286 __ Bind(&return_result); |
| 3287 if (!HasCallSiteInlineCheck()) { |
| 3288 __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex); |
| 3289 } else { |
| 3290 ASM_UNIMPLEMENTED("InstanceofStub call site patcher"); |
| 3291 } |
| 3292 __ Ret(); |
| 3293 |
| 3294 Label object_not_null, object_not_null_or_smi; |
| 3295 |
| 3296 __ Bind(¬_js_object); |
| 3297 Register object_type = x14; |
| 3298 // x0 result result return register (uninit) |
| 3299 // x10 function pointer to function |
| 3300 // x11 object pointer to object |
| 3301 // x14 object_type type of object (uninit) |
| 3302 |
| 3303 // Before null, smi and string checks, check that the rhs is a function. |
| 3304 // For a non-function rhs, an exception must be thrown. |
| 3305 __ JumpIfSmi(function, &slow); |
| 3306 __ JumpIfNotObjectType(function, x6, object_type, JS_FUNCTION_TYPE, &slow); |
| 3307 |
| 3308 // Null is not instance of anything. |
| 3309 __ Cmp(object_type, Operand(masm->isolate()->factory()->null_value())); |
| 3310 __ B(ne, &object_not_null); |
| 3311 __ Mov(result, Operand(Smi::FromInt(1))); |
| 3312 __ Ret(); |
| 3313 |
| 3314 __ Bind(&object_not_null); |
| 3315 // Smi values are not instances of anything. |
| 3316 __ JumpIfNotSmi(object, &object_not_null_or_smi); |
| 3317 __ Mov(result, Operand(Smi::FromInt(1))); |
| 3318 __ Ret(); |
| 3319 |
| 3320 __ Bind(&object_not_null_or_smi); |
| 3321 // String values are not instances of anything. |
| 3322 __ IsObjectJSStringType(object, x7, &slow); |
| 3323 __ Mov(result, Operand(Smi::FromInt(1))); |
| 3324 __ Ret(); |
| 3325 |
| 3326 // Slow-case. Tail call builtin. |
| 3327 __ Bind(&slow); |
| 3328 if (!ReturnTrueFalseObject()) { |
| 3329 // Arguments have either been passed into registers or have been previously |
| 3330 // popped. We need to push them before calling builtin. |
| 3331 __ Push(object, function); |
| 3332 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); |
| 3333 } else { |
| 3334 ASM_UNIMPLEMENTED("InstanceofStub call builtin and return object"); |
| 3335 } |
| 3336 } |
| 3337 |
| 3338 |
| 3339 Register InstanceofStub::left() { |
| 3340 // Object to check (instanceof lhs). |
| 3341 return x11; |
| 3342 } |
| 3343 |
| 3344 |
| 3345 Register InstanceofStub::right() { |
| 3346 // Constructor function (instanceof rhs). |
| 3347 return x10; |
| 3348 } |
| 3349 |
| 3350 |
| 3351 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { |
| 3352 Register arg_count = x0; |
| 3353 Register key = x1; |
| 3354 |
| 3355 // The displacement is the offset of the last parameter (if any) relative |
| 3356 // to the frame pointer. |
| 3357 static const int kDisplacement = |
| 3358 StandardFrameConstants::kCallerSPOffset - kPointerSize; |
| 3359 |
| 3360 // Check that the key is a smi. |
| 3361 Label slow; |
| 3362 __ JumpIfNotSmi(key, &slow); |
| 3363 |
| 3364 // Check if the calling frame is an arguments adaptor frame. |
| 3365 Register local_fp = x11; |
| 3366 Register caller_fp = x11; |
| 3367 Register caller_ctx = x12; |
| 3368 Label skip_adaptor; |
| 3369 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| 3370 __ Ldr(caller_ctx, MemOperand(caller_fp, |
| 3371 StandardFrameConstants::kContextOffset)); |
| 3372 __ Cmp(caller_ctx, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
| 3373 __ Csel(local_fp, fp, caller_fp, ne); |
| 3374 __ B(ne, &skip_adaptor); |
| 3375 |
| 3376 // Load the actual arguments limit found in the arguments adaptor frame. |
| 3377 __ Ldr(arg_count, MemOperand(caller_fp, |
| 3378 ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| 3379 __ Bind(&skip_adaptor); |
| 3380 |
| 3381 // Check index against formal parameters count limit. Use unsigned comparison |
| 3382 // to get negative check for free: branch if key < 0 or key >= arg_count. |
| 3383 __ Cmp(key, arg_count); |
| 3384 __ B(hs, &slow); |
| 3385 |
| 3386 // Read the argument from the stack and return it. |
| 3387 __ Sub(x10, arg_count, key); |
| 3388 __ Add(x10, local_fp, Operand::UntagSmiAndScale(x10, kPointerSizeLog2)); |
| 3389 __ Ldr(x0, MemOperand(x10, kDisplacement)); |
| 3390 __ Ret(); |
| 3391 |
| 3392 // Slow case: handle non-smi or out-of-bounds access to arguments by calling |
| 3393 // the runtime system. |
| 3394 __ Bind(&slow); |
| 3395 __ Push(key); |
| 3396 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); |
| 3397 } |
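
The unsigned comparison above folds two checks into one. A minimal C++ sketch of the trick, ignoring smi tagging (both compared values are tagged in the stub, which does not change the idea):

```cpp
#include <cstdint>

// A negative key reinterpreted as unsigned becomes huge, so one unsigned
// compare rejects both key < 0 and key >= arg_count.
bool KeyInBounds(int64_t key, int64_t arg_count) {
  return static_cast<uint64_t>(key) < static_cast<uint64_t>(arg_count);
}
```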
| 3398 |
| 3399 |
| 3400 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) { |
| 3401 // Stack layout on entry. |
| 3402 // jssp[0]: number of parameters (tagged) |
| 3403 // jssp[8]: address of receiver argument |
| 3404 // jssp[16]: function |
| 3405 |
| 3406 ASM_UNIMPLEMENTED("GenerateNewNonStrictSlow: This has not been tested."); |
| 3407 |
| 3408 // Check if the calling frame is an arguments adaptor frame. |
| 3409 Label runtime; |
| 3410 Register caller_fp = x10; |
| 3411 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| 3412 // Load and untag the context. |
| 3413 STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4); |
| 3414 __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset + |
| 3415 (kSmiShift / kBitsPerByte))); |
| 3416 __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR); |
| 3417 __ B(ne, &runtime); |
| 3418 |
| 3419 // Patch the arguments.length and parameters pointer in the current frame. |
| 3420 __ Ldr(x11, MemOperand(caller_fp, |
| 3421 ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| 3422 __ Poke(x11, 0 * kXRegSizeInBytes); |
| 3423 __ Add(x10, caller_fp, Operand::UntagSmiAndScale(x11, kPointerSizeLog2)); |
| 3424 __ Add(x10, x10, Operand(StandardFrameConstants::kCallerSPOffset)); |
| 3425 __ Poke(x10, 1 * kXRegSizeInBytes); |
| 3426 |
| 3427 __ Bind(&runtime); |
| 3428 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); |
| 3429 } |
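
The offset load above exploits the A64 smi layout. A small sketch, assuming kSmiShift == 32 (as asserted in the code) and a little-endian target:

```cpp
#include <cstdint>
#include <cstring>

// The 32-bit payload lives in the upper half of the 64-bit slot, so a
// 32-bit load at byte offset 4 reads it already untagged.
int32_t LoadSmiPayload(const void* slot) {
  int32_t payload;
  std::memcpy(&payload, static_cast<const char*>(slot) + 4, sizeof(payload));
  return payload;
}
```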
| 3430 |
| 3431 |
| 3432 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { |
| 3433 // Stack layout on entry. |
| 3434 // jssp[0]: number of parameters (tagged) |
| 3435 // jssp[8]: address of receiver argument |
| 3436 // jssp[16]: function |
| 3437 // |
| 3438 // Returns pointer to result object in x0. |
| 3439 |
| 3440 // Note: arg_count_smi is an alias of param_count_smi. |
| 3441 Register arg_count_smi = x3; |
| 3442 Register param_count_smi = x3; |
| 3443 Register param_count = x7; |
| 3444 Register recv_arg = x14; |
| 3445 Register function = x4; |
| 3446 __ Pop(param_count_smi, recv_arg, function); |
| 3447 __ SmiUntag(param_count, param_count_smi); |
| 3448 |
| 3449 // Check if the calling frame is an arguments adaptor frame. |
| 3450 Register caller_fp = x11; |
| 3451 Register caller_ctx = x12; |
| 3452 Label runtime; |
| 3453 Label adaptor_frame, try_allocate; |
| 3454 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| 3455 __ Ldr(caller_ctx, MemOperand(caller_fp, |
| 3456 StandardFrameConstants::kContextOffset)); |
| 3457 __ Cmp(caller_ctx, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
| 3458 __ B(eq, &adaptor_frame); |
| 3459 |
| 3460 // No adaptor, parameter count = argument count. |
| 3461 |
| 3462 // x1 mapped_params number of mapped params, min(params, args) (uninit) |
| 3463 // x2 arg_count number of function arguments (uninit) |
| 3464 // x3 arg_count_smi number of function arguments (smi) |
| 3465 // x4 function function pointer |
| 3466 // x7 param_count number of function parameters |
| 3467 // x11 caller_fp caller's frame pointer |
| 3468 // x14 recv_arg pointer to receiver arguments |
| 3469 |
| 3470 Register arg_count = x2; |
| 3471 __ Mov(arg_count, param_count); |
| 3472 __ B(&try_allocate); |
| 3473 |
| 3474 // We have an adaptor frame. Patch the parameters pointer. |
| 3475 __ Bind(&adaptor_frame); |
| 3476 __ Ldr(arg_count_smi, |
| 3477 MemOperand(caller_fp, |
| 3478 ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| 3479 __ SmiUntag(arg_count, arg_count_smi); |
| 3480 __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2)); |
| 3481 __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset); |
| 3482 |
| 3483 // Compute the mapped parameter count = min(param_count, arg_count) |
| 3484 Register mapped_params = x1; |
| 3485 __ Cmp(param_count, arg_count); |
| 3486 __ Csel(mapped_params, param_count, arg_count, lt); |
| 3487 |
| 3488 __ Bind(&try_allocate); |
| 3489 |
| 3490 // x0 alloc_obj pointer to allocated objects: param map, backing |
| 3491 // store, arguments (uninit) |
| 3492 // x1 mapped_params number of mapped parameters, min(params, args) |
| 3493 // x2 arg_count number of function arguments |
| 3494 // x3 arg_count_smi number of function arguments (smi) |
| 3495 // x4 function function pointer |
| 3496 // x7 param_count number of function parameters |
| 3497 // x10 size size of objects to allocate (uninit) |
| 3498 // x14 recv_arg pointer to receiver arguments |
| 3499 |
| 3500 // Compute the size of backing store, parameter map, and arguments object. |
| 3501 // 1. The parameter map, which has two extra words containing the context |
| 3502 // and backing store. |
| 3503 const int kParameterMapHeaderSize = |
| 3504 FixedArray::kHeaderSize + 2 * kPointerSize; |
| 3505 |
| 3506 // Calculate the parameter map size, assuming it exists. |
| 3507 Register size = x10; |
| 3508 __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2)); |
| 3509 __ Add(size, size, kParameterMapHeaderSize); |
| 3510 |
| 3511 // If there are no mapped parameters, set the running size total to zero. |
| 3512 // Otherwise, use the parameter map size calculated earlier. |
| 3513 __ Cmp(mapped_params, 0); |
| 3514 __ CzeroX(size, eq); |
| 3515 |
| 3516 // 2. Add the size of the backing store and arguments object. |
| 3517 __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2)); |
| 3518 __ Add(size, size, FixedArray::kHeaderSize + Heap::kArgumentsObjectSize); |
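
A sketch of the size computation just performed, in bytes; the parameter names stand in for kPointerSize, FixedArray::kHeaderSize and Heap::kArgumentsObjectSize, and the branch mirrors the Cmp/CzeroX pair:

```cpp
int AliasedArgumentsSize(int mapped_params, int arg_count, int pointer_size,
                         int fixed_array_header, int arguments_object_size) {
  const int parameter_map_header = fixed_array_header + 2 * pointer_size;
  int size = (mapped_params == 0)
                 ? 0  // no parameter map is allocated at all
                 : mapped_params * pointer_size + parameter_map_header;
  size += arg_count * pointer_size + fixed_array_header;  // backing store
  return size + arguments_object_size;  // plus the arguments object
}
```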
| 3519 |
| 3520 // Do the allocation of all three objects in one go. Assign this to x0, as it |
| 3521 // will be returned to the caller. |
| 3522 Register alloc_obj = x0; |
| 3523 __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT); |
| 3524 |
| 3525 // Get the arguments boilerplate from the current (global) context. |
| 3526 |
| 3527 // x0 alloc_obj pointer to allocated objects (param map, backing |
| 3528 // store, arguments) |
| 3529 // x1 mapped_params number of mapped parameters, min(params, args) |
| 3530 // x2 arg_count number of function arguments |
| 3531 // x3 arg_count_smi number of function arguments (smi) |
| 3532 // x4 function function pointer |
| 3533 // x7 param_count number of function parameters |
| 3534 // x11 args_offset offset to args (or aliased args) boilerplate (uninit) |
| 3535 // x14 recv_arg pointer to receiver arguments |
| 3536 |
| 3537 Register global_object = x10; |
| 3538 Register global_ctx = x10; |
| 3539 Register args_offset = x11; |
| 3540 Register aliased_args_offset = x10; |
| 3541 __ Ldr(global_object, GlobalObjectMemOperand()); |
| 3542 __ Ldr(global_ctx, FieldMemOperand(global_object, |
| 3543 GlobalObject::kNativeContextOffset)); |
| 3544 |
| 3545 __ Ldr(args_offset, ContextMemOperand(global_ctx, |
| 3546 Context::ARGUMENTS_BOILERPLATE_INDEX)); |
| 3547 __ Ldr(aliased_args_offset, |
| 3548 ContextMemOperand(global_ctx, |
| 3549 Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)); |
| 3550 __ Cmp(mapped_params, 0); |
| 3551 __ CmovX(args_offset, aliased_args_offset, ne); |
| 3552 |
| 3553 // Copy the JS object part. |
| 3554 __ CopyFields(alloc_obj, args_offset, CPURegList(x10, x12, x13), |
| 3555 JSObject::kHeaderSize / kPointerSize); |
| 3556 |
| 3557 // Set up the callee in-object property. |
| 3558 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); |
| 3559 const int kCalleeOffset = JSObject::kHeaderSize + |
| 3560 Heap::kArgumentsCalleeIndex * kPointerSize; |
| 3561 __ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset)); |
| 3562 |
| 3563 // Use the length and set that as an in-object property. |
| 3564 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); |
| 3565 const int kLengthOffset = JSObject::kHeaderSize + |
| 3566 Heap::kArgumentsLengthIndex * kPointerSize; |
| 3567 __ Str(arg_count_smi, FieldMemOperand(alloc_obj, kLengthOffset)); |
| 3568 |
| 3569 // Set up the elements pointer in the allocated arguments object. |
| 3570 // If we allocated a parameter map, "elements" will point there; otherwise |
| 3571 // it will point to the backing store. |
| 3572 |
| 3573 // x0 alloc_obj pointer to allocated objects (param map, backing |
| 3574 // store, arguments) |
| 3575 // x1 mapped_params number of mapped parameters, min(params, args) |
| 3576 // x2 arg_count number of function arguments |
| 3577 // x3 arg_count_smi number of function arguments (smi) |
| 3578 // x4 function function pointer |
| 3579 // x5 elements pointer to parameter map or backing store (uninit) |
| 3580 // x6 backing_store pointer to backing store (uninit) |
| 3581 // x7 param_count number of function parameters |
| 3582 // x14 recv_arg pointer to receiver arguments |
| 3583 |
| 3584 Register elements = x5; |
| 3585 __ Add(elements, alloc_obj, Heap::kArgumentsObjectSize); |
| 3586 __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset)); |
| 3587 |
| 3588 // Initialize parameter map. If there are no mapped arguments, we're done. |
| 3589 Label skip_parameter_map; |
| 3590 __ Cmp(mapped_params, 0); |
| 3591 // Set up backing store address, because it is needed later for filling in |
| 3592 // the unmapped arguments. |
| 3593 Register backing_store = x6; |
| 3594 __ CmovX(backing_store, elements, eq); |
| 3595 __ B(eq, &skip_parameter_map); |
| 3596 |
| 3597 __ LoadRoot(x10, Heap::kNonStrictArgumentsElementsMapRootIndex); |
| 3598 __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset)); |
| 3599 __ Add(x10, mapped_params, 2); |
| 3600 __ SmiTag(x10); |
| 3601 __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 3602 __ Str(cp, FieldMemOperand(elements, |
| 3603 FixedArray::kHeaderSize + 0 * kPointerSize)); |
| 3604 __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2)); |
| 3605 __ Add(x10, x10, kParameterMapHeaderSize); |
| 3606 __ Str(x10, FieldMemOperand(elements, |
| 3607 FixedArray::kHeaderSize + 1 * kPointerSize)); |
| 3608 |
| 3609 // Copy the parameter slots and the holes in the arguments. |
| 3610 // We need to fill in mapped_parameter_count slots. They index the context, |
| 3611 // where parameters are stored in reverse order, at: |
| 3612 // |
| 3613 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1 |
| 3614 // |
| 3615 // The mapped parameter thus needs to get indices: |
| 3616 // |
| 3617 // MIN_CONTEXT_SLOTS + parameter_count - 1 .. |
| 3618 // MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count |
| 3619 // |
| 3620 // We loop from right to left. |
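
A sketch of that index arithmetic in plain C++ (names are ours; out[i] is the value stored for the i-th mapped slot, counting from the left):

```cpp
// Slot i receives context index MIN_CONTEXT_SLOTS + parameter_count - 1 - i,
// and the loop fills the slots from right to left, as above.
void FillMappedIndices(int min_context_slots, int param_count,
                       int mapped_count, int* out /* mapped_count slots */) {
  int index = min_context_slots + param_count - mapped_count;
  for (int i = mapped_count - 1; i >= 0; --i) {
    out[i] = index++;  // rightmost mapped slot gets the smallest index
  }
}
```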
| 3621 |
| 3622 // x0 alloc_obj pointer to allocated objects (param map, backing |
| 3623 // store, arguments) |
| 3624 // x1 mapped_params number of mapped parameters, min(params, args) |
| 3625 // x2 arg_count number of function arguments |
| 3626 // x3 arg_count_smi number of function arguments (smi) |
| 3627 // x4 function function pointer |
| 3628 // x5 elements pointer to parameter map or backing store (uninit) |
| 3629 // x6 backing_store pointer to backing store (uninit) |
| 3630 // x7 param_count number of function parameters |
| 3631 // x11 loop_count parameter loop counter (uninit) |
| 3632 // x12 index parameter index (smi, uninit) |
| 3633 // x13 the_hole hole value (uninit) |
| 3634 // x14 recv_arg pointer to receiver arguments |
| 3635 |
| 3636 Register loop_count = x11; |
| 3637 Register index = x12; |
| 3638 Register the_hole = x13; |
| 3639 Label parameters_loop, parameters_test; |
| 3640 __ Mov(loop_count, mapped_params); |
| 3641 __ Add(index, param_count, Context::MIN_CONTEXT_SLOTS); |
| 3642 __ Sub(index, index, mapped_params); |
| 3643 __ SmiTag(index); |
| 3644 __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex); |
| 3645 __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2)); |
| 3646 __ Add(backing_store, backing_store, kParameterMapHeaderSize); |
| 3647 |
| 3648 __ B(¶meters_test); |
| 3649 |
| 3650 __ Bind(¶meters_loop); |
| 3651 __ Sub(loop_count, loop_count, 1); |
| 3652 __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2)); |
| 3653 __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag); |
| 3654 __ Str(index, MemOperand(elements, x10)); |
| 3655 __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize); |
| 3656 __ Str(the_hole, MemOperand(backing_store, x10)); |
| 3657 __ Add(index, index, Operand(Smi::FromInt(1))); |
| 3658 __ Bind(¶meters_test); |
| 3659 __ Cbnz(loop_count, ¶meters_loop); |
| 3660 |
| 3661 __ Bind(&skip_parameter_map); |
| 3662 // Copy the arguments header and remaining slots (if there are any). |
| 3663 __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex); |
| 3664 __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset)); |
| 3665 __ Str(arg_count_smi, FieldMemOperand(backing_store, |
| 3666 FixedArray::kLengthOffset)); |
| 3667 |
| 3668 // x0 alloc_obj pointer to allocated objects (param map, backing |
| 3669 // store, arguments) |
| 3670 // x1 mapped_params number of mapped parameters, min(params, args) |
| 3671 // x2 arg_count number of function arguments |
| 3672 // x4 function function pointer |
| 3673 // x3 arg_count_smi number of function arguments (smi) |
| 3674 // x6 backing_store pointer to backing store (uninit) |
| 3675 // x14 recv_arg pointer to receiver arguments |
| 3676 |
| 3677 Label arguments_loop, arguments_test; |
| 3678 __ Mov(x10, mapped_params); |
| 3679 __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2)); |
| 3680 __ B(&arguments_test); |
| 3681 |
| 3682 __ Bind(&arguments_loop); |
| 3683 __ Sub(recv_arg, recv_arg, kPointerSize); |
| 3684 __ Ldr(x11, MemOperand(recv_arg)); |
| 3685 __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2)); |
| 3686 __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize)); |
| 3687 __ Add(x10, x10, 1); |
| 3688 |
| 3689 __ Bind(&arguments_test); |
| 3690 __ Cmp(x10, arg_count); |
| 3691 __ B(lt, &arguments_loop); |
| 3692 |
| 3693 __ Ret(); |
| 3694 |
| 3695 // Do the runtime call to allocate the arguments object. |
| 3696 __ Bind(&runtime); |
| 3697 __ Push(function, recv_arg, arg_count_smi); |
| 3698 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); |
| 3699 } |
| 3700 |
| 3701 |
| 3702 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { |
| 3703 // Stack layout on entry. |
| 3704 // jssp[0]: number of parameters (tagged) |
| 3705 // jssp[8]: address of receiver argument |
| 3706 // jssp[16]: function |
| 3707 // |
| 3708 // Returns pointer to result object in x0. |
| 3709 |
| 3710 // Get the stub arguments from the frame, and make an untagged copy of the |
| 3711 // parameter count. |
| 3712 Register param_count_smi = x1; |
| 3713 Register params = x2; |
| 3714 Register function = x3; |
| 3715 Register param_count = x13; |
| 3716 __ Pop(param_count_smi, params, function); |
| 3717 __ SmiUntag(param_count, param_count_smi); |
| 3718 |
| 3719 // Test if arguments adaptor needed. |
| 3720 Register caller_fp = x11; |
| 3721 Register caller_ctx = x12; |
| 3722 Label try_allocate, runtime; |
| 3723 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| 3724 __ Ldr(caller_ctx, MemOperand(caller_fp, |
| 3725 StandardFrameConstants::kContextOffset)); |
| 3726 __ Cmp(caller_ctx, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
| 3727 __ B(ne, &try_allocate); |
| 3728 |
| 3729 // x1 param_count_smi number of parameters passed to function (smi) |
| 3730 // x2 params pointer to parameters |
| 3731 // x3 function function pointer |
| 3732 // x11 caller_fp caller's frame pointer |
| 3733 // x13 param_count number of parameters passed to function |
| 3734 |
| 3735 // Patch the argument length and parameters pointer. |
| 3736 __ Ldr(param_count_smi, |
| 3737 MemOperand(caller_fp, |
| 3738 ArgumentsAdaptorFrameConstants::kLengthOffset)); |
| 3739 __ SmiUntag(param_count, param_count_smi); |
| 3740 __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2)); |
| 3741 __ Add(params, x10, StandardFrameConstants::kCallerSPOffset); |
| 3742 |
| 3743 // Try the new space allocation. Start out by computing the size of the |
| 3744 // arguments object and the elements array in words. |
| 3745 Register size = x10; |
| 3746 __ Bind(&try_allocate); |
| 3747 __ Add(size, param_count, FixedArray::kHeaderSize / kPointerSize); |
| 3748 __ Cmp(param_count, 0); |
| 3749 __ CzeroX(size, eq); |
| 3750 __ Add(size, size, Heap::kArgumentsObjectSizeStrict / kPointerSize); |
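
A sketch of this size computation in words, mirroring the Add/Cmp/CzeroX sequence; the parameters stand in for FixedArray::kHeaderSize / kPointerSize and Heap::kArgumentsObjectSizeStrict / kPointerSize:

```cpp
int StrictArgumentsSizeInWords(int param_count, int fixed_header_words,
                               int arguments_object_words) {
  // With no parameters there is no elements array at all.
  int size = (param_count == 0) ? 0 : param_count + fixed_header_words;
  return size + arguments_object_words;
}
```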
| 3751 |
| 3752 // Do the allocation of both objects in one go. Assign this to x0, as it will |
| 3753 // be returned to the caller. |
| 3754 Register alloc_obj = x0; |
| 3755 __ Allocate(size, alloc_obj, x11, x12, &runtime, |
| 3756 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); |
| 3757 |
| 3758 // Get the arguments boilerplate from the current (native) context. |
| 3759 Register global_object = x10; |
| 3760 Register global_ctx = x10; |
| 3761 Register args_offset = x4; |
| 3762 __ Ldr(global_object, GlobalObjectMemOperand()); |
| 3763 __ Ldr(global_ctx, FieldMemOperand(global_object, |
| 3764 GlobalObject::kNativeContextOffset)); |
| 3765 __ Ldr(args_offset, |
| 3766 ContextMemOperand(global_ctx, |
| 3767 Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)); |
| 3768 |
| 3769 // x0 alloc_obj pointer to allocated objects: parameter array and |
| 3770 // arguments object |
| 3771 // x1 param_count_smi number of parameters passed to function (smi) |
| 3772 // x2 params pointer to parameters |
| 3773 // x3 function function pointer |
| 3774 // x4 args_offset offset to arguments boilerplate |
| 3775 // x13 param_count number of parameters passed to function |
| 3776 |
| 3777 // Copy the JS object part. |
| 3778 __ CopyFields(alloc_obj, args_offset, CPURegList(x5, x6, x7), |
| 3779 JSObject::kHeaderSize / kPointerSize); |
| 3780 |
| 3781 // Set the smi-tagged length as an in-object property. |
| 3782 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); |
| 3783 const int kLengthOffset = JSObject::kHeaderSize + |
| 3784 Heap::kArgumentsLengthIndex * kPointerSize; |
| 3785 __ Str(param_count_smi, FieldMemOperand(alloc_obj, kLengthOffset)); |
| 3786 |
| 3787 // If there are no actual arguments, we're done. |
| 3788 Label done; |
| 3789 __ Cbz(param_count, &done); |
| 3790 |
| 3791 // Set up the elements pointer in the allocated arguments object and |
| 3792 // initialize the header in the elements fixed array. |
| 3793 Register elements = x5; |
| 3794 __ Add(elements, alloc_obj, Heap::kArgumentsObjectSizeStrict); |
| 3795 __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset)); |
| 3796 __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex); |
| 3797 __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset)); |
| 3798 __ Str(param_count_smi, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 3799 |
| 3800 // x0 alloc_obj pointer to allocated objects: parameter array and |
| 3801 // arguments object |
| 3802 // x1 param_count_smi number of parameters passed to function (smi) |
| 3803 // x2 params pointer to parameters |
| 3804 // x3 function function pointer |
| 3805 // x4 array pointer to array slot (uninit) |
| 3806 // x5 elements pointer to elements array of alloc_obj |
| 3807 // x13 param_count number of parameters passed to function |
| 3808 |
| 3809 // Copy the fixed array slots. |
| 3810 Label loop; |
| 3811 Register array = x4; |
| 3812 // Set up pointer to first array slot. |
| 3813 __ Add(array, elements, FixedArray::kHeaderSize - kHeapObjectTag); |
| 3814 |
| 3815 __ Bind(&loop); |
| 3816 // Pre-decrement the parameters pointer by kPointerSize on each iteration. |
| 3817 // Pre-decrement in order to skip the receiver. |
| 3818 __ Ldr(x10, MemOperand(params, -kPointerSize, PreIndex)); |
| 3819 // Post-increment elements by kPointerSize on each iteration. |
| 3820 __ Str(x10, MemOperand(array, kPointerSize, PostIndex)); |
| 3821 __ Sub(param_count, param_count, 1); |
| 3822 __ Cbnz(param_count, &loop); |
| 3823 |
| 3824 // Return from stub. |
| 3825 __ Bind(&done); |
| 3826 __ Ret(); |
| 3827 |
| 3828 // Do the runtime call to allocate the arguments object. |
| 3829 __ Bind(&runtime); |
| 3830 __ Push(function, params, param_count_smi); |
| 3831 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1); |
| 3832 } |
| 3833 |
| 3834 |
| 3835 void RegExpExecStub::Generate(MacroAssembler* masm) { |
| 3836 #ifdef V8_INTERPRETED_REGEXP |
| 3837 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); |
| 3838 #else // V8_INTERPRETED_REGEXP |
| 3839 |
| 3840 // Stack frame on entry. |
| 3841 // jssp[0]: last_match_info (expected JSArray) |
| 3842 // jssp[8]: previous index |
| 3843 // jssp[16]: subject string |
| 3844 // jssp[24]: JSRegExp object |
| 3845 Label runtime; |
| 3846 |
| 3847 // Use of registers for this function. |
| 3848 |
| 3849 // Variable registers: |
| 3850 // x10-x13 used as scratch registers |
| 3851 // w0 string_type type of subject string |
| 3852 // x2 jsstring_length subject string length |
| 3853 // x3 jsregexp_object JSRegExp object |
| 3854 // w4 string_encoding ASCII or UC16 |
| 3855 // w5 sliced_string_offset if the string is a SlicedString |
| 3856 // offset to the underlying string |
| 3857 // w6 string_representation groups attributes of the string: |
| 3858 // - is a string |
| 3859 // - type of the string |
| 3860 // - is a short external string |
| 3861 Register string_type = w0; |
| 3862 Register jsstring_length = x2; |
| 3863 Register jsregexp_object = x3; |
| 3864 Register string_encoding = w4; |
| 3865 Register sliced_string_offset = w5; |
| 3866 Register string_representation = w6; |
| 3867 |
| 3868 // These are in callee-saved registers and will be preserved by the call |
| 3869 // to the native RegExp code, as this code is called using the normal |
| 3870 // C calling convention. When calling directly from generated code the |
| 3871 // native RegExp code will not do a GC and therefore the contents of |
| 3872 // these registers are safe to use after the call. |
| 3873 |
| 3874 // x19 subject subject string |
| 3875 // x20 regexp_data RegExp data (FixedArray) |
| 3876 // x21 last_match_info_elements info relative to the last match |
| 3877 // (FixedArray) |
| 3878 // x22 code_object generated regexp code |
| 3879 Register subject = x19; |
| 3880 Register regexp_data = x20; |
| 3881 Register last_match_info_elements = x21; |
| 3882 Register code_object = x22; |
| 3883 |
| 3884 // TODO(jbramley): Is it necessary to preserve these? I don't think ARM does. |
| 3885 CPURegList used_callee_saved_registers(subject, |
| 3886 regexp_data, |
| 3887 last_match_info_elements, |
| 3888 code_object); |
| 3889 __ PushCPURegList(used_callee_saved_registers); |
| 3890 |
| 3891 // Stack frame. |
| 3892 // jssp[0] : x19 |
| 3893 // jssp[8] : x20 |
| 3894 // jssp[16]: x21 |
| 3895 // jssp[24]: x22 |
| 3896 // jssp[32]: last_match_info (JSArray) |
| 3897 // jssp[40]: previous index |
| 3898 // jssp[48]: subject string |
| 3899 // jssp[56]: JSRegExp object |
| 3900 |
| 3901 const int kLastMatchInfoOffset = 4 * kPointerSize; |
| 3902 const int kPreviousIndexOffset = 5 * kPointerSize; |
| 3903 const int kSubjectOffset = 6 * kPointerSize; |
| 3904 const int kJSRegExpOffset = 7 * kPointerSize; |
| 3905 |
| 3906 // Ensure that a RegExp stack is allocated. |
| 3907 Isolate* isolate = masm->isolate(); |
| 3908 ExternalReference address_of_regexp_stack_memory_address = |
| 3909 ExternalReference::address_of_regexp_stack_memory_address(isolate); |
| 3910 ExternalReference address_of_regexp_stack_memory_size = |
| 3911 ExternalReference::address_of_regexp_stack_memory_size(isolate); |
| 3912 __ Mov(x10, Operand(address_of_regexp_stack_memory_size)); |
| 3913 __ Ldr(x10, MemOperand(x10)); |
| 3914 __ Cbz(x10, &runtime); |
| 3915 |
| 3916 // Check that the first argument is a JSRegExp object. |
| 3917 ASSERT(jssp.Is(__ StackPointer())); |
| 3918 __ Peek(jsregexp_object, kJSRegExpOffset); |
| 3919 __ JumpIfSmi(jsregexp_object, &runtime); |
| 3920 __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime); |
| 3921 |
| 3922 // Check that the RegExp has been compiled (data contains a fixed array). |
| 3923 __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset)); |
| 3924 if (FLAG_debug_code) { |
| 3925 STATIC_ASSERT(kSmiTag == 0); |
| 3926 __ Tst(regexp_data, kSmiTagMask); |
| 3927 __ Check(ne, "Unexpected type for RegExp data, FixedArray expected"); |
| 3928 __ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE); |
| 3929 __ Check(eq, "Unexpected type for RegExp data, FixedArray expected"); |
| 3930 } |
| 3931 |
| 3932 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. |
| 3933 __ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); |
| 3934 __ Cmp(x10, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); |
| 3935 __ B(ne, &runtime); |
| 3936 |
| 3937 // Check that the number of captures fits in the static offsets vector buffer. |
| 3938 // We always have at least one capture for the whole match, plus additional |
| 3939 // ones due to capturing parentheses. A capture takes 2 registers. |
| 3940 // The number of capture registers then is (number_of_captures + 1) * 2. |
| 3941 __ Ldrsw(x10, |
| 3942 UntagSmiFieldMemOperand(regexp_data, |
| 3943 JSRegExp::kIrregexpCaptureCountOffset)); |
| 3944 // Check (number_of_captures + 1) * 2 <= offsets vector size |
| 3945 // number_of_captures * 2 <= offsets vector size - 2 |
| 3946 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2); |
| 3947 __ Add(x10, x10, x10); |
| 3948 __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2); |
| 3949 __ B(hi, &runtime); |
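
A sketch of the capture check above: the whole match is capture 0 and every capture needs two offset registers, so the comparison is rearranged to avoid the +1:

```cpp
// Equivalent to (number_of_captures + 1) * 2 <= offsets_vector_size,
// which is exactly what the Add/Cmp pair above computes.
bool CapturesFit(int number_of_captures, int offsets_vector_size) {
  return number_of_captures * 2 <= offsets_vector_size - 2;
}
```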
| 3950 |
| 3951 // Initialize offset for possibly sliced string. |
| 3952 __ Mov(sliced_string_offset, 0); |
| 3953 |
| 3954 ASSERT(jssp.Is(__ StackPointer())); |
| 3955 __ Peek(subject, kSubjectOffset); |
| 3956 __ JumpIfSmi(subject, &runtime); |
| 3957 |
| 3958 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset)); |
| 3959 __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset)); |
| 3960 |
| 3961 __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset)); |
| 3962 |
| 3963 // Handle subject string according to its encoding and representation: |
| 3964 // (1) Sequential string? If yes, go to (5). |
| 3965 // (2) Anything but sequential or cons? If yes, go to (6). |
| 3966 // (3) Cons string. If the string is flat, replace subject with first string. |
| 3967 // Otherwise bail out. |
| 3968 // (4) Is subject external? If yes, go to (7). |
| 3969 // (5) Sequential string. Load regexp code according to encoding. |
| 3970 // (E) Carry on. |
| 3971 /// [...] |
| 3972 |
| 3973 // Deferred code at the end of the stub: |
| 3974 // (6) Not a long external string? If yes, go to (8). |
| 3975 // (7) External string. Make it, offset-wise, look like a sequential string. |
| 3976 // Go to (5). |
| 3977 // (8) Short external string or not a string? If yes, bail out to runtime. |
| 3978 // (9) Sliced string. Replace subject with parent. Go to (4). |
| 3979 |
| 3980 Label check_underlying; // (4) |
| 3981 Label seq_string; // (5) |
| 3982 Label not_seq_nor_cons; // (6) |
| 3983 Label external_string; // (7) |
| 3984 Label not_long_external; // (8) |
| 3985 |
| 3986 // (1) Sequential string? If yes, go to (5). |
| 3987 __ And(string_representation, |
| 3988 string_type, |
| 3989 kIsNotStringMask | |
| 3990 kStringRepresentationMask | |
| 3991 kShortExternalStringMask); |
| 3992 // We depend on the fact that Strings of type |
| 3993 // SeqString and not ShortExternalString are defined |
| 3994 // by the following pattern: |
| 3995 // string_type: 0XX0 XX00 |
| 3996 // ^ ^ ^^ |
| 3997 // | | || |
| 3998 // | | is a SeqString |
| 3999 // | is not a short external String |
| 4000 // is a String |
| 4001 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); |
| 4002 STATIC_ASSERT(kShortExternalStringTag != 0); |
| 4003 __ Cbz(string_representation, &seq_string); // Go to (5). |
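
A sketch of that representation test; the mask values are copied from the era's src/objects.h and may have changed since, but the point is that a sequential, non-short-external string has all three bits clear:

```cpp
#include <cstdint>

bool IsSequentialString(uint32_t instance_type) {
  const uint32_t kIsNotStringMask = 0x80;           // from objects.h
  const uint32_t kStringRepresentationMask = 0x03;  // from objects.h
  const uint32_t kShortExternalStringMask = 0x10;   // from objects.h
  return (instance_type & (kIsNotStringMask | kStringRepresentationMask |
                           kShortExternalStringMask)) == 0;  // cf. Cbz above
}
```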
| 4004 |
| 4005 // (2) Anything but sequential or cons? If yes, go to (6). |
| 4006 STATIC_ASSERT(kConsStringTag < kExternalStringTag); |
| 4007 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); |
| 4008 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag); |
| 4009 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag); |
| 4010 __ Cmp(string_representation, kExternalStringTag); |
| 4011 __ B(ge, ¬_seq_nor_cons); // Go to (6). |
| 4012 |
| 4013 // (3) Cons string. Check that it's flat. |
| 4014 __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset)); |
| 4015 __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime); |
| 4016 // Replace subject with first string. |
| 4017 __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); |
| 4018 |
| 4019 // (4) Is subject external? If yes, go to (7). |
| 4020 __ Bind(&check_underlying); |
| 4021 // Reload the string type. |
| 4022 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset)); |
| 4023 __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset)); |
| 4024 STATIC_ASSERT(kSeqStringTag == 0); |
| 4025 // The underlying external string is never a short external string. |
| 4026 STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength); |
| 4027 STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength); |
| 4028 __ TestAndBranchIfAnySet(string_type.X(), |
| 4029 kStringRepresentationMask, |
| 4030 &external_string); // Go to (7). |
| 4031 |
| 4032 // (5) Sequential string. Load regexp code according to encoding. |
| 4033 __ Bind(&seq_string); |
| 4034 |
| 4035 // Check that the third argument is a positive smi less than the subject |
| 4036 // string length. A negative value will be greater (unsigned comparison). |
| 4037 ASSERT(jssp.Is(__ StackPointer())); |
| 4038 __ Peek(x10, kPreviousIndexOffset); |
| 4039 __ JumpIfNotSmi(x10, &runtime); |
| 4040 __ Cmp(jsstring_length, x10); |
| 4041 __ B(ls, &runtime); |
| 4042 |
| 4043 // Argument 2 (x1): We need to load argument 2 (the previous index) into x1 |
| 4044 // before entering the exit frame. |
| 4045 __ SmiUntag(x1, x10); |
| 4046 |
| 4047 // The third bit determines the string encoding in string_type. |
| 4048 STATIC_ASSERT(kOneByteStringTag == 0x04); |
| 4049 STATIC_ASSERT(kTwoByteStringTag == 0x00); |
| 4050 STATIC_ASSERT(kStringEncodingMask == 0x04); |
| 4051 |
| 4052 // Find the code object based on the assumptions above. |
| 4053 // kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent; add an offset |
| 4054 // of kPointerSize to reach the latter. |
| 4055 ASSERT_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize, |
| 4056 JSRegExp::kDataUC16CodeOffset); |
| 4057 __ Mov(x10, kPointerSize); |
| 4058 // We will need the encoding later: ASCII = 0x04 |
| 4059 // UC16 = 0x00 |
| 4060 __ Ands(string_encoding, string_type, kStringEncodingMask); |
| 4061 __ CzeroX(x10, ne); |
| 4062 __ Add(x10, regexp_data, x10); |
| 4063 __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataAsciiCodeOffset)); |
| 4064 |
| 4065 // (E) Carry on. String handling is done. |
| 4066 |
| 4067 // Check that the irregexp code has been generated for the actual string |
| 4068 // encoding. If it has, the field contains a code object; otherwise it contains |
| 4069 // a smi (code flushing support). |
| 4070 __ JumpIfSmi(code_object, &runtime); |
| 4071 |
| 4072 // All checks done. Now push arguments for native regexp code. |
| 4073 __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, |
| 4074 x10, |
| 4075 x11); |
| 4076 |
| 4077 // Isolates: note we add an additional parameter here (isolate pointer). |
| 4078 __ EnterExitFrame(false, x10, 1); |
| 4079 ASSERT(csp.Is(__ StackPointer())); |
| 4080 |
| 4081 // We have 9 arguments to pass to the regexp code, so we have to pass |
| 4082 // one on the stack and the rest as registers. |
| 4083 |
| 4084 // Note that the placement of the argument on the stack isn't standard |
| 4085 // AAPCS64: |
| 4086 // csp[0]: Space for the return address placed by DirectCEntryStub. |
| 4087 // csp[8]: Argument 9, the current isolate address. |
| 4088 |
| 4089 __ Mov(x10, Operand(ExternalReference::isolate_address(isolate))); |
| 4090 __ Poke(x10, kPointerSize); |
| 4091 |
| 4092 Register length = w11; |
| 4093 Register previous_index_in_bytes = w12; |
| 4094 Register start = x13; |
| 4095 |
| 4096 // Load start of the subject string. |
| 4097 __ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag); |
| 4098 // Load the length of the original subject string from the previous stack |
| 4099 // frame. Therefore we have to use fp, which points exactly to two pointer |
| 4100 // sizes below the previous sp. (Because creating a new stack frame pushes |
| 4101 // the previous fp onto the stack and decrements sp by 2 * kPointerSize.) |
| 4102 __ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize)); |
| 4103 __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset)); |
| 4104 |
| 4105 // Handle UC16 encoding, two bytes make one character. |
| 4106 // string_encoding: if ASCII: 0x04 |
| 4107 // if UC16: 0x00 |
| 4108 STATIC_ASSERT(kStringEncodingMask == 0x04); |
| 4109 __ Ubfx(string_encoding, string_encoding, 2, 1); |
| 4110 __ Eor(string_encoding, string_encoding, 1); |
| 4111 // string_encoding: if ASCII: 0 |
| 4112 // if UC16: 1 |
| 4113 |
| 4114 // Convert string positions from characters to bytes. |
| 4115 // Previous index is in x1. |
| 4116 __ Lsl(previous_index_in_bytes, w1, string_encoding); |
| 4117 __ Lsl(length, length, string_encoding); |
| 4118 __ Lsl(sliced_string_offset, sliced_string_offset, string_encoding); |
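
A sketch of the Ubfx/Eor pair above, which folds the encoding bit (0x04 for one-byte/ASCII, 0x00 for two-byte/UC16) into a chars-to-bytes shift amount:

```cpp
#include <cstdint>

uint32_t CharsToBytesShift(uint32_t string_type) {
  uint32_t encoding_bit = (string_type >> 2) & 1;  // Ubfx: extract bit 2
  return encoding_bit ^ 1;  // one-byte -> shift 0, two-byte -> shift 1
}
// e.g. previous_index_in_bytes = previous_index << CharsToBytesShift(type);
```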
| 4119 |
| 4120 // Argument 1 (x0): Subject string. |
| 4121 __ Mov(x0, subject); |
| 4122 |
| 4123 // Argument 2 (x1): Previous index, already there. |
| 4124 |
| 4125 // Argument 3 (x2): Get the start of input. |
| 4126 // Start of input = start of string + previous index + substring offset |
| 4127 // (the substring offset is zero if |
| 4128 // the string is not sliced). |
| 4129 __ Add(w10, previous_index_in_bytes, sliced_string_offset); |
| 4130 __ Add(x2, start, Operand(w10, UXTW)); |
| 4131 |
| 4132 // Argument 4 (x3): |
| 4133 // End of input = start of input + (length of input - previous index) |
| 4134 __ Sub(w10, length, previous_index_in_bytes); |
| 4135 __ Add(x3, x2, Operand(w10, UXTW)); |
| 4136 |
| 4137 // Argument 5 (x4): static offsets vector buffer. |
| 4138 __ Mov(x4, |
| 4139 Operand(ExternalReference::address_of_static_offsets_vector(isolate))); |
| 4140 |
| 4141 // Argument 6 (x5): Set the number of capture registers to zero to force |
| 4142 // global regexps to behave as non-global. This stub is not used for global |
| 4143 // regexps. |
| 4144 __ Mov(x5, 0); |
| 4145 |
| 4146 // Argument 7 (x6): Start (high end) of backtracking stack memory area. |
| 4147 __ Mov(x10, Operand(address_of_regexp_stack_memory_address)); |
| 4148 __ Ldr(x10, MemOperand(x10)); |
| 4149 __ Mov(x11, Operand(address_of_regexp_stack_memory_size)); |
| 4150 __ Ldr(x11, MemOperand(x11)); |
| 4151 __ Add(x6, x10, x11); |
| 4152 |
| 4153 // Argument 8 (x7): Indicate that this is a direct call from JavaScript. |
| 4154 __ Mov(x7, 1); |
| 4155 |
| 4156 // Locate the code entry and call it. |
| 4157 __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag); |
| 4158 DirectCEntryStub stub; |
| 4159 stub.GenerateCall(masm, code_object); |
| 4160 |
| 4161 __ LeaveExitFrame(false, x10); |
| 4162 |
| 4163 // The generated regexp code returns an int32 in w0. |
| 4164 Label failure, exception; |
| 4165 __ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure); |
| 4166 __ CompareAndBranch(w0, |
| 4167 NativeRegExpMacroAssembler::EXCEPTION, |
| 4168 eq, |
| 4169 &exception); |
| 4170 __ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime); |
| 4171 |
| 4172 // Success: process the result from the native regexp code. |
| 4173 Register number_of_capture_registers = x12; |
| 4174 |
| 4175 // Calculate number of capture registers (number_of_captures + 1) * 2 |
| 4176 // and store it in the last match info. |
| 4177 __ Ldrsw(x10, |
| 4178 UntagSmiFieldMemOperand(regexp_data, |
| 4179 JSRegExp::kIrregexpCaptureCountOffset)); |
| 4180 __ Add(x10, x10, x10); |
| 4181 __ Add(number_of_capture_registers, x10, 2); |
| 4182 |
| 4183 // Check that the fourth object is a JSArray object. |
| 4184 ASSERT(jssp.Is(__ StackPointer())); |
| 4185 __ Peek(x10, kLastMatchInfoOffset); |
| 4186 __ JumpIfSmi(x10, &runtime); |
| 4187 __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime); |
| 4188 |
| 4189 // Check that the JSArray is the fast case. |
| 4190 __ Ldr(last_match_info_elements, |
| 4191 FieldMemOperand(x10, JSArray::kElementsOffset)); |
| 4192 __ Ldr(x10, |
| 4193 FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); |
| 4194 __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime); |
| 4195 |
| 4196 // Check that the last match info has space for the capture registers and the |
| 4197 // additional information (overhead). |
| 4198 // (number_of_captures + 1) * 2 + overhead <= last match info size |
| 4199 // (number_of_captures * 2) + 2 + overhead <= last match info size |
| 4200 // number_of_capture_registers + overhead <= last match info size |
| 4201 __ Ldrsw(x10, |
| 4202 UntagSmiFieldMemOperand(last_match_info_elements, |
| 4203 FixedArray::kLengthOffset)); |
| 4204 __ Add(x11, number_of_capture_registers, RegExpImpl::kLastMatchOverhead); |
| 4205 __ Cmp(x11, x10); |
| 4206 __ B(gt, &runtime); |
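|        // Worked example: a regexp with one capture group needs
|        // (1 + 1) * 2 = 4 capture registers, so the elements array must have
|        // room for at least 4 + RegExpImpl::kLastMatchOverhead entries.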
| 4207 |
| 4208 // Store the capture count. |
| 4209 __ SmiTag(x10, number_of_capture_registers); |
| 4210 __ Str(x10, |
| 4211 FieldMemOperand(last_match_info_elements, |
| 4212 RegExpImpl::kLastCaptureCountOffset)); |
| 4213 // Store last subject and last input. |
| 4214 __ Str(subject, |
| 4215 FieldMemOperand(last_match_info_elements, |
| 4216 RegExpImpl::kLastSubjectOffset)); |
| 4217   // Use x10 as the subject string in order to need only
| 4218   // one RecordWriteStub.
| 4219 __ Mov(x10, subject); |
| 4220 __ RecordWriteField(last_match_info_elements, |
| 4221 RegExpImpl::kLastSubjectOffset, |
| 4222 x10, |
| 4223 x11, |
| 4224 kLRHasNotBeenSaved, |
| 4225 kDontSaveFPRegs); |
| 4226 __ Str(subject, |
| 4227 FieldMemOperand(last_match_info_elements, |
| 4228 RegExpImpl::kLastInputOffset)); |
| 4229 __ Mov(x10, subject); |
| 4230 __ RecordWriteField(last_match_info_elements, |
| 4231 RegExpImpl::kLastInputOffset, |
| 4232 x10, |
| 4233 x11, |
| 4234 kLRHasNotBeenSaved, |
| 4235 kDontSaveFPRegs); |
| 4236 |
| 4237 Register last_match_offsets = x13; |
| 4238 Register offsets_vector_index = x14; |
| 4239 Register current_offset = x15; |
| 4240 |
| 4241 // Get the static offsets vector filled by the native regexp code |
| 4242 // and fill the last match info. |
| 4243 ExternalReference address_of_static_offsets_vector = |
| 4244 ExternalReference::address_of_static_offsets_vector(isolate); |
| 4245 __ Mov(offsets_vector_index, Operand(address_of_static_offsets_vector)); |
| 4246 |
| 4247 Label next_capture, done; |
| 4248 // Capture register counter starts from number of capture registers and |
| 4249 // iterates down to zero (inclusive). |
| 4250 __ Add(last_match_offsets, |
| 4251 last_match_info_elements, |
| 4252 RegExpImpl::kFirstCaptureOffset - kHeapObjectTag); |
| 4253 __ Bind(&next_capture); |
| 4254 __ Subs(number_of_capture_registers, number_of_capture_registers, 2); |
| 4255 __ B(mi, &done); |
| 4256   // Read two 32-bit values from the static offsets vector buffer into
| 4257   // an X register.
| 4258 __ Ldr(current_offset, |
| 4259 MemOperand(offsets_vector_index, kWRegSizeInBytes * 2, PostIndex)); |
| 4260 // Store the smi values in the last match info. |
| 4261 __ SmiTag(x10, current_offset); |
| 4262   // Clearing the bottom 32 bits gives us a Smi.
| 4263 STATIC_ASSERT(kSmiShift == 32); |
| 4264 __ And(x11, current_offset, ~kWRegMask); |
| 4265 __ Stp(x10, |
| 4266 x11, |
| 4267 MemOperand(last_match_offsets, kXRegSizeInBytes * 2, PostIndex)); |
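|        // Sketch of the trick above: current_offset packs two 32-bit offsets,
|        // o1 in bits 0-31 and o2 in bits 32-63. With kSmiShift == 32:
|        //   SmiTag(x10, current_offset)          -> x10 = o1 << 32 = Smi(o1)
|        //   And(x11, current_offset, ~kWRegMask) -> x11 = o2 << 32 = Smi(o2)
|        // so both smis are produced without unpacking the pair.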
| 4268 __ B(&next_capture); |
| 4269 __ Bind(&done); |
| 4270 |
| 4271 // Return last match info. |
| 4272 __ Peek(x0, kLastMatchInfoOffset); |
| 4273 __ PopCPURegList(used_callee_saved_registers); |
| 4274 // Drop the 4 arguments of the stub from the stack. |
| 4275 __ Drop(4); |
| 4276 __ Ret(); |
| 4277 |
| 4278 __ Bind(&exception); |
| 4279 Register exception_value = x0; |
| 4280   // A stack overflow (on the backtrack stack) may have occurred
| 4281 // in the RegExp code but no exception has been created yet. |
| 4282 // If there is no pending exception, handle that in the runtime system. |
| 4283 __ Mov(x10, Operand(isolate->factory()->the_hole_value())); |
| 4284 __ Mov(x11, |
| 4285 Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
| 4286 isolate))); |
| 4287 __ Ldr(exception_value, MemOperand(x11)); |
| 4288 __ Cmp(x10, exception_value); |
| 4289 __ B(eq, &runtime); |
| 4290 |
| 4291 __ Str(x10, MemOperand(x11)); // Clear pending exception. |
| 4292 |
| 4293 // Check if the exception is a termination. If so, throw as uncatchable. |
| 4294 Label termination_exception; |
| 4295 __ JumpIfRoot(exception_value, |
| 4296 Heap::kTerminationExceptionRootIndex, |
| 4297 &termination_exception); |
| 4298 |
| 4299 __ Throw(exception_value, x10, x11, x12, x13); |
| 4300 |
| 4301 __ Bind(&termination_exception); |
| 4302 __ ThrowUncatchable(exception_value, x10, x11, x12, x13); |
| 4303 |
| 4304 __ Bind(&failure); |
| 4305 __ Mov(x0, Operand(masm->isolate()->factory()->null_value())); |
| 4306 __ PopCPURegList(used_callee_saved_registers); |
| 4307 // Drop the 4 arguments of the stub from the stack. |
| 4308 __ Drop(4); |
| 4309 __ Ret(); |
| 4310 |
| 4311 __ Bind(&runtime); |
| 4312 __ PopCPURegList(used_callee_saved_registers); |
| 4313 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); |
| 4314 |
| 4315 // Deferred code for string handling. |
| 4316 // (6) Not a long external string? If yes, go to (8). |
| 4317 __ Bind(¬_seq_nor_cons); |
| 4318 // Compare flags are still set. |
| 4319 __ B(ne, ¬_long_external); // Go to (8). |
| 4320 |
| 4321 // (7) External string. Make it, offset-wise, look like a sequential string. |
| 4322 __ Bind(&external_string); |
| 4323 if (masm->emit_debug_code()) { |
| 4324 // Assert that we do not have a cons or slice (indirect strings) here. |
| 4325 // Sequential strings have already been ruled out. |
| 4326 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset)); |
| 4327 __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset)); |
| 4328 __ Tst(x10, kIsIndirectStringMask); |
| 4329 __ Check(eq, "external string expected, but cons or sliced string found"); |
| 4330 __ And(x10, x10, kStringRepresentationMask); |
| 4331 __ Cmp(x10, 0); |
| 4332 __ Check(ne, "external string expected, but sequential string found"); |
| 4333 } |
| 4334 __ Ldr(subject, |
| 4335 FieldMemOperand(subject, ExternalString::kResourceDataOffset)); |
| 4336 // Move the pointer so that offset-wise, it looks like a sequential string. |
| 4337 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); |
| 4338 __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag); |
| 4339 __ B(&seq_string); // Go to (5). |
| 4340 |
| 4341 // (8) If this is a short external string or not a string, bail out to |
| 4342 // runtime. |
| 4343 __ Bind(¬_long_external); |
| 4344 STATIC_ASSERT(kShortExternalStringTag != 0); |
| 4345 __ TestAndBranchIfAnySet(string_representation, |
| 4346 kShortExternalStringMask | kIsNotStringMask, |
| 4347 &runtime); |
| 4348 |
| 4349 // (9) Sliced string. Replace subject with parent. |
| 4350 __ Ldr(sliced_string_offset, |
| 4351 UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset)); |
| 4352 __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); |
| 4353 __ B(&check_underlying); // Go to (4). |
| 4354 #endif |
| 4355 } |
| 4356 |
| 4357 |
| 4358 void RegExpConstructResultStub::Generate(MacroAssembler* masm) { |
| 4359 // Stack layout on entry. |
| 4360 // jssp[0]: pointer to string object |
| 4361 // jssp[8]: start index of last regexp match (smi) |
| 4362 // jssp[16]: number of results (smi) |
| 4363 // |
| 4364 // Returns pointer to result object in x0. |
| 4365 |
| 4366 static const int kMaxInlineLength = 100; |
| 4367 Label slow; |
| 4368 Factory* factory = masm->isolate()->factory(); |
| 4369 Register input = x10; |
| 4370 Register index_smi = x11; |
| 4371 Register length_smi = x12; |
| 4372 |
| 4373 __ Pop(input, index_smi, length_smi); |
| 4374 __ JumpIfNotSmi(length_smi, &slow); |
| 4375 __ Cmp(length_smi, Operand(Smi::FromInt(kMaxInlineLength))); |
| 4376 __ B(hi, &slow); |
| 4377 |
| 4378 Register length = x13; |
| 4379 __ SmiUntag(length, length_smi); |
| 4380 |
| 4381 // Allocate RegExpResult followed by FixedArray. |
| 4382 // JSArray: [Map][empty properties][Elements][Length-smi][index-smi][input] |
| 4383 // Elements: [Map][Length][..elements..] |
| 4384 // Size of JSArray with two in-object properties and the header of a |
| 4385 // FixedArray. |
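|        // In C++ terms, the allocation size computed below is:
|        //   int size = JSRegExpResult::kSize + FixedArray::kHeaderSize
|        //              + length * kPointerSize;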
| 4386 Register alloc_obj = x0; // Result register for allocated object. |
| 4387 Register alloc_size = x1; |
| 4388 int objects_size = JSRegExpResult::kSize + FixedArray::kHeaderSize; |
| 4389 __ Mov(alloc_size, objects_size); |
| 4390 __ Add(alloc_size, alloc_size, Operand(length, LSL, kPointerSizeLog2)); |
| 4391 __ Allocate(alloc_size, alloc_obj, x14, x15, &slow, TAG_OBJECT); |
| 4392 |
| 4393 // Set JSArray map to global.regexp_result_map(). |
| 4394 Register global_obj = x14; |
| 4395 Register global_ctx = x14; |
| 4396 Register regexp_map = x14; |
| 4397 __ Ldr(global_obj, GlobalObjectMemOperand()); |
| 4398 __ Ldr(global_ctx, FieldMemOperand(global_obj, |
| 4399 GlobalObject::kNativeContextOffset)); |
| 4400 __ Ldr(regexp_map, ContextMemOperand(global_ctx, |
| 4401 Context::REGEXP_RESULT_MAP_INDEX)); |
| 4402 __ Str(regexp_map, FieldMemOperand(alloc_obj, HeapObject::kMapOffset)); |
| 4403 |
| 4404 // Set empty properties FixedArray. |
| 4405 Register empty_array = x14; |
| 4406 __ Mov(empty_array, Operand(factory->empty_fixed_array())); |
| 4407 __ Str(empty_array, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset)); |
| 4408 |
| 4409 // Set elements to point to FixedArray allocated right after the JSArray. |
| 4410 Register elements = x15; |
| 4411 __ Add(elements, alloc_obj, JSRegExpResult::kSize); |
| 4412 __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset)); |
| 4413 |
| 4414 // Set input, index and length field from arguments. |
| 4415 __ Str(input, FieldMemOperand(alloc_obj, JSRegExpResult::kInputOffset)); |
| 4416 __ Str(index_smi, FieldMemOperand(alloc_obj, JSRegExpResult::kIndexOffset)); |
| 4417 __ Str(length_smi, FieldMemOperand(alloc_obj, JSArray::kLengthOffset)); |
| 4418 |
| 4419 // Fill in the elements FixedArray. First, set the map. |
| 4420 Register map = x14; |
| 4421 __ Mov(map, Operand(factory->fixed_array_map())); |
| 4422 __ Str(map, FieldMemOperand(elements, HeapObject::kMapOffset)); |
| 4423 // Set FixedArray length. |
| 4424 __ Str(length_smi, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 4425 // Fill contents of FixedArray with undefined. |
| 4426 Register undef = x14; |
| 4427 Register fixed_array_elts = x15; |
| 4428 __ LoadRoot(undef, Heap::kUndefinedValueRootIndex); |
| 4429 __ Add(fixed_array_elts, elements, FixedArray::kHeaderSize - kHeapObjectTag); |
| 4430 |
| 4431   // Fill the fixed array elements with undefined.
| 4432 Label loop, done; |
| 4433 __ Bind(&loop); |
| 4434 __ Cbz(length, &done); |
| 4435 __ Sub(length, length, 1); |
| 4436 __ Str(undef, MemOperand(fixed_array_elts, length, LSL, kPointerSizeLog2)); |
| 4437 __ B(&loop); |
| 4438 |
| 4439 __ Bind(&done); |
| 4440 __ Ret(); |
| 4441 |
| 4442 __ Bind(&slow); |
| 4443 __ Push(length_smi, index_smi, input); |
| 4444 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1); |
| 4445 } |
| 4446 |
| 4447 |
| 4448 // TODO(mcapewel): This code has been ported as part of the merge process, but |
| 4449 // is currently untested. |
| 4450 // TODO(jbramley): Don't use static registers here, but take them as arguments. |
| 4451 static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) { |
| 4452 ASM_UNIMPLEMENTED_BREAK("Untested: GenerateRecordCallTargetNoArray"); |
| 4453 // Cache the called function in a global property cell. Cache states are |
| 4454 // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic. |
| 4455 // x1 : the function to call |
| 4456 // x2 : cache cell for the call target |
| 4457 Label done; |
| 4458 |
| 4459 ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()), |
| 4460 masm->isolate()->heap()->undefined_value()); |
| 4461 ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()), |
| 4462 masm->isolate()->heap()->the_hole_value()); |
| 4463 |
| 4464 // Load the cache state. |
| 4465 __ Ldr(x3, FieldMemOperand(x2, JSGlobalPropertyCell::kValueOffset)); |
| 4466 |
| 4467 // A monomorphic cache hit or an already megamorphic state: invoke the |
| 4468 // function without changing the state. |
| 4469 __ Cmp(x3, x1); |
| 4470 __ B(eq, &done); |
| 4471 __ JumpIfRoot(x3, Heap::kUndefinedValueRootIndex, &done); |
| 4472 |
| 4473   // A monomorphic miss (i.e., here the cache is not uninitialized) goes
| 4474   // megamorphic. MegamorphicSentinel is an immortal immovable object
| 4475   // (undefined) so no write barrier is needed.
| 4476 Label skip_undef_store; |
| 4477 __ JumpIfRoot(x3, Heap::kTheHoleValueRootIndex, &skip_undef_store); |
| 4478 __ LoadRoot(ip0, Heap::kUndefinedValueRootIndex); |
| 4479 __ Str(ip0, FieldMemOperand(x2, JSGlobalPropertyCell::kValueOffset)); |
| 4480 __ B(&done); |
| 4481 __ Bind(&skip_undef_store); |
| 4482 |
| 4483 // An uninitialized cache is patched with the function. |
| 4484 __ Str(x1, FieldMemOperand(x2, JSGlobalPropertyCell::kValueOffset)); |
| 4485 // No need for a write barrier here - cells are rescanned. |
| 4486 |
| 4487 __ Bind(&done); |
| 4488 } |
| 4489 |
| 4490 |
| 4491 // TODO(jbramley): Don't use static registers here, but take them as arguments. |
| 4492 static void GenerateRecordCallTarget(MacroAssembler* masm) { |
| 4493 // Cache the called function in a global property cell. Cache states are |
| 4494 // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic. |
| 4495 // x1 : the function to call |
| 4496 // x2 : cache cell for the call target |
| 4497 ASSERT(FLAG_optimize_constructed_arrays); |
| 4498 Label initialize, done, miss, megamorphic, not_array_function; |
| 4499 |
| 4500 ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()), |
| 4501 masm->isolate()->heap()->undefined_value()); |
| 4502 ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()), |
| 4503 masm->isolate()->heap()->the_hole_value()); |
| 4504 |
| 4505 // Load the cache state. |
| 4506 __ Ldr(x3, FieldMemOperand(x2, JSGlobalPropertyCell::kValueOffset)); |
| 4507 |
| 4508 // A monomorphic cache hit or an already megamorphic state: invoke the |
| 4509 // function without changing the state. |
| 4510 __ Cmp(x3, x1); |
| 4511 __ B(eq, &done); |
| 4512 __ JumpIfRoot(x3, Heap::kUndefinedValueRootIndex, &done); |
| 4513 |
| 4514 // Special handling of the Array() function, which caches not only the |
| 4515 // monomorphic Array function but the initial ElementsKind with special |
| 4516   // sentinels.
| 4517 Handle<Object> terminal_kind_sentinel = |
| 4518 TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(), |
| 4519 LAST_FAST_ELEMENTS_KIND); |
| 4520 __ JumpIfNotSmi(x3, &miss); |
| 4521 __ Cmp(x3, Operand(terminal_kind_sentinel)); |
| 4522 __ B(gt, &miss); |
| 4523   // Make sure the function is the Array() function.
| 4524 __ LoadArrayFunction(x3); |
| 4525 __ Cmp(x1, x3); |
| 4526 __ B(ne, &megamorphic); |
| 4527 __ B(&done); |
| 4528 |
| 4529 __ Bind(&miss); |
| 4530 |
| 4531   // A monomorphic miss (i.e., here the cache is not uninitialized) goes
| 4532 // megamorphic. |
| 4533 __ JumpIfRoot(x3, Heap::kTheHoleValueRootIndex, &initialize); |
| 4534 // MegamorphicSentinel is an immortal immovable object (undefined) so no |
| 4535 // write-barrier is needed. |
| 4536 __ Bind(&megamorphic); |
| 4537 __ LoadRoot(x3, Heap::kUndefinedValueRootIndex); |
| 4538 __ Str(x3, FieldMemOperand(x2, JSGlobalPropertyCell::kValueOffset)); |
| 4539 __ B(&done); |
| 4540 |
| 4541   // An uninitialized cache is patched with the function, or with a sentinel
| 4542   // indicating the ElementsKind if the function is the Array constructor.
| 4543 __ Bind(&initialize); |
| 4544   // Make sure the function is the Array() function.
| 4545 __ LoadArrayFunction(x3); |
| 4546 __ Cmp(x1, x3); |
| 4547 __ B(ne, ¬_array_function); |
| 4548 |
| 4549   // The target function is the Array constructor; install a sentinel value in
| 4550   // the constructor's type info cell that will track the initial ElementsKind
| 4551   // that should be used for the array when it is constructed.
| 4552 Handle<Object> initial_kind_sentinel = |
| 4553 TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(), |
| 4554 GetInitialFastElementsKind()); |
| 4555 __ Mov(x3, Operand(initial_kind_sentinel)); |
| 4556 __ Str(x3, FieldMemOperand(x2, JSGlobalPropertyCell::kValueOffset)); |
| 4557 __ B(&done); |
| 4558 |
| 4559 __ Bind(¬_array_function); |
| 4560 // An uninitialized cache is patched with the function. |
| 4561 __ Str(x1, FieldMemOperand(x2, JSGlobalPropertyCell::kValueOffset)); |
| 4562 // No need for a write barrier here - cells are rescanned. |
| 4563 |
| 4564 __ Bind(&done); |
| 4565 } |
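|
|
| // A minimal sketch of the cache-state machine implemented above, in plain
| // C++ with hypothetical helper names:
| //   Object* state = cell->value();
| //   if (state == function || state == megamorphic_sentinel) return;
| //   if (IsMonomorphicArraySentinel(state)) {
| //     if (function == ArrayFunction()) return;  // Still monomorphic.
| //     cell->set_value(megamorphic_sentinel);    // Monomorphic miss.
| //   } else if (state == uninitialized_sentinel) {
| //     cell->set_value((function == ArrayFunction())
| //                         ? InitialElementsKindSentinel() : function);
| //   } else {
| //     cell->set_value(megamorphic_sentinel);    // Monomorphic miss.
| //   }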
| 4566 |
| 4567 |
| 4568 void CallFunctionStub::Generate(MacroAssembler* masm) { |
| 4569 ASM_LOCATION("CallFunctionStub::Generate"); |
| 4570 // x1 function the function to call |
| 4571 // x2 cache_cell cache cell for call target |
| 4572 Register function = x1; |
| 4573 Register cache_cell = x2; |
| 4574 Label slow, non_function; |
| 4575 |
| 4576 // The receiver might implicitly be the global object. This is |
| 4577 // indicated by passing the hole as the receiver to the call |
| 4578 // function stub. |
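|   // For example, a bare call f() has no explicit receiver, so the call site
|   // pushes the hole; a call o.f() pushes o instead.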
| 4579 if (ReceiverMightBeImplicit()) { |
| 4580 Label call; |
| 4581 // Get the receiver from the stack. |
| 4582 // jssp[0] - jssp[argc_ - 1] : arguments |
| 4583 // jssp[argc_] : receiver |
| 4584 // jssp[argc_ + 1] : function |
| 4585 __ Peek(x4, argc_ * kXRegSizeInBytes); |
| 4586 // Call as function is indicated with the hole. |
| 4587 __ JumpIfNotRoot(x4, Heap::kTheHoleValueRootIndex, &call); |
| 4588 // Patch the receiver on the stack with the global receiver object. |
| 4589 __ Ldr(x10, GlobalObjectMemOperand()); |
| 4590 __ Ldr(x11, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset)); |
| 4591 __ Poke(x11, argc_ * kXRegSizeInBytes); |
| 4592 __ Bind(&call); |
| 4593 } |
| 4594 |
| 4595 // Check that the function is really a JavaScript function. |
| 4596 // x1 function pushed function (to be verified) |
| 4597 __ JumpIfSmi(function, &non_function); |
| 4598 // Get the map of the function object. |
| 4599 __ JumpIfNotObjectType(function, x10, x10, JS_FUNCTION_TYPE, &slow); |
| 4600 |
| 4601 if (RecordCallTarget()) { |
| 4602 if (FLAG_optimize_constructed_arrays) { |
| 4603 GenerateRecordCallTarget(masm); |
| 4604 } else { |
| 4605 GenerateRecordCallTargetNoArray(masm); |
| 4606 } |
| 4607 } |
| 4608 |
| 4609 // Fast-case: Invoke the function now. |
| 4610 // x1 function pushed function |
| 4611 ParameterCount actual(argc_); |
| 4612 |
| 4613 if (ReceiverMightBeImplicit()) { |
| 4614 Label call_as_function; |
| 4615 __ JumpIfRoot(x4, Heap::kTheHoleValueRootIndex, &call_as_function); |
| 4616 __ InvokeFunction(function, |
| 4617 actual, |
| 4618 JUMP_FUNCTION, |
| 4619 NullCallWrapper(), |
| 4620 CALL_AS_METHOD); |
| 4621 __ Bind(&call_as_function); |
| 4622 } |
| 4623 __ InvokeFunction(function, |
| 4624 actual, |
| 4625 JUMP_FUNCTION, |
| 4626 NullCallWrapper(), |
| 4627 CALL_AS_FUNCTION); |
| 4628 |
| 4629 // Slow-case: Non-function called. |
| 4630 __ Bind(&slow); |
| 4631 if (RecordCallTarget()) { |
| 4632 // If there is a call target cache, mark it megamorphic in the |
| 4633 // non-function case. MegamorphicSentinel is an immortal immovable object |
| 4634 // (undefined) so no write barrier is needed. |
| 4635 ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()), |
| 4636 masm->isolate()->heap()->undefined_value()); |
| 4637 __ LoadRoot(x11, Heap::kUndefinedValueRootIndex); |
| 4638 __ Str(x11, FieldMemOperand(cache_cell, |
| 4639 JSGlobalPropertyCell::kValueOffset)); |
| 4640 } |
| 4641 // Check for function proxy. |
| 4642 // x10 : function type. |
| 4643 __ Cmp(x10, JS_FUNCTION_PROXY_TYPE); |
| 4644 __ B(ne, &non_function); |
| 4645   __ Push(function);  // Put the proxy as an additional argument.
| 4646 __ Mov(x0, argc_ + 1); |
| 4647 __ Mov(x2, 0); |
| 4648 __ GetBuiltinEntry(x3, Builtins::CALL_FUNCTION_PROXY); |
| 4649 __ SetCallKind(x5, CALL_AS_METHOD); |
| 4650 { |
| 4651 Handle<Code> adaptor = |
| 4652 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); |
| 4653 __ Jump(adaptor, RelocInfo::CODE_TARGET); |
| 4654 } |
| 4655 |
| 4656 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead |
| 4657 // of the original receiver from the call site). |
| 4658 __ Bind(&non_function); |
| 4659 __ Poke(function, argc_ * kXRegSizeInBytes); |
| 4660 __ Mov(x0, argc_); // Set up the number of arguments. |
| 4661 __ Mov(x2, 0); |
| 4662 __ GetBuiltinEntry(x3, Builtins::CALL_NON_FUNCTION); |
| 4663 __ SetCallKind(x5, CALL_AS_METHOD); |
| 4664 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), |
| 4665 RelocInfo::CODE_TARGET); |
| 4666 } |
| 4667 |
| 4668 |
| 4669 void CallConstructStub::Generate(MacroAssembler* masm) { |
| 4670 ASM_LOCATION("CallConstructStub::Generate"); |
| 4671 // x0 : number of arguments |
| 4672 // x1 : the function to call |
| 4673 // x2 : cache cell for call target |
| 4674 Register function = x1; |
| 4675 Label slow, non_function_call; |
| 4676 |
| 4677 // Check that the function is not a smi. |
| 4678 __ JumpIfSmi(function, &non_function_call); |
| 4679 // Check that the function is a JSFunction. |
| 4680 Register object_type = x10; |
| 4681 __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE, |
| 4682 &slow); |
| 4683 |
| 4684 if (RecordCallTarget()) { |
| 4685 if (FLAG_optimize_constructed_arrays) { |
| 4686 GenerateRecordCallTarget(masm); |
| 4687 } else { |
| 4688 GenerateRecordCallTargetNoArray(masm); |
| 4689 } |
| 4690 } |
| 4691 |
| 4692 // Jump to the function-specific construct stub. |
| 4693 Register jump_reg = FLAG_optimize_constructed_arrays ? x3 : x2; |
| 4694 Register shared_func_info = jump_reg; |
| 4695 Register cons_stub = jump_reg; |
| 4696 Register cons_stub_code = jump_reg; |
| 4697 __ Ldr(shared_func_info, |
| 4698 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); |
| 4699 __ Ldr(cons_stub, |
| 4700 FieldMemOperand(shared_func_info, |
| 4701 SharedFunctionInfo::kConstructStubOffset)); |
| 4702 __ Add(cons_stub_code, cons_stub, Code::kHeaderSize - kHeapObjectTag); |
| 4703 __ Br(cons_stub_code); |
| 4704 |
| 4705 Label do_call; |
| 4706 __ Bind(&slow); |
| 4707 __ Cmp(object_type, JS_FUNCTION_PROXY_TYPE); |
| 4708 __ B(ne, &non_function_call); |
| 4709 Register builtin = x3; |
| 4710 __ GetBuiltinEntry(builtin, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); |
| 4711 __ B(&do_call); |
| 4712 |
| 4713 __ Bind(&non_function_call); |
| 4714 __ GetBuiltinEntry(builtin, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); |
| 4715 |
| 4716 __ Bind(&do_call); |
| 4717 // Set expected number of arguments to zero (not changing x0). |
| 4718 __ Mov(x2, 0); |
| 4719 __ SetCallKind(x5, CALL_AS_METHOD); |
| 4720 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), |
| 4721 RelocInfo::CODE_TARGET); |
| 4722 } |
| 4723 |
| 4724 |
| 4725 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { |
| 4726   // If the receiver is a smi, trigger the non-string case.
| 4727 __ JumpIfSmi(object_, receiver_not_string_); |
| 4728 |
| 4729 // Fetch the instance type of the receiver into result register. |
| 4730 __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); |
| 4731 __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); |
| 4732 |
| 4733   // If the receiver is not a string, trigger the non-string case.
| 4734 __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_); |
| 4735 |
| 4736   // If the index is not a smi, trigger the non-smi case.
| 4737 __ JumpIfNotSmi(index_, &index_not_smi_); |
| 4738 |
| 4739 __ Bind(&got_smi_index_); |
| 4740 // Check for index out of range. |
| 4741 __ Ldrsw(result_, UntagSmiFieldMemOperand(object_, String::kLengthOffset)); |
| 4742 __ Cmp(result_, Operand::UntagSmi(index_)); |
| 4743 __ B(ls, index_out_of_range_); |
| 4744 |
| 4745 __ SmiUntag(index_); |
| 4746 |
| 4747 StringCharLoadGenerator::Generate(masm, |
| 4748 object_, |
| 4749 index_, |
| 4750 result_, |
| 4751 &call_runtime_); |
| 4752 __ SmiTag(result_); |
| 4753 __ Bind(&exit_); |
| 4754 } |
| 4755 |
| 4756 |
| 4757 void StringCharCodeAtGenerator::GenerateSlow( |
| 4758 MacroAssembler* masm, |
| 4759 const RuntimeCallHelper& call_helper) { |
| 4760 __ Abort("Unexpected fallthrough to CharCodeAt slow case"); |
| 4761 |
| 4762 __ Bind(&index_not_smi_); |
| 4763 // If index is a heap number, try converting it to an integer. |
| 4764 __ CheckMap(index_, |
| 4765 result_, |
| 4766 Heap::kHeapNumberMapRootIndex, |
| 4767 index_not_number_, |
| 4768 DONT_DO_SMI_CHECK); |
| 4769 call_helper.BeforeCall(masm); |
| 4770 // Save object_ on the stack and pass index_ as argument for runtime call. |
| 4771 __ Push(object_, index_); |
| 4772 if (index_flags_ == STRING_INDEX_IS_NUMBER) { |
| 4773 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); |
| 4774 } else { |
| 4775 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); |
| 4776 // NumberToSmi discards numbers that are not exact integers. |
| 4777 __ CallRuntime(Runtime::kNumberToSmi, 1); |
| 4778 } |
| 4779 // Save the conversion result before the pop instructions below |
| 4780 // have a chance to overwrite it. |
| 4781 __ Mov(index_, x0); |
| 4782 __ Pop(object_); |
| 4783 // Reload the instance type. |
| 4784 __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); |
| 4785 __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); |
| 4786 call_helper.AfterCall(masm); |
| 4787 |
| 4788 // If index is still not a smi, it must be out of range. |
| 4789 __ JumpIfNotSmi(index_, index_out_of_range_); |
| 4790 // Otherwise, return to the fast path. |
| 4791 __ B(&got_smi_index_); |
| 4792 |
| 4793 // Call runtime. We get here when the receiver is a string and the |
| 4794   // index is a number, but the code for getting the actual character
| 4795 // is too complex (e.g., when the string needs to be flattened). |
| 4796 __ Bind(&call_runtime_); |
| 4797 call_helper.BeforeCall(masm); |
| 4798 __ SmiTag(index_); |
| 4799 __ Push(object_, index_); |
| 4800 __ CallRuntime(Runtime::kStringCharCodeAt, 2); |
| 4801 __ Mov(result_, x0); |
| 4802 call_helper.AfterCall(masm); |
| 4803 __ B(&exit_); |
| 4804 |
| 4805 __ Abort("Unexpected fallthrough from CharCodeAt slow case"); |
| 4806 } |
| 4807 |
| 4808 |
| 4809 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { |
| 4810 __ JumpIfNotSmi(code_, &slow_case_); |
| 4811 __ Cmp(code_, Operand(Smi::FromInt(String::kMaxOneByteCharCode))); |
| 4812 __ B(hi, &slow_case_); |
| 4813 |
| 4814 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); |
| 4815   // At this point, the code register contains the smi-tagged ASCII char code.
| 4816 STATIC_ASSERT(kSmiShift > kPointerSizeLog2); |
| 4817 __ Add(result_, result_, Operand(code_, LSR, kSmiShift - kPointerSizeLog2)); |
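|   // The shifted add computes result_ + (code_ >> kSmiShift) * kPointerSize,
|   // untagging the char code and scaling it to a byte offset in one step.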
| 4818 __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); |
| 4819 __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_); |
| 4820 __ Bind(&exit_); |
| 4821 } |
| 4822 |
| 4823 |
| 4824 void StringCharFromCodeGenerator::GenerateSlow( |
| 4825 MacroAssembler* masm, |
| 4826 const RuntimeCallHelper& call_helper) { |
| 4827 __ Abort("Unexpected fallthrough to CharFromCode slow case"); |
| 4828 |
| 4829 __ Bind(&slow_case_); |
| 4830 call_helper.BeforeCall(masm); |
| 4831 __ Push(code_); |
| 4832 __ CallRuntime(Runtime::kCharFromCode, 1); |
| 4833 __ Mov(result_, x0); |
| 4834 call_helper.AfterCall(masm); |
| 4835 __ B(&exit_); |
| 4836 |
| 4837 __ Abort("Unexpected fallthrough from CharFromCode slow case"); |
| 4838 } |
| 4839 |
| 4840 |
| 4841 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { |
| 4842   // Inputs are in x0 (rhs) and x1 (lhs), as in the other compare stubs.
| 4843 ASSERT(state_ == CompareIC::SMI); |
| 4844 ASM_LOCATION("ICCompareStub[Smis]"); |
| 4845 Label miss; |
| 4846 // Bail out (to 'miss') unless both x0 and x1 are smis. |
| 4847 __ JumpIfEitherNotSmi(x0, x1, &miss); |
| 4848 |
| 4849 // TODO(jbramley): Why do we only set the flags for EQ? |
| 4850 if (GetCondition() == eq) { |
| 4851 // For equality we do not care about the sign of the result. |
| 4852 __ Subs(x0, x0, x1); |
| 4853 } else { |
| 4854 // Untag before subtracting to avoid handling overflow. |
| 4855 __ SmiUntag(x1); |
| 4856 __ Sub(x0, x1, Operand::UntagSmi(x0)); |
| 4857 } |
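|   // Sketch of the two paths: for equality only the Z flag of the result
|   // matters, so the tagged subtraction suffices. For ordered compares, both
|   // operands are untagged first, so subtracting two 32-bit values in a
|   // 64-bit register cannot overflow:
|   //   result = (lhs >> kSmiShift) - (rhs >> kSmiShift);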
| 4858 __ Ret(); |
| 4859 |
| 4860 __ Bind(&miss); |
| 4861 GenerateMiss(masm); |
| 4862 } |
| 4863 |
| 4864 |
| 4865 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { |
| 4866 ASSERT(state_ == CompareIC::NUMBER); |
| 4867 ASM_LOCATION("ICCompareStub[HeapNumbers]"); |
| 4868 |
| 4869 Label unordered, maybe_undefined1, maybe_undefined2; |
| 4870 Label miss, handle_lhs, values_in_d_regs; |
| 4871 Label untag_rhs, untag_lhs; |
| 4872 |
| 4873 Register result = x0; |
| 4874 Register rhs = x0; |
| 4875 Register lhs = x1; |
| 4876 FPRegister rhs_d = d0; |
| 4877 FPRegister lhs_d = d1; |
| 4878 |
| 4879 if (left_ == CompareIC::SMI) { |
| 4880 __ JumpIfNotSmi(lhs, &miss); |
| 4881 } |
| 4882 if (right_ == CompareIC::SMI) { |
| 4883 __ JumpIfNotSmi(rhs, &miss); |
| 4884 } |
| 4885 |
| 4886 __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag); |
| 4887 __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag); |
| 4888 |
| 4889 // Load rhs if it's a heap number. |
| 4890 __ JumpIfSmi(rhs, &handle_lhs); |
| 4891 __ CheckMap(rhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, |
| 4892 DONT_DO_SMI_CHECK); |
| 4893 __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
| 4894 |
| 4895 // Load lhs if it's a heap number. |
| 4896 __ Bind(&handle_lhs); |
| 4897 __ JumpIfSmi(lhs, &values_in_d_regs); |
| 4898 __ CheckMap(lhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, |
| 4899 DONT_DO_SMI_CHECK); |
| 4900 __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
| 4901 |
| 4902 __ Bind(&values_in_d_regs); |
| 4903 __ Fcmp(lhs_d, rhs_d); |
| 4904 __ B(vs, &unordered); // Overflow flag set if either is NaN. |
| 4905 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1)); |
| 4906 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL). |
| 4907 __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0. |
| 4908 __ Ret(); |
| 4909 |
| 4910 __ Bind(&unordered); |
| 4911 ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, |
| 4912 CompareIC::GENERIC); |
| 4913 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); |
| 4914 |
| 4915 __ Bind(&maybe_undefined1); |
| 4916 if (Token::IsOrderedRelationalCompareOp(op_)) { |
| 4917 __ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss); |
| 4918 __ JumpIfSmi(lhs, &unordered); |
| 4919 __ JumpIfNotObjectType(lhs, x10, x10, HEAP_NUMBER_TYPE, &maybe_undefined2); |
| 4920 __ B(&unordered); |
| 4921 } |
| 4922 |
| 4923 __ Bind(&maybe_undefined2); |
| 4924 if (Token::IsOrderedRelationalCompareOp(op_)) { |
| 4925 __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered); |
| 4926 } |
| 4927 |
| 4928 __ Bind(&miss); |
| 4929 GenerateMiss(masm); |
| 4930 } |
| 4931 |
| 4932 |
| 4933 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { |
| 4934 ASSERT(state_ == CompareIC::INTERNALIZED_STRING); |
| 4935 ASM_LOCATION("ICCompareStub[InternalizedStrings]"); |
| 4936 Label miss; |
| 4937 |
| 4938 Register result = x0; |
| 4939 Register rhs = x0; |
| 4940 Register lhs = x1; |
| 4941 |
| 4942 // Check that both operands are heap objects. |
| 4943 __ JumpIfEitherSmi(lhs, rhs, &miss); |
| 4944 |
| 4945 // Check that both operands are internalized strings. |
| 4946 Register rhs_map = x10; |
| 4947 Register lhs_map = x11; |
| 4948 Register rhs_type = x10; |
| 4949 Register lhs_type = x11; |
| 4950 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset)); |
| 4951 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset)); |
| 4952 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset)); |
| 4953 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset)); |
| 4954 __ And(x10, lhs_type, rhs_type); |
| 4955 __ Tbz(x10, MaskToBit(kIsInternalizedMask), &miss); |
| 4956 STATIC_ASSERT(kInternalizedTag != 0); |
| 4957 |
| 4958 // Internalized strings are compared by identity. |
| 4959 STATIC_ASSERT(EQUAL == 0); |
| 4960 __ Cmp(lhs, rhs); |
| 4961 __ Cset(result, ne); |
| 4962 __ Ret(); |
| 4963 |
| 4964 __ Bind(&miss); |
| 4965 GenerateMiss(masm); |
| 4966 } |
| 4967 |
| 4968 |
| 4969 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { |
| 4970 ASSERT(state_ == CompareIC::UNIQUE_NAME); |
| 4971 ASM_LOCATION("ICCompareStub[UniqueNames]"); |
| 4972 ASSERT(GetCondition() == eq); |
| 4973 Label miss; |
| 4974 |
| 4975 Register result = x0; |
| 4976 Register rhs = x0; |
| 4977 Register lhs = x1; |
| 4978 |
| 4979 Register lhs_instance_type = w2; |
| 4980 Register rhs_instance_type = w3; |
| 4981 |
| 4982 // Check that both operands are heap objects. |
| 4983 __ JumpIfEitherSmi(lhs, rhs, &miss); |
| 4984 |
| 4985 // Check that both operands are unique names. This leaves the instance |
| 4986   // types loaded in lhs_instance_type and rhs_instance_type.
| 4987 __ Ldr(x10, FieldMemOperand(lhs, HeapObject::kMapOffset)); |
| 4988 __ Ldr(x11, FieldMemOperand(rhs, HeapObject::kMapOffset)); |
| 4989 __ Ldrb(lhs_instance_type, FieldMemOperand(x10, Map::kInstanceTypeOffset)); |
| 4990 __ Ldrb(rhs_instance_type, FieldMemOperand(x11, Map::kInstanceTypeOffset)); |
| 4991 |
| 4992 // To avoid a miss, each instance type should be either SYMBOL_TYPE or it |
| 4993 // should have kInternalizedTag set. |
| 4994 STATIC_ASSERT(kInternalizedTag != 0); |
| 4995 __ Tst(lhs_instance_type, kIsInternalizedMask); |
| 4996 __ Ccmp(lhs_instance_type, SYMBOL_TYPE, ZFlag, eq); |
| 4997 __ B(ne, &miss); |
| 4998 |
| 4999 __ Tst(rhs_instance_type, kIsInternalizedMask); |
| 5000 __ Ccmp(rhs_instance_type, SYMBOL_TYPE, ZFlag, eq); |
| 5001 __ B(ne, &miss); |
| 5002 |
| 5003 // Unique names are compared by identity. |
| 5004 STATIC_ASSERT(EQUAL == 0); |
| 5005 __ Cmp(lhs, rhs); |
| 5006 __ Cset(result, ne); |
| 5007 __ Ret(); |
| 5008 |
| 5009 __ Bind(&miss); |
| 5010 GenerateMiss(masm); |
| 5011 } |
| 5012 |
| 5013 |
| 5014 void ICCompareStub::GenerateStrings(MacroAssembler* masm) { |
| 5015 ASSERT(state_ == CompareIC::STRING); |
| 5016 ASM_LOCATION("ICCompareStub[Strings]"); |
| 5017 |
| 5018 Label miss; |
| 5019 |
| 5020 bool equality = Token::IsEqualityOp(op_); |
| 5021 |
| 5022 Register result = x0; |
| 5023 Register rhs = x0; |
| 5024 Register lhs = x1; |
| 5025 |
| 5026 // Check that both operands are heap objects. |
| 5027 __ JumpIfEitherSmi(rhs, lhs, &miss); |
| 5028 |
| 5029 // Check that both operands are strings. |
| 5030 Register rhs_map = x10; |
| 5031 Register lhs_map = x11; |
| 5032 Register rhs_type = x10; |
| 5033 Register lhs_type = x11; |
| 5034 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset)); |
| 5035 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset)); |
| 5036 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset)); |
| 5037 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset)); |
| 5038 STATIC_ASSERT(kNotStringTag != 0); |
| 5039 __ Orr(x12, lhs_type, rhs_type); |
| 5040 __ Tbnz(x12, MaskToBit(kIsNotStringMask), &miss); |
| 5041 |
| 5042 // Fast check for identical strings. |
| 5043 Label not_equal; |
| 5044 __ Cmp(lhs, rhs); |
| 5045 __ B(ne, ¬_equal); |
| 5046 __ Mov(result, EQUAL); |
| 5047 __ Ret(); |
| 5048 |
| 5049 __ Bind(¬_equal); |
| 5050   // Handle non-identical strings.
| 5051 |
| 5052 // Check that both strings are internalized strings. If they are, we're done |
| 5053 // because we already know they are not identical. |
| 5054 if (equality) { |
| 5055 ASSERT(GetCondition() == eq); |
| 5056 STATIC_ASSERT(kInternalizedTag != 0); |
| 5057 Label not_internalized_strings; |
| 5058 __ And(x12, lhs_type, rhs_type); |
| 5059 __ Tbz(x12, MaskToBit(kIsInternalizedMask), ¬_internalized_strings); |
| 5060 // Result is in rhs (x0), and not EQUAL, as rhs is not a smi. |
| 5061 __ Ret(); |
| 5062 __ Bind(¬_internalized_strings); |
| 5063 } |
| 5064 |
| 5065 // Check that both strings are sequential ASCII. |
| 5066 Label runtime; |
| 5067 __ JumpIfBothInstanceTypesAreNotSequentialAscii( |
| 5068 lhs_type, rhs_type, x12, x13, &runtime); |
| 5069 |
| 5070 // Compare flat ASCII strings. Returns when done. |
| 5071 if (equality) { |
| 5072 StringCompareStub::GenerateFlatAsciiStringEquals( |
| 5073 masm, lhs, rhs, x10, x11, x12); |
| 5074 } else { |
| 5075 StringCompareStub::GenerateCompareFlatAsciiStrings( |
| 5076 masm, lhs, rhs, x10, x11, x12, x13); |
| 5077 } |
| 5078 |
| 5079 // Handle more complex cases in runtime. |
| 5080 __ Bind(&runtime); |
| 5081 __ Push(lhs, rhs); |
| 5082 if (equality) { |
| 5083 __ TailCallRuntime(Runtime::kStringEquals, 2, 1); |
| 5084 } else { |
| 5085 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); |
| 5086 } |
| 5087 |
| 5088 __ Bind(&miss); |
| 5089 GenerateMiss(masm); |
| 5090 } |
| 5091 |
| 5092 |
| 5093 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { |
| 5094 ASSERT(state_ == CompareIC::OBJECT); |
| 5095 ASM_LOCATION("ICCompareStub[Objects]"); |
| 5096 |
| 5097 Label miss; |
| 5098 |
| 5099 Register result = x0; |
| 5100 Register rhs = x0; |
| 5101 Register lhs = x1; |
| 5102 |
| 5103 __ JumpIfEitherSmi(rhs, lhs, &miss); |
| 5104 |
| 5105 __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss); |
| 5106 __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss); |
| 5107 |
| 5108 ASSERT(GetCondition() == eq); |
| 5109 __ Sub(result, rhs, lhs); |
| 5110 __ Ret(); |
| 5111 |
| 5112 __ Bind(&miss); |
| 5113 GenerateMiss(masm); |
| 5114 } |
| 5115 |
| 5116 |
| 5117 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { |
| 5118 ASM_LOCATION("ICCompareStub[KnownObjects]"); |
| 5119 |
| 5120 Label miss; |
| 5121 |
| 5122 Register result = x0; |
| 5123 Register rhs = x0; |
| 5124 Register lhs = x1; |
| 5125 |
| 5126 __ JumpIfEitherSmi(rhs, lhs, &miss); |
| 5127 |
| 5128 Register rhs_map = x10; |
| 5129 Register lhs_map = x11; |
| 5130 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset)); |
| 5131 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset)); |
| 5132 __ Cmp(rhs_map, Operand(known_map_)); |
| 5133 __ B(ne, &miss); |
| 5134 __ Cmp(lhs_map, Operand(known_map_)); |
| 5135 __ B(ne, &miss); |
| 5136 |
| 5137 __ Sub(result, rhs, lhs); |
| 5138 __ Ret(); |
| 5139 |
| 5140 __ Bind(&miss); |
| 5141 GenerateMiss(masm); |
| 5142 } |
| 5143 |
| 5144 |
| 5145 // This method handles the case where a compare stub had the wrong |
| 5146 // implementation. It calls a miss handler, which rewrites the stub. All other
| 5147 // ICCompareStub::Generate* methods should fall back into this one if their |
| 5148 // operands were not the expected types. |
| 5149 void ICCompareStub::GenerateMiss(MacroAssembler* masm) { |
| 5150 ASM_LOCATION("ICCompareStub[Miss]"); |
| 5151 |
| 5152 Register stub_entry = x11; |
| 5153 { |
| 5154 ExternalReference miss = |
| 5155 ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); |
| 5156 |
| 5157 FrameScope scope(masm, StackFrame::INTERNAL); |
| 5158 Register op = x10; |
| 5159 Register left = x1; |
| 5160 Register right = x0; |
| 5161 // Preserve some caller-saved registers. |
| 5162 __ Push(x1, x0, lr); |
| 5163 // Push the arguments. |
| 5164 __ Mov(op, Operand(Smi::FromInt(op_))); |
| 5165 __ Push(left, right, op); |
| 5166 |
| 5167 // Call the miss handler. This also pops the arguments. |
| 5168 __ CallExternalReference(miss, 3); |
| 5169 |
| 5170 // Compute the entry point of the rewritten stub. |
| 5171 __ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag); |
| 5172 // Restore caller-saved registers. |
| 5173 __ Pop(lr, x0, x1); |
| 5174 } |
| 5175 |
| 5176 // Tail-call to the new stub. |
| 5177 __ Jump(stub_entry); |
| 5178 } |
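|
|
| // Sketched in C++ (hypothetical signatures), the miss protocol above is:
| //   Code* new_stub = CompareIC_Miss(left, right, Smi::FromInt(op_));
| //   goto new_stub->instruction_start();   // Tail-call the rewritten stub.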
| 5179 |
| 5180 |
| 5181 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, |
| 5182 Register object, |
| 5183 Register result, |
| 5184 Register scratch1, |
| 5185 Register scratch2, |
| 5186 Register scratch3, |
| 5187 ObjectType object_type, |
| 5188 Label* not_found) { |
| 5189 ASSERT(!AreAliased(object, result, scratch1, scratch2, scratch3)); |
| 5190 |
| 5191 // Use of registers. Register result is used as a temporary. |
| 5192 Register number_string_cache = result; |
| 5193 Register mask = scratch3; |
| 5194 |
| 5195 // Load the number string cache. |
| 5196 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); |
| 5197 |
| 5198 // Make the hash mask from the length of the number string cache. It |
| 5199 // contains two elements (number and string) for each cache entry. |
| 5200 __ Ldrsw(mask, UntagSmiFieldMemOperand(number_string_cache, |
| 5201 FixedArray::kLengthOffset)); |
| 5202 __ Asr(mask, mask, 1); // Divide length by two. |
| 5203 __ Sub(mask, mask, 1); // Make mask. |
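|   // For example, a cache FixedArray of length 128 holds 64 (number, string)
|   // pairs, giving mask = 64 - 1 = 0x3f.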
| 5204 |
| 5205 // Calculate the entry in the number string cache. The hash value in the |
| 5206 // number string cache for smis is just the smi value, and the hash for |
| 5207 // doubles is the xor of the upper and lower words. See |
| 5208 // Heap::GetNumberStringCache. |
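|   // A plain C++ sketch of the lookup, assuming hypothetical low_word()/
|   // high_word() accessors for the double bits:
|   //   uint32_t hash = object->IsSmi()
|   //       ? Smi::cast(object)->value()
|   //       : (low_word(double_value) ^ high_word(double_value));
|   //   int index = hash & mask;
|   //   if (cache->get(index * 2) == object) return cache->get(index * 2 + 1);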
| 5209 Isolate* isolate = masm->isolate(); |
| 5210 Label is_smi; |
| 5211 Label load_result_from_cache; |
| 5212 if (object_type == OBJECT_IS_NOT_SMI) { |
| 5213 __ JumpIfSmi(object, &is_smi); |
| 5214 __ CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found, |
| 5215 DONT_DO_SMI_CHECK); |
| 5216 |
| 5217 STATIC_ASSERT(kDoubleSize == (kWRegSizeInBytes * 2)); |
| 5218 __ Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag); |
| 5219 __ Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1)); |
| 5220 __ Eor(scratch1, scratch1, scratch2); |
| 5221 __ And(scratch1, scratch1, mask); |
| 5222 |
| 5223 // Calculate address of entry in string cache: each entry consists of two |
| 5224 // pointer sized fields. |
| 5225 __ Add(scratch1, number_string_cache, |
| 5226 Operand(scratch1, LSL, kPointerSizeLog2 + 1)); |
| 5227 |
| 5228 Register probe = mask; |
| 5229 __ Ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); |
| 5230 __ JumpIfSmi(probe, not_found); |
| 5231 __ Ldr(d0, FieldMemOperand(object, HeapNumber::kValueOffset)); |
| 5232 __ Ldr(d1, FieldMemOperand(probe, HeapNumber::kValueOffset)); |
| 5233 __ Fcmp(d0, d1); |
| 5234 __ B(ne, not_found); |
| 5235 __ B(&load_result_from_cache); |
| 5236 } |
| 5237 |
| 5238 __ Bind(&is_smi); |
| 5239 Register scratch = scratch1; |
| 5240 __ And(scratch, mask, Operand::UntagSmi(object)); |
| 5241 // Calculate address of entry in string cache: each entry consists |
| 5242 // of two pointer sized fields. |
| 5243 __ Add(scratch, |
| 5244 number_string_cache, |
| 5245 Operand(scratch, LSL, kPointerSizeLog2 + 1)); |
| 5246 |
| 5247 // Check if the entry is the smi we are looking for. |
| 5248 Register probe = mask; |
| 5249 __ Ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); |
| 5250 __ Cmp(object, probe); |
| 5251 __ B(ne, not_found); |
| 5252 |
| 5253 // Get the result from the cache. |
| 5254 __ Bind(&load_result_from_cache); |
| 5255 __ Ldr(result, |
| 5256 FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); |
| 5257 __ IncrementCounter(isolate->counters()->number_to_string_native(), 1, |
| 5258 scratch1, scratch2); |
| 5259 } |
| 5260 |
| 5261 |
| 5262 void NumberToStringStub::Generate(MacroAssembler* masm) { |
| 5263 Register result = x0; |
| 5264 Register object = x1; |
| 5265 Label runtime; |
| 5266 |
| 5267 __ Pop(object); |
| 5268 |
| 5269 // Generate code to lookup number in the number string cache. |
| 5270 GenerateLookupNumberStringCache(masm, object, result, x2, x3, x4, |
| 5271 NumberToStringStub::OBJECT_IS_NOT_SMI, |
| 5272 &runtime); |
| 5273 __ Ret(); |
| 5274 |
| 5275 // Handle number to string in the runtime system if not found in the cache. |
| 5276 __ Bind(&runtime); |
| 5277 __ Push(object); |
| 5278 __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); |
| 5279 } |
| 5280 |
| 5281 |
| 5282 void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm, |
| 5283 Register c1, |
| 5284 Register c2, |
| 5285 Register scratch1, |
| 5286 Register scratch2, |
| 5287 Register scratch3, |
| 5288 Register scratch4, |
| 5289 Register scratch5, |
| 5290 Label* not_found) { |
| 5291 ASSERT(!AreAliased(c1, c2, scratch1, scratch2, scratch3, scratch4, scratch5)); |
| 5292 // Register scratch3 is the general scratch register in this function. |
| 5293 Register scratch = scratch3; |
| 5294 |
| 5295   // Make sure that neither character is a digit, as all-digit strings use a
| 5296   // different hash algorithm. Don't try to look for these in the string table.
| 5297 Label not_array_index; |
| 5298 __ Sub(scratch, c1, static_cast<int>('0')); |
| 5299 __ Cmp(scratch, static_cast<int>('9' - '0')); |
| 5300 __ B(hi, ¬_array_index); |
| 5301 __ Sub(scratch, c2, static_cast<int>('0')); |
| 5302 __ Cmp(scratch, static_cast<int>('9' - '0')); |
| 5303 |
| 5304   // If the check failed, combine both characters into a single halfword.
| 5305 // This is required by the contract of the method: code at the not_found |
| 5306 // branch expects this combination in register c1. |
| 5307 __ Orr(scratch, c1, Operand(c2, LSL, kBitsPerByte)); |
| 5308 __ Csel(c1, scratch, c1, ls); |
| 5309 __ B(ls, not_found); |
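|   // For example, c1 = '1' (0x31) and c2 = '2' (0x32) are both digits, so
|   // scratch = 0x3231 is committed to c1 and the ls branch to not_found is
|   // taken.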
| 5310 |
| 5311 __ Bind(¬_array_index); |
| 5312 |
| 5313 // Calculate the two character string hash. |
| 5314 Register hash = scratch1; |
| 5315 StringHelper::GenerateHashInit(masm, hash, c1); |
| 5316 StringHelper::GenerateHashAddCharacter(masm, hash, c2); |
| 5317 StringHelper::GenerateHashGetHash(masm, hash, scratch); |
| 5318 |
| 5319 // Collect the two characters in a register. |
| 5320 Register chars = c1; |
| 5321 __ Orr(chars, chars, Operand(c2, LSL, kBitsPerByte)); |
| 5322 |
| 5323 // chars: two character string, char 1 in byte 0 and char 2 in byte 1. |
| 5324 // hash: hash of two character string. |
| 5325 |
| 5326   // Load the string table.
| 5328 Register string_table = c2; |
| 5329 __ LoadRoot(string_table, Heap::kStringTableRootIndex); |
| 5330 |
| 5331 Register undefined = scratch4; |
| 5332 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); |
| 5333 |
| 5334 // Calculate capacity mask from the string table capacity. |
| 5335 Register mask = scratch2; |
| 5336 __ Ldrsw(mask, UntagSmiFieldMemOperand(string_table, |
| 5337 StringTable::kCapacityOffset)); |
| 5338 __ Sub(mask, mask, 1); |
| 5339 |
| 5340 // Calculate untagged address of the first element of the string table. |
| 5341 Register first_string_table_element = string_table; |
| 5342 __ Add(first_string_table_element, string_table, |
| 5343 StringTable::kElementsStartOffset - kHeapObjectTag); |
| 5344 |
| 5345 // Registers |
| 5346 // chars: two character string, char 1 in byte 0 and char 2 in byte 1 |
| 5347 // hash: hash of two character string |
| 5348 // mask: capacity mask |
| 5349 // first_string_table_element: address of the first element of the string |
| 5350 // table |
| 5351 // undefined: the undefined object |
| 5352 // scratch: - |
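|
|   // The probe loop below does open-address probing of the string table;
|   // roughly, in plain C++ (IsTwoCharAsciiStringEqualTo is hypothetical):
|   //   for (int i = 0; i < kProbes; i++) {
|   //     int entry = (hash + StringTable::GetProbeOffset(i)) & mask;
|   //     Object* candidate = table[entry];
|   //     if (candidate == undefined) goto not_found;
|   //     if (IsTwoCharAsciiStringEqualTo(candidate, chars)) goto found;
|   //   }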
| 5353 |
| 5354 // Perform a number of probes of the string table. |
| 5355 static const int kProbes = 4; |
| 5356 Label found_in_string_table; |
| 5357 Label next_probe[kProbes]; |
| 5358 Register candidate = scratch5; // Scratch register contains candidate. |
| 5359 for (int i = 0; i < kProbes; i++) { |
| 5360 // Calculate entry in string table. |
| 5361 if (i > 0) { |
| 5362 __ Add(candidate, hash, StringTable::GetProbeOffset(i)); |
| 5363 __ And(candidate, candidate, mask); |
| 5364 } else { |
| 5365 __ And(candidate, hash, mask); |
| 5366 } |
| 5367 |
| 5368 // Load the entry from the string table. |
| 5369 STATIC_ASSERT(StringTable::kEntrySize == 1); |
| 5370 __ Ldr(candidate, MemOperand(first_string_table_element, |
| 5371 candidate, LSL, kPointerSizeLog2)); |
| 5372 |
| 5373 // If entry is undefined no string with this hash can be found. |
| 5374 Label is_string; |
| 5375 Register type = scratch; |
| 5376 __ JumpIfNotObjectType(candidate, type, type, ODDBALL_TYPE, &is_string); |
| 5377 |
| 5378 __ Cmp(undefined, candidate); |
| 5379 __ B(eq, not_found); |
| 5380 // Must be the hole (deleted entry). |
| 5381 if (FLAG_debug_code) { |
| 5382 __ CompareRoot(candidate, Heap::kTheHoleValueRootIndex); |
| 5383 __ Assert(eq, "oddball in string table is not undefined or the hole"); |
| 5384 } |
| 5385 __ B(&next_probe[i]); |
| 5386 |
| 5387 __ Bind(&is_string); |
| 5388 |
| 5389 // Check that the candidate is a non-external ASCII string. The instance |
| 5390 // type is still in the type register from the CompareObjectType |
| 5391 // operation. |
| 5392 __ JumpIfInstanceTypeIsNotSequentialAscii(type, type, &next_probe[i]); |
| 5393 |
| 5394 // If length is not two, the string is not a candidate. |
| 5395 __ Ldrsw(scratch, |
| 5396 UntagSmiFieldMemOperand(candidate, String::kLengthOffset)); |
| 5397 __ Cmp(scratch, 2); |
| 5398 __ B(ne, &next_probe[i]); |
| 5399 |
| 5400 // Check if the two characters match. |
| 5401   // Assumes that the word load is little-endian.
| 5402 __ Ldrh(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize)); |
| 5403 __ Cmp(chars, scratch); |
| 5404 __ B(eq, &found_in_string_table); |
| 5405 __ Bind(&next_probe[i]); |
| 5406 } |
| 5407 |
| 5408 // No matching two character string found by probing. |
| 5409 __ B(not_found); |
| 5410 |
| 5411   // The candidate register contains the result when we fall through to here.
| 5412 __ Bind(&found_in_string_table); |
| 5413 __ Mov(x0, candidate); |
| 5414 } |
| 5415 |
| 5416 |
| 5417 void StringHelper::LoadPairInstanceTypes(MacroAssembler* masm, |
| 5418 Register first_type, |
| 5419 Register second_type, |
| 5420 Register first_string, |
| 5421 Register second_string) { |
| 5422 ASSERT(!AreAliased(first_string, second_string, first_type, second_type)); |
| 5423 __ Ldr(first_type, FieldMemOperand(first_string, HeapObject::kMapOffset)); |
| 5424 __ Ldr(second_type, FieldMemOperand(second_string, HeapObject::kMapOffset)); |
| 5425 __ Ldrb(first_type, FieldMemOperand(first_type, Map::kInstanceTypeOffset)); |
| 5426 __ Ldrb(second_type, FieldMemOperand(second_type, Map::kInstanceTypeOffset)); |
| 5427 } |
| 5428 |
| 5429 |
| 5430 void StringHelper::GenerateHashInit(MacroAssembler* masm, |
| 5431 Register hash, |
| 5432 Register character) { |
| 5433 ASSERT(!AreAliased(hash, character)); |
| 5434 |
| 5435 // hash = character + (character << 10); |
| 5436 __ LoadRoot(hash, Heap::kHashSeedRootIndex); |
| 5437 // Untag smi seed and add the character. |
| 5438 __ Add(hash, character, Operand(hash, LSR, kSmiShift)); |
| 5439 |
| 5440 // Compute hashes modulo 2^32 using a 32-bit W register. |
| 5441 Register hash_w = hash.W(); |
| 5442 |
| 5443 // hash += hash << 10; |
| 5444 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10)); |
| 5445 // hash ^= hash >> 6; |
| 5446 __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6)); |
| 5447 } |
| 5448 |
| 5449 |
| 5450 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, |
| 5451 Register hash, |
| 5452 Register character) { |
| 5453 ASSERT(!AreAliased(hash, character)); |
| 5454 |
| 5455 // hash += character; |
| 5456 __ Add(hash, hash, character); |
| 5457 |
| 5458 // Compute hashes modulo 2^32 using a 32-bit W register. |
| 5459 Register hash_w = hash.W(); |
| 5460 |
| 5461 // hash += hash << 10; |
| 5462 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10)); |
| 5463 // hash ^= hash >> 6; |
| 5464 __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6)); |
| 5465 } |
| 5466 |
| 5467 |
| 5468 void StringHelper::GenerateHashGetHash(MacroAssembler* masm, |
| 5469 Register hash, |
| 5470 Register scratch) { |
| 5471 // Compute hashes modulo 2^32 using a 32-bit W register. |
| 5472 Register hash_w = hash.W(); |
| 5473 Register scratch_w = scratch.W(); |
| 5474 ASSERT(!AreAliased(hash_w, scratch_w)); |
| 5475 |
| 5476 // hash += hash << 3; |
| 5477 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 3)); |
| 5478 // hash ^= hash >> 11; |
| 5479 __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 11)); |
| 5480 // hash += hash << 15; |
| 5481 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 15)); |
| 5482 |
| 5483 __ Ands(hash_w, hash_w, String::kHashBitMask); |
| 5484 |
| 5485 // if (hash == 0) hash = 27; |
| 5486 __ Mov(scratch_w, StringHasher::kZeroHash); |
| 5487 __ Csel(hash_w, scratch_w, hash_w, eq); |
| 5488 } |
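|
|
| // Taken together, the three hash helpers compute, in plain C++:
| //   uint32_t hash = seed + c1;                    // GenerateHashInit.
| //   hash += hash << 10;  hash ^= hash >> 6;
| //   hash += c2;                                   // GenerateHashAddCharacter.
| //   hash += hash << 10;  hash ^= hash >> 6;
| //   hash += hash << 3;   hash ^= hash >> 11;      // GenerateHashGetHash.
| //   hash += hash << 15;
| //   hash &= String::kHashBitMask;
| //   if (hash == 0) hash = StringHasher::kZeroHash;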
| 5489 |
| 5490 |
| 5491 void SubStringStub::Generate(MacroAssembler* masm) { |
| 5492 ASM_LOCATION("SubStringStub::Generate"); |
| 5493 Label runtime; |
| 5494 |
| 5495 // Stack frame on entry. |
| 5496 // lr: return address |
| 5497 // jssp[0]: substring "to" offset |
| 5498 // jssp[8]: substring "from" offset |
| 5499 // jssp[16]: pointer to string object |
| 5500 |
| 5501 // This stub is called from the native-call %_SubString(...), so |
| 5502 // nothing can be assumed about the arguments. It is tested that: |
| 5503 // "string" is a sequential string, |
| 5504 // both "from" and "to" are smis, and |
| 5505   // 0 <= from <= to <= string.length (in debug mode).
| 5506 // If any of these assumptions fail, we call the runtime system. |
| 5507 |
| 5508 static const int kToOffset = 0 * kPointerSize; |
| 5509 static const int kFromOffset = 1 * kPointerSize; |
| 5510 static const int kStringOffset = 2 * kPointerSize; |
| 5511 |
| 5512 Register to = x0; |
| 5513 Register from = x15; |
| 5514 Register input_string = x10; |
| 5515 Register input_length = x11; |
| 5516 Register input_type = x12; |
| 5517 Register result_string = x0; |
| 5518 Register result_length = x1; |
| 5519 Register temp = x3; |
| 5520 |
| 5521 __ Peek(to, kToOffset); |
| 5522 __ Peek(from, kFromOffset); |
| 5523 |
| 5524 // Check that both from and to are smis. If not, jump to runtime. |
| 5525 __ JumpIfEitherNotSmi(from, to, &runtime); |
| 5526 __ SmiUntag(from); |
| 5527 __ SmiUntag(to); |
| 5528 |
| 5529 // Calculate difference between from and to. If to < from, branch to runtime. |
| 5530 __ Subs(result_length, to, from); |
| 5531 __ B(mi, &runtime); |
| 5532 |
| 5533   // Check that from is not negative.
| 5534 __ Tbnz(from, kWSignBit, &runtime); |
| 5535 |
| 5536 // Make sure first argument is a string. |
| 5537 __ Peek(input_string, kStringOffset); |
| 5538 __ JumpIfSmi(input_string, &runtime); |
| 5539 __ IsObjectJSStringType(input_string, input_type, &runtime); |
| 5540 |
| 5541 Label single_char; |
| 5542 __ Cmp(result_length, 1); |
| 5543 __ B(eq, &single_char); |
| 5544 |
| 5545 // Short-cut for the case of trivial substring. |
| 5546 Label return_x0; |
| 5547 __ Ldrsw(input_length, |
| 5548 UntagSmiFieldMemOperand(input_string, String::kLengthOffset)); |
| 5549 |
| 5550 __ Cmp(result_length, input_length); |
| 5551 __ CmovX(x0, input_string, eq); |
| 5552 // Return original string. |
| 5553 __ B(eq, &return_x0); |
| 5554 |
| 5555 // Longer than original string's length or negative: unsafe arguments. |
| 5556 __ B(hi, &runtime); |
| 5557 |
| 5558 // Shorter than original string's length: an actual substring. |
| 5559 |
| 5560 // x0 to substring end character offset |
| 5561 // x1 result_length length of substring result |
| 5562 // x10 input_string pointer to input string object |
| 5563 // x10 unpacked_string pointer to unpacked string object |
| 5564 // x11 input_length length of input string |
| 5565 // x12 input_type instance type of input string |
| 5566 // x15 from substring start character offset |
| 5567 |
| 5568 // Deal with different string types: update the index if necessary and put |
| 5569 // the underlying string into register unpacked_string. |
| 5570 Label underlying_unpacked, sliced_string, seq_or_external_string; |
| 5571 Label update_instance_type; |
| 5572 // If the string is not indirect, it can only be sequential or external. |
| 5573 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag)); |
| 5574 STATIC_ASSERT(kIsIndirectStringMask != 0); |
| 5575 |
| 5576 // Test for string types, and branch/fall through to appropriate unpacking |
| 5577 // code. |
| 5578 __ Tst(input_type, kIsIndirectStringMask); |
| 5579 __ B(eq, &seq_or_external_string); |
| 5580 __ Tst(input_type, kSlicedNotConsMask); |
| 5581 __ B(ne, &sliced_string); |
| 5582 |
| 5583 Register unpacked_string = input_string; |
| 5584 |
| 5585 // Cons string. Check whether it is flat, then fetch first part. |
| 5586 __ Ldr(temp, FieldMemOperand(input_string, ConsString::kSecondOffset)); |
| 5587 __ JumpIfNotRoot(temp, Heap::kempty_stringRootIndex, &runtime); |
| 5588 __ Ldr(unpacked_string, |
| 5589 FieldMemOperand(input_string, ConsString::kFirstOffset)); |
| 5590 __ B(&update_instance_type); |
| 5591 |
| 5592 __ Bind(&sliced_string); |
| 5593 // Sliced string. Fetch parent and correct start index by offset. |
| 5594 __ Ldrsw(temp, |
| 5595 UntagSmiFieldMemOperand(input_string, SlicedString::kOffsetOffset)); |
| 5596 __ Add(from, from, temp); |
| 5597 __ Ldr(unpacked_string, |
| 5598 FieldMemOperand(input_string, SlicedString::kParentOffset)); |
| 5599 |
| 5600 __ Bind(&update_instance_type); |
| 5601 __ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset)); |
| 5602 __ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset)); |
| 5603 // TODO(all): This generates "b #+0x4". Can these be optimised out? |
| 5604 __ B(&underlying_unpacked); |
| 5605 |
| 5606 __ Bind(&seq_or_external_string); |
| 5607 // Sequential or external string. Registers unpacked_string and input_string |
| 5608 // alias, so there's nothing to do here. |
| 5609 |
| 5610 // x0 result_string pointer to result string object (uninit) |
| 5611 // x1 result_length length of substring result |
| 5612 // x10 unpacked_string pointer to unpacked string object |
| 5613 // x11 input_length length of input string |
| 5614 // x12 input_type instance type of input string |
| 5615 // x15 from substring start character offset |
| 5616 __ Bind(&underlying_unpacked); |
| 5617 |
| 5618 if (FLAG_string_slices) { |
| 5619 Label copy_routine; |
| 5620 __ Cmp(result_length, SlicedString::kMinLength); |
| 5621 // Short slice. Copy instead of slicing. |
| 5622 __ B(lt, &copy_routine); |
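| // In effect (sketch): if result_length < SlicedString::kMinLength, the |
| // result is too short to be worth slicing, so the characters are copied |
| // in copy_routine below instead of allocating a SlicedString. |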
| 5623 // Allocate new sliced string. At this point we do not reload the instance |
| 5624 // type including the string encoding because we simply rely on the info |
| 5625 // provided by the original string. It does not matter if the original |
| 5626 // string's encoding is wrong because we always have to recheck encoding of |
| 5627 // the newly created string's parent anyway due to externalized strings. |
| 5628 Label two_byte_slice, set_slice_header; |
| 5629 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); |
| 5630 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); |
| 5631 __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice); |
| 5632 __ AllocateAsciiSlicedString(result_string, result_length, x3, x4, |
| 5633 &runtime); |
| 5634 __ B(&set_slice_header); |
| 5635 |
| 5636 __ Bind(&two_byte_slice); |
| 5637 __ AllocateTwoByteSlicedString(result_string, result_length, x3, x4, |
| 5638 &runtime); |
| 5639 |
| 5640 __ Bind(&set_slice_header); |
| 5641 __ SmiTag(from); |
| 5642 __ Str(from, FieldMemOperand(result_string, SlicedString::kOffsetOffset)); |
| 5643 __ Str(unpacked_string, |
| 5644 FieldMemOperand(result_string, SlicedString::kParentOffset)); |
| 5645 __ B(&return_x0); |
| 5646 |
| 5647 __ Bind(&copy_routine); |
| 5648 } |
| 5649 |
| 5650 // x0 result_string pointer to result string object (uninit) |
| 5651 // x1 result_length length of substring result |
| 5652 // x10 unpacked_string pointer to unpacked string object |
| 5653 // x11 input_length length of input string |
| 5654 // x12 input_type instance type of input string |
| 5655 // x13 unpacked_char0 pointer to first char of unpacked string (uninit) |
| 5656 // x13 substring_char0 pointer to first char of substring (uninit) |
| 5657 // x14 result_char0 pointer to first char of result (uninit) |
| 5658 // x15 from substring start character offset |
| 5659 Register unpacked_char0 = x13; |
| 5660 Register substring_char0 = x13; |
| 5661 Register result_char0 = x14; |
| 5662 Label two_byte_sequential, sequential_string, allocate_result; |
| 5663 STATIC_ASSERT(kExternalStringTag != 0); |
| 5664 STATIC_ASSERT(kSeqStringTag == 0); |
| 5665 |
| 5666 __ Tst(input_type, kExternalStringTag); |
| 5667 __ B(eq, &sequential_string); |
| 5668 |
| 5669 __ Tst(input_type, kShortExternalStringTag); |
| 5670 __ B(ne, &runtime); |
| 5671 __ Ldr(unpacked_char0, |
| 5672 FieldMemOperand(unpacked_string, ExternalString::kResourceDataOffset)); |
| 5673 // unpacked_char0 points to the first character of the underlying string. |
| 5674 __ B(&allocate_result); |
| 5675 |
| 5676 __ Bind(&sequential_string); |
| 5677 // Locate first character of underlying subject string. |
| 5678 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); |
| 5679 __ Add(unpacked_char0, unpacked_string, |
| 5680 SeqOneByteString::kHeaderSize - kHeapObjectTag); |
| 5681 |
| 5682 __ Bind(&allocate_result); |
| 5683 // Check the encoding of the underlying string and allocate the result. |
| 5684 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); |
| 5685 __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential); |
| 5686 |
| 5687 // Allocate and copy the resulting ASCII string. |
| 5688 __ AllocateAsciiString(result_string, result_length, x3, x4, x5, &runtime); |
| 5689 |
| 5690 // Locate first character of substring to copy. |
| 5691 __ Add(substring_char0, unpacked_char0, from); |
| 5692 |
| 5693 // Locate first character of result. |
| 5694 __ Add(result_char0, result_string, |
| 5695 SeqOneByteString::kHeaderSize - kHeapObjectTag); |
| 5696 |
| 5697 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
| 5698 __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong); |
| 5699 __ B(&return_x0); |
| 5700 |
| 5701 // Allocate and copy the resulting two-byte string. |
| 5702 __ Bind(&two_byte_sequential); |
| 5703 __ AllocateTwoByteString(result_string, result_length, x3, x4, x5, &runtime); |
| 5704 |
| 5705 // Locate first character of substring to copy. |
| 5706 __ Add(substring_char0, unpacked_char0, Operand(from, LSL, 1)); |
| 5707 |
| 5708 // Locate first character of result. |
| 5709 __ Add(result_char0, result_string, |
| 5710 SeqTwoByteString::kHeaderSize - kHeapObjectTag); |
| 5711 |
| 5712 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
| 5713 __ Add(result_length, result_length, result_length); |
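| // The Add above doubles result_length, converting the character count |
| // into a byte count (two bytes per character) for the CopyBytes below. |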
| 5714 __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong); |
| 5715 |
| 5716 __ Bind(&return_x0); |
| 5717 Counters* counters = masm->isolate()->counters(); |
| 5718 __ IncrementCounter(counters->sub_string_native(), 1, x3, x4); |
| 5719 __ Drop(3); |
| 5720 __ Ret(); |
| 5721 |
| 5722 __ Bind(&runtime); |
| 5723 __ TailCallRuntime(Runtime::kSubString, 3, 1); |
| 5724 |
| 5725 __ Bind(&single_char); |
| 5726 // x1: result_length |
| 5727 // x10: input_string |
| 5728 // x12: input_type |
| 5729 // x15: from (untagged) |
| 5730 __ SmiTag(from); |
| 5731 StringCharAtGenerator generator( |
| 5732 input_string, from, result_length, x0, |
| 5733 &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER); |
| 5734 generator.GenerateFast(masm); |
| 5735 // TODO(jbramley): Why doesn't this jump to return_x0? |
| 5736 __ Drop(3); |
| 5737 __ Ret(); |
| 5738 generator.SkipSlow(masm, &runtime); |
| 5739 } |
| 5740 |
| 5741 |
| 5742 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm, |
| 5743 Register left, |
| 5744 Register right, |
| 5745 Register scratch1, |
| 5746 Register scratch2, |
| 5747 Register scratch3) { |
| 5748 ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3)); |
| 5749 Register result = x0; |
| 5750 Register left_length = scratch1; |
| 5751 Register right_length = scratch2; |
| 5752 |
| 5753 // Compare lengths. If lengths differ, strings can't be equal. Lengths are |
| 5754 // smis, and don't need to be untagged. |
| 5755 Label strings_not_equal, check_zero_length; |
| 5756 __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset)); |
| 5757 __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset)); |
| 5758 __ Cmp(left_length, right_length); |
| 5759 __ B(eq, &check_zero_length); |
| 5760 |
| 5761 __ Bind(&strings_not_equal); |
| 5762 __ Mov(result, Operand(Smi::FromInt(NOT_EQUAL))); |
| 5763 __ Ret(); |
| 5764 |
| 5765 // Check if the length is zero. If so, the strings must be equal (and empty). |
| 5766 Label compare_chars; |
| 5767 __ Bind(&check_zero_length); |
| 5768 STATIC_ASSERT(kSmiTag == 0); |
| 5769 __ Cbnz(left_length, &compare_chars); |
| 5770 __ Mov(result, Operand(Smi::FromInt(EQUAL))); |
| 5771 __ Ret(); |
| 5772 |
| 5773 // Compare characters. Falls through if all characters are equal. |
| 5774 __ Bind(&compare_chars); |
| 5775 GenerateAsciiCharsCompareLoop(masm, left, right, left_length, scratch2, |
| 5776 scratch3, &strings_not_equal); |
| 5777 |
| 5778 // Characters in strings are equal. |
| 5779 __ Mov(result, Operand(Smi::FromInt(EQUAL))); |
| 5780 __ Ret(); |
| 5781 } |
| 5782 |
| 5783 |
| 5784 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, |
| 5785 Register left, |
| 5786 Register right, |
| 5787 Register scratch1, |
| 5788 Register scratch2, |
| 5789 Register scratch3, |
| 5790 Register scratch4) { |
| 5791 ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4)); |
| 5792 Label result_not_equal, compare_lengths; |
| 5793 |
| 5794 // Find minimum length and length difference. |
| 5795 Register length_delta = scratch3; |
| 5796 __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset)); |
| 5797 __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); |
| 5798 __ Subs(length_delta, scratch1, scratch2); |
| 5799 |
| 5800 Register min_length = scratch1; |
| 5801 __ Csel(min_length, scratch2, scratch1, gt); |
| 5802 __ Cbz(min_length, &compare_lengths); |
| 5803 |
| 5804 // Compare loop. |
| 5805 GenerateAsciiCharsCompareLoop(masm, |
| 5806 left, right, min_length, scratch2, scratch4, |
| 5807 &result_not_equal); |
| 5808 |
| 5809 // Compare lengths - strings up to min-length are equal. |
| 5810 __ Bind(&compare_lengths); |
| 5811 |
| 5812 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); |
| 5813 |
| 5814 // Use length_delta as result if it's zero. |
| 5815 Register result = x0; |
| 5816 __ Subs(result, length_delta, 0); |
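| // result_not_equal is reached either by falling through (flags set by the |
| // Subs above) or from the character compare loop (flags set by its Cmp). |
| // In both cases the conditional moves below select, roughly: |
| //   result = gt ? Smi(GREATER) : lt ? Smi(LESS) : result;  // 0 == EQUAL |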
| 5817 |
| 5818 __ Bind(&result_not_equal); |
| 5819 Register greater = x10; |
| 5820 Register less = x11; |
| 5821 __ Mov(greater, Operand(Smi::FromInt(GREATER))); |
| 5822 __ Mov(less, Operand(Smi::FromInt(LESS))); |
| 5823 __ CmovX(result, greater, gt); |
| 5824 __ CmovX(result, less, lt); |
| 5825 __ Ret(); |
| 5826 } |
| 5827 |
| 5828 |
| 5829 void StringCompareStub::GenerateAsciiCharsCompareLoop( |
| 5830 MacroAssembler* masm, |
| 5831 Register left, |
| 5832 Register right, |
| 5833 Register length, |
| 5834 Register scratch1, |
| 5835 Register scratch2, |
| 5836 Label* chars_not_equal) { |
| 5837 ASSERT(!AreAliased(left, right, length, scratch1, scratch2)); |
| 5838 |
| 5839 // Change the index to run from -length to -1 by adding length to the |
| 5840 // string start. This means the loop ends when the index reaches zero, |
| 5841 // so no additional compare is needed. |
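| // A minimal C sketch of the loop below (one-byte data, untagged length; |
| // names are illustrative): |
| //   uint8_t* left_end = left_chars + length; |
| //   uint8_t* right_end = right_chars + length; |
| //   for (int64_t i = -length; i != 0; i++) { |
| //     if (left_end[i] != right_end[i]) goto chars_not_equal; |
| //   } |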
| 5842 __ SmiUntag(length); |
| 5843 __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag); |
| 5844 __ Add(left, left, scratch1); |
| 5845 __ Add(right, right, scratch1); |
| 5846 |
| 5847 Register index = length; |
| 5848 __ Neg(index, length); // index = -length; |
| 5849 |
| 5850 // Compare loop |
| 5851 Label loop; |
| 5852 __ Bind(&loop); |
| 5853 __ Ldrb(scratch1, MemOperand(left, index)); |
| 5854 __ Ldrb(scratch2, MemOperand(right, index)); |
| 5855 __ Cmp(scratch1, scratch2); |
| 5856 __ B(ne, chars_not_equal); |
| 5857 __ Add(index, index, 1); |
| 5858 __ Cbnz(index, &loop); |
| 5859 } |
| 5860 |
| 5861 |
| 5862 void StringCompareStub::Generate(MacroAssembler* masm) { |
| 5863 Label runtime; |
| 5864 |
| 5865 Counters* counters = masm->isolate()->counters(); |
| 5866 |
| 5867 // Stack frame on entry. |
| 5868 // sp[0]: right string |
| 5869 // sp[8]: left string |
| 5870 Register right = x10; |
| 5871 Register left = x11; |
| 5872 Register result = x0; |
| 5873 __ Pop(right, left); |
| 5874 |
| 5875 Label not_same; |
| 5876 __ Subs(result, right, left); |
| 5877 __ B(ne, &not_same); |
| 5878 STATIC_ASSERT(EQUAL == 0); |
| 5879 __ IncrementCounter(counters->string_compare_native(), 1, x3, x4); |
| 5880 __ Ret(); |
| 5881 |
| 5882 __ Bind(&not_same); |
| 5883 |
| 5884 // Check that both objects are sequential ASCII strings. |
| 5885 __ JumpIfEitherIsNotSequentialAsciiStrings(left, right, x12, x13, &runtime); |
| 5886 |
| 5887 // Compare flat ASCII strings natively. The arguments have already been |
| 5888 // popped from the stack, as this function will generate a return. |
| 5889 __ IncrementCounter(counters->string_compare_native(), 1, x3, x4); |
| 5890 GenerateCompareFlatAsciiStrings(masm, left, right, x12, x13, x14, x15); |
| 5891 |
| 5892 __ Bind(&runtime); |
| 5893 |
| 5894 // Push the arguments back onto the stack. |
| 5895 // sp[0] = right string |
| 5896 // sp[8] = left string. |
| 5897 __ Push(left, right); |
| 5898 |
| 5899 // Call the runtime. |
| 5900 // Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer. |
| 5901 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); |
| 5902 } |
| 5903 |
| 5904 |
| 5905 void StringAddStub::Generate(MacroAssembler* masm) { |
| 5906 Label call_runtime, call_builtin; |
| 5907 Builtins::JavaScript builtin_id = Builtins::ADD; |
| 5908 |
| 5909 Counters* counters = masm->isolate()->counters(); |
| 5910 |
| 5911 // Stack on entry: |
| 5912 // sp[0]: second argument (right). |
| 5913 // sp[8]: first argument (left). |
| 5914 |
| 5915 Register result = x0; |
| 5916 Register left = x10; |
| 5917 Register right = x11; |
| 5918 Register left_type = x12; |
| 5919 Register right_type = x13; |
| 5920 |
| 5921 // Pop the two arguments from the stack. |
| 5922 __ Pop(right, left); |
| 5923 |
| 5924 // Make sure that both arguments are strings if not known in advance. |
| 5925 if ((flags_ & NO_STRING_ADD_FLAGS) != 0) { |
| 5926 __ JumpIfEitherSmi(right, left, &call_runtime); |
| 5927 // Load instance types. |
| 5928 StringHelper::LoadPairInstanceTypes(masm, left_type, right_type, left, |
| 5929 right); |
| 5930 STATIC_ASSERT(kStringTag == 0); |
| 5931 // If either is not a string, go to runtime. |
| 5932 __ Tbnz(left_type, MaskToBit(kIsNotStringMask), &call_runtime); |
| 5933 __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &call_runtime); |
| 5934 } else { |
| 5935 // Here at least one of the arguments is definitely a string. |
| 5936 // We convert the one that is not known to be a string. |
| 5937 if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) { |
| 5938 // NO_STRING_CHECK_LEFT flag is clear: convert the left string. |
| 5939 ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0); |
| 5940 GenerateConvertArgument(masm, left, x12, x13, x14, x15, &call_builtin); |
| 5941 builtin_id = Builtins::STRING_ADD_RIGHT; |
| 5942 } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) { |
| 5943 // NO_STRING_CHECK_RIGHT flag is clear: convert the right string. |
| 5944 ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0); |
| 5945 GenerateConvertArgument(masm, right, x12, x13, x14, x15, &call_builtin); |
| 5946 builtin_id = Builtins::STRING_ADD_LEFT; |
| 5947 } |
| 5948 } |
| 5949 |
| 5950 // Both arguments are strings. |
| 5951 // x0 result pointer to result string object (uninit) |
| 5952 // x10 left pointer to first string object |
| 5953 // x11 right pointer to second string object |
| 5954 // if (flags_ == NO_STRING_ADD_FLAGS) { |
| 5955 // x12 left_type first string instance type |
| 5956 // x13 right_type second string instance type |
| 5957 // } |
| 5958 Register left_len = x14; |
| 5959 Register right_len = x15; |
| 5960 { |
| 5961 Label strings_not_empty; |
| 5962 // Speculatively move pointer to left string into the result register. |
| 5963 __ Mov(result, left); |
| 5964 // Check whether either of the strings is empty; if so, return the other. |
| 5965 __ Ldrsw(left_len, UntagSmiFieldMemOperand(left, String::kLengthOffset)); |
| 5966 __ Ldrsw(right_len, UntagSmiFieldMemOperand(right, String::kLengthOffset)); |
| 5967 // Test if first string is empty. |
| 5968 __ Cmp(left_len, 0); |
| 5969 // If first is empty, return second. |
| 5970 __ CmovX(result, right, eq); |
| 5971 // Else test if second string is empty. |
| 5972 __ Ccmp(right_len, 0, ZFlag, ne); |
| 5973 // If either string was empty, return result. |
| 5974 __ B(ne, &strings_not_empty); |
| 5975 |
| 5976 __ IncrementCounter(counters->string_add_native(), 1, x3, x4); |
| 5977 __ Ret(); |
| 5978 |
| 5979 __ Bind(&strings_not_empty); |
| 5980 } |
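| // The Cmp/Ccmp pair above implements a short-circuit test, roughly: |
| //   if (left_len == 0) return right;   // result was CmovX'd to right. |
| //   if (right_len == 0) return left;   // result still holds left. |
| // Ccmp only compares right_len when left_len was non-zero; otherwise it |
| // forces the Z flag, so 'eq' means at least one string is empty. |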
| 5981 |
| 5982 // Load string instance types. |
| 5983 if (flags_ != NO_STRING_ADD_FLAGS) { |
| 5984 StringHelper::LoadPairInstanceTypes(masm, left_type, right_type, left, |
| 5985 right); |
| 5986 } |
| 5987 |
| 5988 // Both strings are non-empty. |
| 5989 // x10 left first string |
| 5990 // x11 right second string |
| 5991 // x12 left_type first string instance type |
| 5992 // x13 right_type second string instance type |
| 5993 // x14 left_len length of first string |
| 5994 // x15 right_len length of second string |
| 5995 Label string_add_flat_result, longer_than_two; |
| 5996 // Adding two string lengths can't overflow. |
| 5997 STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2); |
| 5998 Register length = x1; |
| 5999 __ Add(length, left_len, right_len); |
| 6000 // Use the string table when adding two one-character strings, as returning |
| 6001 // a string from the table here helps later optimizations. |
| 6002 __ Cmp(length, 2); |
| 6003 __ B(ne, &longer_than_two); |
| 6004 |
| 6005 // Check that both strings are non-external ASCII strings. |
| 6006 __ JumpIfBothInstanceTypesAreNotSequentialAscii(left_type, right_type, x2, |
| 6007 x3, &call_runtime); |
| 6008 |
| 6009 Register left_char = x6; |
| 6010 Register right_char = x7; |
| 6011 // Get the two characters that will form the new string. |
| 6012 __ Ldrb(left_char, FieldMemOperand(left, SeqOneByteString::kHeaderSize)); |
| 6013 __ Ldrb(right_char, FieldMemOperand(right, SeqOneByteString::kHeaderSize)); |
| 6014 |
| 6015 // Try to look up the two-character string in the string table. If it is |
| 6016 // not found, just allocate a new one. |
| 6017 // x0 result pointer to result string (uninit) |
| 6018 // x1 length sum of lengths of strings |
| 6019 // x6 left_char first character of first string |
| 6020 // x7 right_char first character of second string |
| 6021 // x10 left pointer to first string object |
| 6022 // x11 right pointer to second string object |
| 6023 // x12 left_type first string instance type |
| 6024 // x13 right_type second string instance type |
| 6025 // x14 left_len length of first string |
| 6026 // x15 right_len length of second string |
| 6027 Label make_two_character_string; |
| 6028 StringHelper::GenerateTwoCharacterStringTableProbe( |
| 6029 masm, |
| 6030 left_char, |
| 6031 right_char, |
| 6032 x2, x3, x4, x5, x8, |
| 6033 &make_two_character_string); |
| 6034 // If found, the result register will have been initialised with a pointer |
| 6035 // to the probed string. |
| 6036 __ IncrementCounter(counters->string_add_native(), 1, x3, x4); |
| 6037 __ Ret(); |
| 6038 |
| 6039 __ Bind(&make_two_character_string); |
| 6040 // The resulting string has length two, and the first characters of the two |
| 6041 // strings have been combined into a single halfword in left_char (x6) by |
| 6042 // GenerateTwoCharacterStringTableProbe(). |
| 6043 // Store the result to a newly-allocated string using a halfword store. |
| 6044 // This assumes the processor is little-endian. |
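| // Sketch: left_char already holds (presumably first_char | (second_char |
| // << 8)), so the single Strh below writes both characters; hence the |
| // little-endian assumption. |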
| 6045 __ Mov(length, 2); |
| 6046 __ AllocateAsciiString(result, length, x12, x13, x14, &call_runtime); |
| 6047 __ Strh(left_char, FieldMemOperand(result, SeqOneByteString::kHeaderSize)); |
| 6048 __ IncrementCounter(counters->string_add_native(), 1, x3, x4); |
| 6049 __ Ret(); |
| 6050 |
| 6051 __ Bind(&longer_than_two); |
| 6052 // x0 result pointer to result string (uninit) |
| 6053 // x1 length sum of lengths of strings |
| 6054 // x10 left pointer to first string object |
| 6055 // x11 right pointer to second string object |
| 6056 // x12 left_type first string instance type |
| 6057 // x13 right_type second string instance type |
| 6058 // x14 left_len length of first string |
| 6059 // x15 right_len length of second string |
| 6060 |
| 6061 // Check if resulting string will be flat. |
| 6062 __ Cmp(length, ConsString::kMinLength); |
| 6063 __ B(lt, &string_add_flat_result); |
| 6064 // Handle exceptionally long strings in the runtime system. |
| 6065 STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0); |
| 6066 ASSERT(IsPowerOf2(String::kMaxLength + 1)); |
| 6067 |
| 6068 // (kMaxLength + 1) is a power of two, so it is a single bit; if it is set, |
| 6069 // the combined length is >= kMaxLength + 1 and must be handled by the runtime. |
| 6070 __ Tbnz(length, MaskToBit(String::kMaxLength + 1), &call_runtime); |
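| // Why a single-bit test suffices (illustrative): each input length is at |
| // most kMaxLength, so the sum is below 2 * (kMaxLength + 1). With, say, a |
| // hypothetical kMaxLength + 1 == 1 << 28, the sum is below 1 << 29, and |
| // bit 28 is set exactly when the sum exceeds kMaxLength. |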
| 6071 |
| 6072 // The result will not be flat: allocate a cons string object. If both |
| 6073 // strings are ASCII, the result is an ASCII cons string. |
| 6074 Label non_ascii, allocated, ascii_data; |
| 6075 STATIC_ASSERT(kTwoByteStringTag == 0); |
| 6076 Register combined_type = x2; |
| 6077 __ And(combined_type, left_type, right_type); |
| 6078 __ Tbz(combined_type, MaskToBit(kStringEncodingMask), &non_ascii); |
| 6079 |
| 6080 // Allocate an ASCII cons string. |
| 6081 __ Bind(&ascii_data); |
| 6082 __ AllocateAsciiConsString(result, length, x12, x13, &call_runtime); |
| 6083 __ Bind(&allocated); |
| 6084 // Fill the fields of the cons string. |
| 6085 Label skip_write_barrier, after_writing; |
| 6086 ExternalReference high_promotion_mode = ExternalReference:: |
| 6087 new_space_high_promotion_mode_active_address(masm->isolate()); |
| 6088 __ Mov(x3, Operand(high_promotion_mode)); |
| 6089 __ Ldr(x3, MemOperand(x3)); |
| 6090 __ Cbz(x3, &skip_write_barrier); |
| 6091 |
| 6092 __ Str(left, FieldMemOperand(result, ConsString::kFirstOffset)); |
| 6093 __ RecordWriteField(result, |
| 6094 ConsString::kFirstOffset, |
| 6095 left, |
| 6096 x3, |
| 6097 kLRHasNotBeenSaved, |
| 6098 kDontSaveFPRegs, |
| 6099 EMIT_REMEMBERED_SET, |
| 6100 INLINE_SMI_CHECK, |
| 6101 EXPECT_PREGENERATED); |
| 6102 __ Str(right, FieldMemOperand(result, ConsString::kSecondOffset)); |
| 6103 __ RecordWriteField(result, |
| 6104 ConsString::kSecondOffset, |
| 6105 right, |
| 6106 x3, |
| 6107 kLRHasNotBeenSaved, |
| 6108 kDontSaveFPRegs, |
| 6109 EMIT_REMEMBERED_SET, |
| 6110 INLINE_SMI_CHECK, |
| 6111 EXPECT_PREGENERATED); |
| 6112 __ B(&after_writing); |
| 6113 __ Bind(&skip_write_barrier); |
| 6114 |
| 6115 __ Str(left, FieldMemOperand(result, ConsString::kFirstOffset)); |
| 6116 __ Str(right, FieldMemOperand(result, ConsString::kSecondOffset)); |
| 6117 __ Bind(&after_writing); |
| 6118 |
| 6119 __ IncrementCounter(counters->string_add_native(), 1, x3, x4); |
| 6120 __ Ret(); |
| 6121 |
| 6122 __ Bind(&non_ascii); |
| 6123 // At least one of the strings has a two-byte encoding. Check whether it |
| 6124 // happens to contain only one-byte characters. |
| 6125 // x2 combined_type bitwise-and of first and second string instance types |
| 6126 // x12 left_type first string instance type |
| 6127 // x13 right_type second string instance type |
| 6128 __ Tbnz(combined_type, MaskToBit(kOneByteDataHintMask), &ascii_data); |
| 6129 |
| 6130 // If one string is one-byte encoded, and the other is two-byte encoded but |
| 6131 // holds only ASCII data (its one-byte data hint is set), the result can still be an ASCII string. |
| 6132 STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0); |
| 6133 __ Eor(x2, left_type, right_type); |
| 6134 __ And(x2, x2, kOneByteStringTag | kOneByteDataHintTag); |
| 6135 __ Cmp(x2, kOneByteStringTag | kOneByteDataHintTag); |
| 6136 __ B(eq, &ascii_data); |
| 6137 |
| 6138 // Allocate a two byte cons string. |
| 6139 __ AllocateTwoByteConsString(result, length, x12, x13, &call_runtime); |
| 6140 __ B(&allocated); |
| 6141 |
| 6142 // We cannot encounter sliced strings or cons strings here since: |
| 6143 STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength); |
| 6144 // Handle creating a flat result from either external or sequential strings. |
| 6145 // Find the location of each string's first character. |
| 6146 Label first_prepared, second_prepared; |
| 6147 __ Bind(&string_add_flat_result); |
| 6148 |
| 6149 Register temp = x5; |
| 6150 // Check whether both strings have the same encoding. |
| 6151 // x1 length sum of string lengths |
| 6152 // x5 temp temporary register (uninit) |
| 6153 // x6 left_char pointer to first character of first string (uninit) |
| 6154 // x7 right_char pointer to first character of second string (uninit) |
| 6155 // x10 left first string |
| 6156 // x11 right second string |
| 6157 // x12 left_type first string instance type |
| 6158 // x13 right_type second string instance type |
| 6159 // x14 left_len length of first string |
| 6160 // x15 right_len length of second string |
| 6161 __ Eor(temp, left_type, right_type); |
| 6162 __ Tbnz(temp, MaskToBit(kStringEncodingMask), &call_runtime); |
| 6163 |
| 6164 STATIC_ASSERT(kSeqStringTag == 0); |
| 6165 STATIC_ASSERT(kShortExternalStringTag != 0); |
| 6166 STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); |
| 6167 |
| 6168 __ Tst(left_type, kStringRepresentationMask); |
| 6169 __ Add(left_char, left, SeqOneByteString::kHeaderSize - kHeapObjectTag); |
| 6170 __ B(eq, &first_prepared); |
| 6171 // External string: rule out short external string and load string resource. |
| 6172 __ Tbnz(left_type, MaskToBit(kShortExternalStringMask), &call_runtime); |
| 6173 __ Ldr(left_char, FieldMemOperand(left, ExternalString::kResourceDataOffset)); |
| 6174 __ Bind(&first_prepared); |
| 6175 |
| 6176 __ Tst(right_type, kStringRepresentationMask); |
| 6177 __ Add(right_char, right, SeqOneByteString::kHeaderSize - kHeapObjectTag); |
| 6178 __ B(eq, &second_prepared); |
| 6179 // External string: rule out short external string and load string resource. |
| 6180 __ Tbnz(right_type, MaskToBit(kShortExternalStringMask), &call_runtime); |
| 6181 __ Ldr(right_char, |
| 6182 FieldMemOperand(right, ExternalString::kResourceDataOffset)); |
| 6183 __ Bind(&second_prepared); |
| 6184 |
| 6185 Label non_ascii_string_add_flat_result; |
| 6186 // x0 result pointer to result string (uninit) |
| 6187 // x1 length sum of string lengths |
| 6188 // x6 left_char pointer to first character of first string |
| 6189 // x7 right_char pointer to first character of second string |
| 6190 // x12 left_type first string instance type |
| 6191 // x13 right_type second string instance type |
| 6192 // x14 left_len length of first string |
| 6193 // x15 right_len length of second string |
| 6194 |
| 6195 // Both strings have the same encoding. |
| 6196 STATIC_ASSERT(kTwoByteStringTag == 0); |
| 6197 __ Tbz(right_type, MaskToBit(kStringEncodingMask), |
| 6198 &non_ascii_string_add_flat_result); |
| 6199 |
| 6200 Register result_char = x10; |
| 6201 __ AllocateAsciiString(result, length, x3, x12, x13, &call_runtime); |
| 6202 __ Add(result_char, result, SeqOneByteString::kHeaderSize - kHeapObjectTag); |
| 6203 // x0 result pointer to result ascii string object |
| 6204 // x1 length sum of string lengths |
| 6205 // x6 left_char pointer to first character of first string |
| 6206 // x7 right_char pointer to first character of second string |
| 6207 // x10 result_char pointer to first character of result string |
| 6208 // x14 left_len length of first string |
| 6209 // x15 right_len length of second string |
| 6210 __ CopyBytes(result_char, left_char, left_len, temp, kCopyShort); |
| 6211 // x10 result_char pointer to next character of result string |
| 6212 __ CopyBytes(result_char, right_char, right_len, temp, kCopyShort); |
| 6213 __ IncrementCounter(counters->string_add_native(), 1, x3, x4); |
| 6214 __ Ret(); |
| 6215 |
| 6216 |
| 6217 __ Bind(&non_ascii_string_add_flat_result); |
| 6218 __ AllocateTwoByteString(result, length, x3, x12, x13, &call_runtime); |
| 6219 __ Add(result_char, result, SeqTwoByteString::kHeaderSize - kHeapObjectTag); |
| 6220 // x0 result pointer to result two byte string object |
| 6221 // x1 length sum of string lengths |
| 6222 // x6 left_char pointer to first character of first string |
| 6223 // x7 right_char pointer to first character of second string |
| 6224 // x10 result_char pointer to first character of result string |
| 6225 // x14 left_len length of first string |
| 6226 // x15 right_len length of second string |
| 6227 __ Add(left_len, left_len, left_len); |
| 6228 __ CopyBytes(result_char, left_char, left_len, temp, kCopyShort); |
| 6229 |
| 6230 // x10 result_char pointer to next character of result string |
| 6231 __ Add(right_len, right_len, right_len); |
| 6232 __ CopyBytes(result_char, right_char, right_len, temp, kCopyShort); |
| 6233 __ IncrementCounter(counters->string_add_native(), 1, x3, x4); |
| 6234 __ Ret(); |
| 6235 |
| 6236 |
| 6237 // Just jump to runtime to add the two strings. |
| 6238 __ Bind(&call_runtime); |
| 6239 // Restore stack arguments. |
| 6240 __ Push(left, right); |
| 6241 if ((flags_ & ERECT_FRAME) != 0) { |
| 6242 GenerateRegisterArgsPop(masm); |
| 6243 // Build a frame. |
| 6244 { |
| 6245 FrameScope scope(masm, StackFrame::INTERNAL); |
| 6246 GenerateRegisterArgsPush(masm); |
| 6247 __ CallRuntime(Runtime::kStringAdd, 2); |
| 6248 } |
| 6249 __ Ret(); |
| 6250 } else { |
| 6251 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); |
| 6252 } |
| 6253 |
| 6254 if (call_builtin.is_linked()) { |
| 6255 __ Bind(&call_builtin); |
| 6256 // Restore stack arguments. |
| 6257 __ Push(left, right); |
| 6258 if ((flags_ & ERECT_FRAME) != 0) { |
| 6259 GenerateRegisterArgsPop(masm); |
| 6260 // Build a frame. |
| 6261 { |
| 6262 FrameScope scope(masm, StackFrame::INTERNAL); |
| 6263 GenerateRegisterArgsPush(masm); |
| 6264 __ InvokeBuiltin(builtin_id, CALL_FUNCTION); |
| 6265 } |
| 6266 __ Ret(); |
| 6267 } else { |
| 6268 __ InvokeBuiltin(builtin_id, JUMP_FUNCTION); |
| 6269 } |
| 6270 } |
| 6271 } |
| 6272 |
| 6273 |
| 6274 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, |
| 6275 Register arg, |
| 6276 Register scratch1, |
| 6277 Register scratch2, |
| 6278 Register scratch3, |
| 6279 Register scratch4, |
| 6280 Label* slow) { |
| 6281 ASSERT(!AreAliased(arg, scratch1, scratch2, scratch3, scratch4)); |
| 6282 |
| 6283 // First check if the argument is already a string. |
| 6284 Label not_string, done; |
| 6285 __ JumpIfSmi(arg, &not_string); |
| 6286 __ JumpIfObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE, &done, lt); |
| 6287 |
| 6288 // Check the number to string cache. |
| 6289 Label not_cached; |
| 6290 __ Bind(&not_string); |
| 6291 // Puts the cache result into scratch1. |
| 6292 NumberToStringStub::GenerateLookupNumberStringCache( |
| 6293 masm, |
| 6294 arg, |
| 6295 scratch1, |
| 6296 scratch2, |
| 6297 scratch3, |
| 6298 scratch4, |
| 6299 NumberToStringStub::OBJECT_IS_NOT_SMI, |
| 6300 &not_cached); |
| 6301 __ Mov(arg, scratch1); |
| 6302 __ B(&done); |
| 6303 |
| 6304 // Check if the argument is a safe string wrapper. |
| 6305 __ Bind(&not_cached); |
| 6306 __ JumpIfSmi(arg, slow); |
| 6307 Register map = scratch1; |
| 6308 __ JumpIfNotObjectType(arg, map, scratch2, JS_VALUE_TYPE, slow); |
| 6309 __ Ldrb(scratch2, FieldMemOperand(map, Map::kBitField2Offset)); |
| 6310 __ Tbz(scratch2, Map::kStringWrapperSafeForDefaultValueOf, slow); |
| 6311 __ Ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset)); |
| 6312 |
| 6313 __ Bind(&done); |
| 6314 } |
| 6315 |
| 6316 |
| 6317 void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) { |
| 6318 __ Push(x0, x1); |
| 6319 } |
| 6320 |
| 6321 |
| 6322 void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm) { |
| 6323 __ Pop(x1, x0); |
| 6324 } |
| 6325 |
| 6326 |
| 6327 const int RecordWriteStub::kAheadOfTime[] = { |
| 6328 // Arguments to MinorKeyFor() are object, value and address registers. |
| 6329 |
| 6330 // Used in StoreArrayLiteralElementStub::Generate. |
| 6331 MinorKeyFor(x10, x0, x11, EMIT_REMEMBERED_SET, kDontSaveFPRegs), |
| 6332 |
| 6333 // Used in FastNewClosure::Generate. |
| 6334 MinorKeyFor(x5, x4, x1, EMIT_REMEMBERED_SET, kDontSaveFPRegs), |
| 6335 |
| 6336 // Used in KeyedStoreStubCompiler::GenerateStoreFastElement. |
| 6337 MinorKeyFor(x3, x2, x10, EMIT_REMEMBERED_SET, kDontSaveFPRegs), |
| 6338 |
| 6339 // Used in KeyedStoreStubCompiler::GenerateStoreFastDoubleElement. |
| 6340 MinorKeyFor(x2, x3, x10, EMIT_REMEMBERED_SET, kDontSaveFPRegs), |
| 6341 |
| 6342 // Used in ElementsTransitionGenerator::GenerateSmiToDouble. |
| 6343 MinorKeyFor(x2, x3, x6, OMIT_REMEMBERED_SET, kDontSaveFPRegs), |
| 6344 MinorKeyFor(x2, x10, x6, EMIT_REMEMBERED_SET, kDontSaveFPRegs), |
| 6345 |
| 6346 // Used in ElementsTransitionGenerator::GenerateDoubleToObject. |
| 6347 MinorKeyFor(x7, x5, x13, EMIT_REMEMBERED_SET, kDontSaveFPRegs), |
| 6348 MinorKeyFor(x2, x7, x13, EMIT_REMEMBERED_SET, kDontSaveFPRegs), |
| 6349 MinorKeyFor(x2, x3, x13, OMIT_REMEMBERED_SET, kDontSaveFPRegs), |
| 6350 |
| 6351 // Used in KeyedStoreIC::GenerateGeneric helper function. |
| 6352 MinorKeyFor(x4, x10, x11, EMIT_REMEMBERED_SET, kDontSaveFPRegs), |
| 6353 |
| 6354 // Used in RegExpExecStub::Generate. |
| 6355 MinorKeyFor(x21, x10, x11, EMIT_REMEMBERED_SET, kDontSaveFPRegs), |
| 6356 |
| 6357 // Used in StringAddStub::Generate. |
| 6358 MinorKeyFor(x0, x10, x3, EMIT_REMEMBERED_SET, kDontSaveFPRegs), |
| 6359 MinorKeyFor(x0, x11, x3, EMIT_REMEMBERED_SET, kDontSaveFPRegs), |
| 6360 |
| 6361 // TODO(jbramley): There are many more sites that want a pregenerated |
| 6362 // instance of this stub, but they are currently unimplemented. Once they are |
| 6363 // implemented, they should be added to this list. |
| 6364 |
| 6365 // Null termination. |
| 6366 // It is safe to encode this as 0 because the three registers used for |
| 6367 // RecordWriteStub must not be aliased, and 0 represents (x0, x0, x0). |
| 6368 0 |
| 6369 }; |
| 6370 |
| 6371 |
| 6372 void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) { |
| 6373 // Pregenerate all of the stub variants in the kAheadOfTime list. |
| 6374 for (const int* entry = kAheadOfTime; *entry != 0; entry++) { |
| 6375 // kAheadOfTime is a list of minor keys, so extract the relevant fields |
| 6376 // from the minor key. |
| 6377 Register object = Register::XRegFromCode(ObjectBits::decode(*entry)); |
| 6378 Register value = Register::XRegFromCode(ValueBits::decode(*entry)); |
| 6379 Register address = Register::XRegFromCode(AddressBits::decode(*entry)); |
| 6380 RememberedSetAction action = RememberedSetActionBits::decode(*entry); |
| 6381 SaveFPRegsMode fp_mode = SaveFPRegsModeBits::decode(*entry); |
| 6382 |
| 6383 RecordWriteStub stub(object, value, address, action, fp_mode); |
| 6384 stub.GetCode(isolate)->set_is_pregenerated(true); |
| 6385 } |
| 6386 } |
| 6387 |
| 6388 |
| 6389 bool CodeStub::CanUseFPRegisters() { |
| 6390 // FP registers always available on A64. |
| 6391 return true; |
| 6392 } |
| 6393 |
| 6394 |
| 6395 bool RecordWriteStub::IsPregenerated() { |
| 6396 // If the stub exists in the kAheadOfTime list, it is pregenerated. |
| 6397 for (const int* entry = kAheadOfTime; *entry != 0; entry++) { |
| 6398 if (*entry == MinorKeyFor(object_, value_, address_, |
| 6399 remembered_set_action_, save_fp_regs_mode_)) { |
| 6400 return true; |
| 6401 } |
| 6402 } |
| 6403 return false; |
| 6404 } |
| 6405 |
| 6406 |
| 6407 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { |
| 6408 // We need some extra registers for this stub. They have been allocated, |
| 6409 // but we need to save them before using them. |
| 6410 regs_.Save(masm); |
| 6411 |
| 6412 if (remembered_set_action_ == EMIT_REMEMBERED_SET) { |
| 6413 Label dont_need_remembered_set; |
| 6414 |
| 6415 Register value = regs_.scratch0(); |
| 6416 __ Ldr(value, MemOperand(regs_.address())); |
| 6417 __ JumpIfNotInNewSpace(value, &dont_need_remembered_set); |
| 6418 |
| 6419 __ CheckPageFlagSet(regs_.object(), |
| 6420 value, |
| 6421 1 << MemoryChunk::SCAN_ON_SCAVENGE, |
| 6422 &dont_need_remembered_set); |
| 6423 |
| 6424 // First notify the incremental marker if necessary, then update the |
| 6425 // remembered set. |
| 6426 CheckNeedsToInformIncrementalMarker( |
| 6427 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode); |
| 6428 InformIncrementalMarker(masm, mode); |
| 6429 regs_.Restore(masm); // Restore the extra scratch registers we used. |
| 6430 __ RememberedSetHelper(object_, |
| 6431 address_, |
| 6432 value_, |
| 6433 save_fp_regs_mode_, |
| 6434 MacroAssembler::kReturnAtEnd); |
| 6435 |
| 6436 __ Bind(&dont_need_remembered_set); |
| 6437 } |
| 6438 |
| 6439 CheckNeedsToInformIncrementalMarker( |
| 6440 masm, kReturnOnNoNeedToInformIncrementalMarker, mode); |
| 6441 InformIncrementalMarker(masm, mode); |
| 6442 regs_.Restore(masm); // Restore the extra scratch registers we used. |
| 6443 __ Ret(); |
| 6444 } |
| 6445 |
| 6446 |
| 6447 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { |
| 6448 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); |
| 6449 Register address = |
| 6450 x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address(); |
| 6451 ASSERT(!address.Is(regs_.object())); |
| 6452 ASSERT(!address.Is(x0)); |
| 6453 __ Mov(address, regs_.address()); |
| 6454 __ Mov(x0, regs_.object()); |
| 6455 __ Mov(x1, address); |
| 6456 __ Mov(x2, Operand(ExternalReference::isolate_address(masm->isolate()))); |
| 6457 |
| 6458 AllowExternalCallThatCantCauseGC scope(masm); |
| 6459 ExternalReference function = (mode == INCREMENTAL_COMPACTION) |
| 6460 ? ExternalReference::incremental_evacuation_record_write_function( |
| 6461 masm->isolate()) |
| 6462 : ExternalReference::incremental_marking_record_write_function( |
| 6463 masm->isolate()); |
| 6464 __ CallCFunction(function, 3, 0); |
| 6465 |
| 6466 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); |
| 6467 } |
| 6468 |
| 6469 |
| 6470 void RecordWriteStub::CheckNeedsToInformIncrementalMarker( |
| 6471 MacroAssembler* masm, |
| 6472 OnNoNeedToInformIncrementalMarker on_no_need, |
| 6473 Mode mode) { |
| 6474 Label on_black; |
| 6475 Label need_incremental; |
| 6476 Label need_incremental_pop_scratch; |
| 6477 |
| 6478 Register mem_chunk = regs_.scratch0(); |
| 6479 Register counter = regs_.scratch1(); |
| 6480 __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask); |
| 6481 __ Ldr(counter, |
| 6482 MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset)); |
| 6483 __ Subs(counter, counter, 1); |
| 6484 __ Str(counter, |
| 6485 MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset)); |
| 6486 __ B(mi, &need_incremental); |
| 6487 |
| 6488 // If the object is not black we don't have to inform the incremental marker. |
| 6489 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); |
| 6490 |
| 6491 regs_.Restore(masm); // Restore the extra scratch registers we used. |
| 6492 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { |
| 6493 __ RememberedSetHelper(object_, |
| 6494 address_, |
| 6495 value_, |
| 6496 save_fp_regs_mode_, |
| 6497 MacroAssembler::kReturnAtEnd); |
| 6498 } else { |
| 6499 __ Ret(); |
| 6500 } |
| 6501 |
| 6502 __ Bind(&on_black); |
| 6503 // Get the value from the slot. |
| 6504 Register value = regs_.scratch0(); |
| 6505 __ Ldr(value, MemOperand(regs_.address())); |
| 6506 |
| 6507 if (mode == INCREMENTAL_COMPACTION) { |
| 6508 Label ensure_not_white; |
| 6509 |
| 6510 __ CheckPageFlagClear(value, |
| 6511 regs_.scratch1(), |
| 6512 MemoryChunk::kEvacuationCandidateMask, |
| 6513 &ensure_not_white); |
| 6514 |
| 6515 __ CheckPageFlagClear(regs_.object(), |
| 6516 regs_.scratch1(), |
| 6517 MemoryChunk::kSkipEvacuationSlotsRecordingMask, |
| 6518 &need_incremental); |
| 6519 |
| 6520 __ Bind(&ensure_not_white); |
| 6521 } |
| 6522 |
| 6523 // We need extra registers for this, so we push the object and the address |
| 6524 // register temporarily. |
| 6525 __ Push(regs_.address(), regs_.object()); |
| 6526 __ EnsureNotWhite(value, |
| 6527 regs_.scratch1(), // Scratch. |
| 6528 regs_.object(), // Scratch. |
| 6529 regs_.address(), // Scratch. |
| 6530 regs_.scratch2(), // Scratch. |
| 6531 &need_incremental_pop_scratch); |
| 6532 __ Pop(regs_.object(), regs_.address()); |
| 6533 |
| 6534 regs_.Restore(masm); // Restore the extra scratch registers we used. |
| 6535 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { |
| 6536 __ RememberedSetHelper(object_, |
| 6537 address_, |
| 6538 value_, |
| 6539 save_fp_regs_mode_, |
| 6540 MacroAssembler::kReturnAtEnd); |
| 6541 } else { |
| 6542 __ Ret(); |
| 6543 } |
| 6544 |
| 6545 __ Bind(&need_incremental_pop_scratch); |
| 6546 __ Pop(regs_.object(), regs_.address()); |
| 6547 |
| 6548 __ Bind(&need_incremental); |
| 6549 // Fall through when we need to inform the incremental marker. |
| 6550 } |
| 6551 |
| 6552 |
| 6553 void RecordWriteStub::Generate(MacroAssembler* masm) { |
| 6554 Label skip_to_incremental_noncompacting; |
| 6555 Label skip_to_incremental_compacting; |
| 6556 |
| 6557 // We patch the first two instructions back and forth between a nop and a |
| 6558 // real branch when we start and stop incremental heap marking. |
| 6559 // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so two |
| 6560 // nops are generated. |
| 6561 // See RecordWriteStub::Patch for details. |
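| // Roughly: 'adr xzr, label' discards its result, so both instructions |
| // start out as effective nops that still encode the label offsets; |
| // patching (as described above) rewrites one of them into a real branch |
| // to enter the corresponding incremental mode. |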
| 6562 { |
| 6563 InstructionAccurateScope scope(masm, 2); |
| 6564 __ adr(xzr, &skip_to_incremental_noncompacting); |
| 6565 __ adr(xzr, &skip_to_incremental_compacting); |
| 6566 } |
| 6567 |
| 6568 if (remembered_set_action_ == EMIT_REMEMBERED_SET) { |
| 6569 __ RememberedSetHelper(object_, |
| 6570 address_, |
| 6571 value_, |
| 6572 save_fp_regs_mode_, |
| 6573 MacroAssembler::kReturnAtEnd); |
| 6574 } |
| 6575 __ Ret(); |
| 6576 |
| 6577 __ Bind(&skip_to_incremental_noncompacting); |
| 6578 GenerateIncremental(masm, INCREMENTAL); |
| 6579 |
| 6580 __ Bind(&skip_to_incremental_compacting); |
| 6581 GenerateIncremental(masm, INCREMENTAL_COMPACTION); |
| 6582 } |
| 6583 |
| 6584 |
| 6585 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { |
| 6586 // TODO(all): Possible optimisations in this function: |
| 6587 // 1. Merge CheckFastElements and CheckFastSmiElements, so that the map |
| 6588 // bitfield is loaded only once. |
| 6589 // 2. Refactor the Ldr/Add sequence at the start of fast_elements and |
| 6590 // smi_element. |
| 6591 |
| 6592 // x0 value element value to store |
| 6593 // x1 array array literal |
| 6594 // x2 array_map map of array literal |
| 6595 // x3 index_smi element index as smi |
| 6596 // x4 array_index_smi array literal index in function as smi |
| 6597 |
| 6598 Register value = x0; |
| 6599 Register array = x1; |
| 6600 Register array_map = x2; |
| 6601 Register index_smi = x3; |
| 6602 Register array_index_smi = x4; |
| 6603 |
| 6604 Label double_elements, smi_element, fast_elements, slow_elements; |
| 6605 __ CheckFastElements(array_map, x10, &double_elements); |
| 6606 __ JumpIfSmi(value, &smi_element); |
| 6607 __ CheckFastSmiElements(array_map, x10, &fast_elements); |
| 6608 |
| 6609 // Store into the array literal requires an elements transition. Call into |
| 6610 // the runtime. |
| 6611 __ Bind(&slow_elements); |
| 6612 __ Push(array, index_smi, value); |
| 6613 __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
| 6614 __ Ldr(x11, FieldMemOperand(x10, JSFunction::kLiteralsOffset)); |
| 6615 __ Push(x11, array_index_smi); |
| 6616 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); |
| 6617 |
| 6618 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. |
| 6619 __ Bind(&fast_elements); |
| 6620 __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset)); |
| 6621 __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2)); |
| 6622 __ Add(x11, x11, FixedArray::kHeaderSize - kHeapObjectTag); |
| 6623 __ Str(value, MemOperand(x11)); |
| 6624 // Update the write barrier for the array store. |
| 6625 __ RecordWrite(x10, x11, value, kLRHasNotBeenSaved, kDontSaveFPRegs, |
| 6626 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK, EXPECT_PREGENERATED); |
| 6627 __ Ret(); |
| 6628 |
| 6629 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS, |
| 6630 // and value is Smi. |
| 6631 __ Bind(&smi_element); |
| 6632 __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset)); |
| 6633 __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2)); |
| 6634 __ Str(value, FieldMemOperand(x11, FixedArray::kHeaderSize)); |
| 6635 __ Ret(); |
| 6636 |
| 6637 __ Bind(&double_elements); |
| 6638 __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset)); |
| 6639 __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0, d1, |
| 6640 &slow_elements); |
| 6641 __ Ret(); |
| 6642 } |
| 6643 |
| 6644 |
| 6645 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { |
| 6646 // TODO(jbramley): The ARM code leaves the (shifted) offset in r1. Why? |
| 6647 CEntryStub ces(1, kSaveFPRegs); |
| 6648 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); |
| 6649 int parameter_count_offset = |
| 6650 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; |
| 6651 __ Ldr(x1, MemOperand(fp, parameter_count_offset)); |
| 6652 if (function_mode_ == JS_FUNCTION_STUB_MODE) { |
| 6653 __ Add(x1, x1, 1); |
| 6654 } |
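| // The extra slot is presumably the receiver, which the recorded parameter |
| // count does not include in JS_FUNCTION_STUB_MODE. |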
| 6655 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); |
| 6656 __ Add(__ StackPointer(), __ StackPointer(), |
| 6657 Operand(x1, LSL, kPointerSizeLog2)); |
| 6658 // Return to IC Miss stub, continuation still on stack. |
| 6659 __ Ret(); |
| 6660 } |
| 6661 |
| 6662 |
| 6663 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { |
| 6664 if (entry_hook_ != NULL) { |
| 6665 // TODO(all): This needs a literal pool blocking scope and predictable code |
| 6666 // size. |
| 6667 ProfileEntryHookStub stub; |
| 6668 __ Push(lr); |
| 6669 __ CallStub(&stub); |
| 6670 __ Pop(lr); |
| 6671 } |
| 6672 } |
| 6673 |
| 6674 |
| 6675 void ProfileEntryHookStub::Generate(MacroAssembler* masm) { |
| 6676 // The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by |
| 6677 // a "Push lr" instruction, followed by a call. |
| 6678 // TODO(jbramley): Verify that this call is always made with relocation. |
| 6679 static const int kReturnAddressDistanceFromFunctionStart = |
| 6680 Assembler::kCallSizeWithRelocation + (2 * kInstructionSize); |
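| // In other words (sketch): the hook's first argument will be |
| //   lr - (kCallSizeWithRelocation + 2 * kInstructionSize), |
| // rewinding over the call, the "Push lr" and the stack bump to recover |
| // the instrumented function's entry address. |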
| 6681 |
| 6682 // Save live volatile registers. |
| 6683 __ Push(lr, x1, x5); |
| 6684 static const int kNumSavedRegs = 3; |
| 6685 |
| 6686 // Compute the function's address as the first argument. |
| 6687 __ Sub(x0, lr, kReturnAddressDistanceFromFunctionStart); |
| 6688 |
| 6689 #if defined(V8_HOST_ARCH_A64) |
| 6690 __ Mov(x10, Operand(reinterpret_cast<intptr_t>(&entry_hook_))); |
| 6691 __ Ldr(x10, MemOperand(x10)); |
| 6692 #else |
| 6693 // Under the simulator, the entry hook must be routed through a trampoline |
| 6694 // function at a known address. |
| 6695 Address trampoline_address = reinterpret_cast<Address>( |
| 6696 reinterpret_cast<intptr_t>(EntryHookTrampoline)); |
| 6697 ApiFunction dispatcher(trampoline_address); |
| 6698 __ Mov(x10, Operand(ExternalReference(&dispatcher, |
| 6699 ExternalReference::BUILTIN_CALL, |
| 6700 masm->isolate()))); |
| 6701 #endif |
| 6702 |
| 6703 // The caller's return address is above the saved temporaries. |
| 6704 // Grab that for the second argument to the hook. |
| 6705 __ Peek(x1, kNumSavedRegs * kPointerSize); |
| 6706 |
| 6707 { |
| 6708 // Create a dummy frame, as CallCFunction requires this. |
| 6709 FrameScope frame(masm, StackFrame::MANUAL); |
| 6710 __ CallCFunction(x10, 2, 0); |
| 6711 } |
| 6712 |
| 6713 __ Pop(x5, x1, lr); |
| 6714 __ Ret(); |
| 6715 } |
| 6716 |
| 6717 |
| 6718 void DirectCEntryStub::Generate(MacroAssembler* masm) { |
| 6719 // When calling into C++ code the stack pointer must be csp. |
| 6720 // Therefore this code must use csp for peek/poke operations when the |
| 6721 // stub is generated. When the stub is called |
| 6722 // (via DirectCEntryStub::GenerateCall), the caller must set up an ExitFrame |
| 6723 // and configure the stack pointer *before* doing the call. |
| 6724 const Register old_stack_pointer = __ StackPointer(); |
| 6725 __ SetStackPointer(csp); |
| 6726 |
| 6727 // Put return address on the stack (accessible to GC through exit frame pc). |
| 6728 __ Poke(lr, 0); |
| 6729 // Call the C++ function. |
| 6730 __ Blr(x10); |
| 6731 // Return to calling code. |
| 6732 __ Peek(lr, 0); |
| 6733 __ Ret(); |
| 6734 |
| 6735 __ SetStackPointer(old_stack_pointer); |
| 6736 } |
| 6737 |
| 6738 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, |
| 6739 Register target) { |
| 6740 // Make sure the caller configured the stack pointer (see comment in |
| 6741 // DirectCEntryStub::Generate). |
| 6742 ASSERT(csp.Is(__ StackPointer())); |
| 6743 |
| 6744 intptr_t code = |
| 6745 reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location()); |
| 6746 __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET)); |
| 6747 __ Mov(x10, target); |
| 6748 // Branch to the stub. |
| 6749 __ Blr(lr); |
| 6750 } |
| 6751 |
| 6752 |
| 6753 // Probe the name dictionary in the 'elements' register. |
| 6754 // Jump to the 'done' label if a property with the given name is found. |
| 6755 // Jump to the 'miss' label otherwise. |
| 6756 // |
| 6757 // If the lookup was successful, 'scratch2' will be equal to elements + kPointerSize * (3 * index). |
| 6758 // 'elements' and 'name' registers are preserved on miss. |
| 6759 void NameDictionaryLookupStub::GeneratePositiveLookup( |
| 6760 MacroAssembler* masm, |
| 6761 Label* miss, |
| 6762 Label* done, |
| 6763 Register elements, |
| 6764 Register name, |
| 6765 Register scratch1, |
| 6766 Register scratch2) { |
| 6767 ASSERT(!AreAliased(elements, name, scratch1, scratch2)); |
| 6768 |
| 6769 // Assert that the 'name' register contains a Name. |
| 6770 __ AssertName(name); |
| 6771 |
| 6772 // Compute the capacity mask. |
| 6773 __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset)); |
| 6774 __ Sub(scratch1, scratch1, 1); |
| 6775 |
| 6776 // Generate an unrolled loop that performs a few probes before giving up. |
| 6777 for (int i = 0; i < kInlinedProbes; i++) { |
| 6778 // Compute the masked index: (hash + i + i * i) & mask. |
| 6779 __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); |
| 6780 if (i > 0) { |
| 6781 // Add the probe offset (i + i * i) left shifted to avoid right shifting |
| 6782 // the hash in a separate instruction. The value hash + i + i * i is right |
| 6783 // shifted by the following And instruction. |
| 6784 ASSERT(NameDictionary::GetProbeOffset(i) < |
| 6785 1 << (32 - Name::kHashFieldOffset)); |
| 6786 __ Add(scratch2, scratch2, Operand( |
| 6787 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); |
| 6788 } |
| 6789 __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift)); |
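| // Net effect (sketch): index = (hash + i + i*i) & (capacity - 1), where |
| // hash is the hash field shifted down by kHashShift; the shift is folded |
| // into the And above. |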
| 6790 |
| 6791 // Scale the index by multiplying by the element size. |
| 6792 ASSERT(NameDictionary::kEntrySize == 3); |
| 6793 __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1)); |
| 6794 |
| 6795 // Check if the key is identical to the name. |
| 6796 __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2)); |
| 6797 // TODO(jbramley): We need another scratch here, but some callers can't |
| 6798 // provide a scratch3 so we have to use Tmp1(). We should find a clean way |
| 6799 // to make it unavailable to the MacroAssembler for a short time. |
| 6800 __ Ldr(__ Tmp1(), FieldMemOperand(scratch2, kElementsStartOffset)); |
| 6801 __ Cmp(name, __ Tmp1()); |
| 6802 __ B(eq, done); |
| 6803 } |
| 6804 |
| 6805 // The inlined probes didn't find the entry. |
| 6806 // Call the complete stub to scan the whole dictionary. |
| 6807 |
| 6808 CPURegList spill_list(CPURegister::kRegister, kXRegSize, 0, 6); |
| 6809 spill_list.Combine(lr); |
| 6810 spill_list.Remove(scratch1); |
| 6811 spill_list.Remove(scratch2); |
| 6812 |
| 6813 __ PushCPURegList(spill_list); |
| 6814 |
| 6815 if (name.is(x0)) { |
| 6816 ASSERT(!elements.is(x1)); |
| 6817 __ Mov(x1, name); |
| 6818 __ Mov(x0, elements); |
| 6819 } else { |
| 6820 __ Mov(x0, elements); |
| 6821 __ Mov(x1, name); |
| 6822 } |
| 6823 |
| 6824 Label not_found; |
| 6825 NameDictionaryLookupStub stub(POSITIVE_LOOKUP); |
| 6826 __ CallStub(&stub); |
| 6827 __ Cbz(x0, &not_found); |
| 6828 __ Mov(scratch2, x2); // Move entry index into scratch2. |
| 6829 __ PopCPURegList(spill_list); |
| 6830 __ B(done); |
| 6831 |
| 6832 __ Bind(&not_found); |
| 6833 __ PopCPURegList(spill_list); |
| 6834 __ B(miss); |
| 6835 } |
| 6836 |
| 6837 |
| 6838 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, |
| 6839 Label* miss, |
| 6840 Label* done, |
| 6841 Register receiver, |
| 6842 Register properties, |
| 6843 Handle<Name> name, |
| 6844 Register scratch0) { |
| 6845 ASSERT(!AreAliased(receiver, properties, scratch0)); |
| 6846 ASSERT(name->IsUniqueName()); |
| 6847 // If the names of the slots probed for the hash value (1 to kProbes - 1) |
| 6848 // are not equal to the name, and the kProbes-th slot is not used (its name |
| 6849 // is the undefined value), then the hash table is guaranteed not to |
| 6850 // contain the property. This holds even if some slots represent deleted |
| 6851 // properties (their names are the hole value). |
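| // Sketch of the inlined probes generated below (illustrative pseudo-C): |
| //   for (int i = 0; i < kInlinedProbes; i++) { |
| //     entry = (hash + i + i*i) & (capacity - 1); |
| //     key = dictionary[entry].name; |
| //     if (key == undefined) goto done;        // Name definitely absent. |
| //     if (key == name) goto miss;             // Name is present. |
| //     if (key == the_hole) continue;          // Deleted property. |
| //     if (!key->IsUniqueName()) goto miss;    // Cannot rule the name out. |
| //   } |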
| 6852 for (int i = 0; i < kInlinedProbes; i++) { |
| 6853 // scratch0 points to properties hash. |
| 6854 // Compute the masked index: (hash + i + i * i) & mask. |
| 6855 Register index = scratch0; |
| 6856 // The capacity is a smi and a power of two. |
| 6857 __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset)); |
| 6858 __ Sub(index, index, 1); |
| 6859 __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i)); |
| 6860 |
| 6861 // Scale the index by multiplying by the entry size. |
| 6862 ASSERT(NameDictionary::kEntrySize == 3); |
| 6863 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3. |
| 6864 |
| 6865 Register entity_name = scratch0; |
| 6866 // Finding undefined here means the name is not in the table. |
| 6867 Register tmp = index; |
| 6868 __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2)); |
| 6869 __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); |
| 6870 |
| 6871 __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done); |
| 6872 |
| 6873 // Stop if we found the property. |
| 6874 __ Cmp(entity_name, Operand(name)); |
| 6875 __ B(eq, miss); |
| 6876 |
| 6877 Label good; |
| 6878 __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good); |
| 6879 |
| 6880 // Check if the entry name is not a unique name. |
| 6881 __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset)); |
| 6882 __ Ldrb(entity_name, |
| 6883 FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); |
| 6884 __ TestAndBranchIfAnySet(entity_name, kIsInternalizedMask, &good); |
| 6885 __ CompareAndBranch(entity_name, SYMBOL_TYPE, ne, miss); |
| 6886 |
| 6887 __ Bind(&good); |
| 6888 } |
| 6889 |
| 6890 CPURegList spill_list(CPURegister::kRegister, kXRegSize, 0, 6); |
| 6891 spill_list.Combine(lr); |
| 6892 spill_list.Remove(scratch0); // Scratch registers don't need to be preserved. |
| 6893 |
| 6894 __ PushCPURegList(spill_list); |
| 6895 |
| 6896 __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
| 6897 __ Mov(x1, Operand(name)); |
| 6898 NameDictionaryLookupStub stub(NEGATIVE_LOOKUP); |
| 6899 __ CallStub(&stub); |
| 6900 // Move stub return value to scratch0. Note that scratch0 is not included in |
| 6901 // spill_list and won't be clobbered by PopCPURegList. |
| 6902 __ Mov(scratch0, x0); |
| 6903 __ PopCPURegList(spill_list); |
| 6904 |
| 6905 __ Cbz(scratch0, done); |
| 6906 __ B(miss); |
| 6907 } |
| 6908 |
| 6909 |
| 6910 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { |
| 6911 // This stub overrides SometimesSetsUpAFrame() to return false. That means |
| 6912 // we cannot call anything that could cause a GC from this stub. |
| 6913 // |
| 6914 // Arguments are in x0 and x1: |
| 6915 // x0: property dictionary. |
| 6916 // x1: the name of the property we are looking for. |
| 6917 // |
| 6918 // The return value is in x0: zero if the lookup failed, non-zero otherwise. |
| 6919 // If the lookup is successful, x2 will contain the index of the entry. |
| 6920 |
| 6921 Register result = x0; |
| 6922 Register dictionary = x0; |
| 6923 Register key = x1; |
| 6924 Register index = x2; |
| 6925 Register mask = x3; |
| 6926 Register hash = x4; |
| 6927 Register undefined = x5; |
| 6928 Register entry_key = x6; |
| 6929 |
| 6930 Label in_dictionary, maybe_in_dictionary, not_in_dictionary; |
| 6931 |
| 6932 __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset)); |
| 6933 __ Sub(mask, mask, 1); |
| 6934 |
| 6935 __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset)); |
| 6936 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); |
| 6937 |
| 6938 for (int i = kInlinedProbes; i < kTotalProbes; i++) { |
| 6939 // Compute the masked index: (hash + i + i * i) & mask. |
| 6940 // The capacity is a smi and a power of two. |
| 6941 if (i > 0) { |
| 6942 // Add the probe offset (i + i * i) left-shifted, to avoid right-shifting
| 6943 // the hash in a separate instruction. The value hash + i + i * i is
| 6944 // right-shifted by the And instruction that follows.
| 6945 ASSERT(NameDictionary::GetProbeOffset(i) < |
| 6946 1 << (32 - Name::kHashFieldOffset)); |
| 6947 __ Add(index, hash, |
| 6948 NameDictionary::GetProbeOffset(i) << Name::kHashShift); |
| 6949 } else { |
| 6950 __ Mov(index, hash); |
| 6951 } |
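| // index now holds hash_field plus the probe offset pre-shifted left by
| // kHashShift, so the single And below extracts the hash and applies the
| // mask in one go: index = (hash + i + i * i) & mask.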
| 6952 __ And(index, mask, Operand(index, LSR, Name::kHashShift)); |
| 6953 |
| 6954 // Scale the index by multiplying by the entry size. |
| 6955 ASSERT(NameDictionary::kEntrySize == 3); |
| 6956 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3. |
| 6957 |
| 6958 __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2)); |
| 6959 __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset)); |
| 6960 |
| 6961 // An undefined entry means the name is not present in the dictionary.
| 6962 __ Cmp(entry_key, undefined); |
| 6963 __ B(eq, &not_in_dictionary);
| 6964 |
| 6965 // Stop if we found the property.
| 6966 __ Cmp(entry_key, key); |
| 6967 __ B(eq, &in_dictionary); |
| 6968 |
| 6969 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) { |
| 6970 // Check if the entry name is not a unique name. |
| 6971 Label cont; |
| 6972 __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); |
| 6973 __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset)); |
| 6974 STATIC_ASSERT(kIsInternalizedMask != 0); |
| 6975 __ Tbnz(entry_key, MaskToBit(kIsInternalizedMask), &cont); |
| 6976 __ CompareAndBranch(entry_key, SYMBOL_TYPE, ne, &maybe_in_dictionary); |
| 6977 __ Bind(&cont); |
| 6978 } |
| 6979 } |
| 6980 |
| 6981 __ Bind(&maybe_in_dictionary); |
| 6982 // If we are doing negative lookup then probing failure should be |
| 6983 // treated as a lookup success. For positive lookup, probing failure |
| 6984 // should be treated as lookup failure. |
| 6985 if (mode_ == POSITIVE_LOOKUP) { |
| 6986 __ Mov(result, 0); |
| 6987 __ Ret(); |
| 6988 } |
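| // For NEGATIVE_LOOKUP, fall through to in_dictionary: reporting a
| // possible hit sends the caller to its miss path, which is the safe
| // answer when probing was inconclusive.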
| 6989 |
| 6990 __ Bind(&in_dictionary); |
| 6991 __ Mov(result, 1); |
| 6992 __ Ret(); |
| 6993 |
| 6994 __ Bind(&not_in_dictionary);
| 6995 __ Mov(result, 0); |
| 6996 __ Ret(); |
| 6997 } |
| 6998 |
| 6999 |
| 7000 template<class T> |
| 7001 static void CreateArrayDispatch(MacroAssembler* masm) { |
| 7002 Register kind = x3; |
| 7003 int last_index = GetSequenceIndexFromFastElementsKind( |
| 7004 TERMINAL_FAST_ELEMENTS_KIND); |
| 7005 for (int i = 0; i <= last_index; ++i) { |
| 7006 Label next; |
| 7007 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i); |
| 7008 // TODO(jbramley): Is this the best way to handle this? Can we make the tail |
| 7009 // calls conditional, rather than hopping over each one? |
| 7010 __ CompareAndBranch(kind, candidate_kind, ne, &next); |
| 7011 T stub(candidate_kind); |
| 7012 __ TailCallStub(&stub); |
| 7013 __ Bind(&next); |
| 7014 } |
| 7015 |
| 7016 // If we reached this point there is a problem. |
| 7017 __ Abort("Unexpected ElementsKind in array constructor"); |
| 7018 } |
| 7019 |
| 7020 |
| 7021 // TODO(jbramley): If this needs to be a special case, make it a proper template |
| 7022 // specialization, and not a separate function. |
| 7023 static void CreateArrayDispatchOneArgument(MacroAssembler* masm) { |
| 7024 // x0 - argc |
| 7025 // x1 - constructor? |
| 7026 // x2 - type info cell |
| 7027 // x3 - kind |
| 7028 // sp[0] - last argument |
| 7029 |
| 7030 Register type_info_cell = x2; |
| 7031 Register kind = x3; |
| 7032 |
| 7033 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); |
| 7034 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); |
| 7035 STATIC_ASSERT(FAST_ELEMENTS == 2); |
| 7036 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); |
| 7037 STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4); |
| 7038 STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); |
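| // Note the encoding: packed kinds are even and their holey variants are
| // the next odd value, so "kind | 1" converts a packed kind to its holey
| // counterpart (used below), and bit 0 tests holeyness.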
| 7039 |
| 7040 Handle<Object> undefined_sentinel( |
| 7041 masm->isolate()->heap()->undefined_value(), |
| 7042 masm->isolate()); |
| 7043 |
| 7044 // Is the low bit set? If so, the array is holey. |
| 7045 Label normal_sequence; |
| 7046 __ Tbnz(kind, 0, &normal_sequence); |
| 7047 |
| 7048 // Look at the last argument. |
| 7049 // TODO(jbramley): What does a 0 argument represent? |
| 7050 __ Peek(x10, 0); |
| 7051 __ Cbz(x10, &normal_sequence); |
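| // (Presumably a zero argument means `new Array(0)`: no elements are
| // allocated, so the packed kind can safely be kept.)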
| 7052 |
| 7053 // We are going to create a holey array, but our kind is non-holey. |
| 7054 // Fix kind and retry. |
| 7055 __ Orr(kind, kind, 1); |
| 7056 __ Cmp(type_info_cell, Operand(undefined_sentinel)); |
| 7057 __ B(eq, &normal_sequence); |
| 7058 |
| 7059 // Save the resulting elements kind in type info. |
| 7060 // TODO(jbramley): Tag and store at the same time. |
| 7061 __ SmiTag(x10, kind); |
| 7062 __ Str(x10, FieldMemOperand(type_info_cell, kPointerSize)); |
| 7063 |
| 7064 __ Bind(&normal_sequence); |
| 7065 int last_index = GetSequenceIndexFromFastElementsKind( |
| 7066 TERMINAL_FAST_ELEMENTS_KIND); |
| 7067 for (int i = 0; i <= last_index; ++i) { |
| 7068 Label next; |
| 7069 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i); |
| 7070 // TODO(jbramley): Is this the best way to handle this? Can we make the tail |
| 7071 // calls conditional, rather than hopping over each one? |
| 7072 __ CompareAndBranch(kind, candidate_kind, ne, &next); |
| 7073 ArraySingleArgumentConstructorStub stub(candidate_kind); |
| 7074 __ TailCallStub(&stub); |
| 7075 __ Bind(&next); |
| 7076 } |
| 7077 |
| 7078 // If we reached this point there is a problem. |
| 7079 __ Abort("Unexpected ElementsKind in array constructor"); |
| 7080 } |
| 7081 |
| 7082 |
| 7083 template<class T> |
| 7084 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) { |
| 7085 int to_index = GetSequenceIndexFromFastElementsKind( |
| 7086 TERMINAL_FAST_ELEMENTS_KIND); |
| 7087 for (int i = 0; i <= to_index; ++i) { |
| 7088 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); |
| 7089 T stub(kind); |
| 7090 stub.GetCode(isolate)->set_is_pregenerated(true); |
| 7091 if (AllocationSiteInfo::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) { |
| 7092 T stub1(kind, true); |
| 7093 stub1.GetCode(isolate)->set_is_pregenerated(true); |
| 7094 } |
| 7095 } |
| 7096 } |
| 7097 |
| 7098 |
| 7099 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) { |
| 7100 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>( |
| 7101 isolate); |
| 7102 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>( |
| 7103 isolate); |
| 7104 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>( |
| 7105 isolate); |
| 7106 } |
| 7107 |
| 7108 |
| 7109 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime( |
| 7110 Isolate* isolate) { |
| 7111 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS }; |
| 7112 for (int i = 0; i < 2; i++) { |
| 7113 // For internal arrays we only need these three stub variants.
| 7114 InternalArrayNoArgumentConstructorStub stubh1(kinds[i]); |
| 7115 stubh1.GetCode(isolate)->set_is_pregenerated(true); |
| 7116 InternalArraySingleArgumentConstructorStub stubh2(kinds[i]); |
| 7117 stubh2.GetCode(isolate)->set_is_pregenerated(true); |
| 7118 InternalArrayNArgumentsConstructorStub stubh3(kinds[i]); |
| 7119 stubh3.GetCode(isolate)->set_is_pregenerated(true); |
| 7120 } |
| 7121 } |
| 7122 |
| 7123 |
| 7124 void ArrayConstructorStub::Generate(MacroAssembler* masm) { |
| 7125 // ----------- S t a t e ------------- |
| 7126 // -- x0 : argc (only if argument_count_ == ANY) |
| 7127 // -- x1 : constructor |
| 7128 // -- x2 : type info cell |
| 7129 // -- lr : return address
| 7130 // -- sp[0] : last argument
| 7131 // ----------------------------------- |
| 7132 Handle<Object> undefined_sentinel( |
| 7133 masm->isolate()->heap()->undefined_value(), masm->isolate()); |
| 7134 |
| 7135 Register argc = x0; |
| 7136 Register constructor = x1; |
| 7137 Register type_info_cell = x2; |
| 7138 |
| 7139 if (FLAG_debug_code) { |
| 7140 // The array construct code is only set for the global and natives
| 7141 // builtin Array functions, which always have maps.
| 7142 |
| 7143 Label unexpected_map, map_ok; |
| 7144 // Initial map for the builtin Array function should be a map. |
| 7145 __ Ldr(x10, FieldMemOperand(constructor, |
| 7146 JSFunction::kPrototypeOrInitialMapOffset)); |
| 7147 // A Smi check detects both a NULL pointer and a Smi.
| 7148 __ JumpIfSmi(x10, &unexpected_map); |
| 7149 __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok); |
| 7150 __ Bind(&unexpected_map); |
| 7151 __ Abort("Unexpected initial map for Array function"); |
| 7152 __ Bind(&map_ok); |
| 7153 |
| 7154 // In type_info_cell, we expect either undefined or a valid |
| 7155 // JSGlobalPropertyCell. |
| 7156 Label okay_here; |
| 7157 Handle<Map> global_property_cell_map( |
| 7158 masm->isolate()->heap()->global_property_cell_map()); |
| 7159 __ CompareAndBranch(type_info_cell, Operand(undefined_sentinel), |
| 7160 eq, &okay_here); |
| 7161 __ Ldr(x10, FieldMemOperand(type_info_cell, |
| 7162 JSGlobalPropertyCell::kMapOffset)); |
| 7163 __ Cmp(x10, Operand(global_property_cell_map)); |
| 7164 __ Assert(eq, "Expected property cell in type_info_cell"); |
| 7165 __ Bind(&okay_here); |
| 7166 } |
| 7167 |
| 7168 if (FLAG_optimize_constructed_arrays) { |
| 7169 Register kind = x3; |
| 7170 Label no_info, switch_ready; |
| 7171 // Get the elements kind and dispatch on it.
| 7172 __ CompareAndBranch(type_info_cell, Operand(undefined_sentinel), |
| 7173 eq, &no_info); |
| 7174 __ Ldr(kind, FieldMemOperand(type_info_cell, |
| 7175 JSGlobalPropertyCell::kValueOffset)); |
| 7176 __ JumpIfNotSmi(kind, &no_info); |
| 7177 __ SmiUntag(kind); |
| 7178 __ B(&switch_ready); |
| 7179 |
| 7180 __ Bind(&no_info); |
| 7181 __ Mov(kind, GetInitialFastElementsKind()); |
| 7182 __ Bind(&switch_ready); |
| 7183 |
| 7184 if (argument_count_ == ANY) { |
| 7185 Label zero_case, n_case; |
| 7186 __ Cbz(argc, &zero_case); |
| 7187 __ Cmp(argc, 1); |
| 7188 __ B(ne, &n_case); |
| 7189 |
| 7190 // One argument. |
| 7191 CreateArrayDispatchOneArgument(masm); |
| 7192 |
| 7193 __ Bind(&zero_case); |
| 7194 // No arguments. |
| 7195 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm); |
| 7196 |
| 7197 __ Bind(&n_case); |
| 7198 // N arguments. |
| 7199 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm); |
| 7200 |
| 7201 } else if (argument_count_ == NONE) { |
| 7202 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm); |
| 7203 } else if (argument_count_ == ONE) { |
| 7204 CreateArrayDispatchOneArgument(masm); |
| 7205 } else if (argument_count_ == MORE_THAN_ONE) { |
| 7206 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm); |
| 7207 } else { |
| 7208 UNREACHABLE(); |
| 7209 } |
| 7210 } else { |
| 7211 Label generic_constructor; |
| 7212 // Run the native code for the Array function called as a constructor. |
| 7213 ArrayNativeCode(masm, &generic_constructor); |
| 7214 |
| 7215 // Jump to the generic construct code in case the specialized code cannot |
| 7216 // handle the construction. |
| 7217 __ Bind(&generic_constructor); |
| 7218 Handle<Code> generic_construct_stub = |
| 7219 masm->isolate()->builtins()->JSConstructStubGeneric(); |
| 7220 __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); |
| 7221 } |
| 7222 } |
| 7223 |
| 7224 |
| 7225 void InternalArrayConstructorStub::GenerateCase( |
| 7226 MacroAssembler* masm, ElementsKind kind) { |
| 7227 Label zero_case, n_case; |
| 7228 Register argc = x0; |
| 7229 |
| 7230 __ Cbz(argc, &zero_case); |
| 7231 __ CompareAndBranch(argc, 1, ne, &n_case); |
| 7232 |
| 7233 // One argument. |
| 7234 if (IsFastPackedElementsKind(kind)) { |
| 7235 Label normal_sequence; |
| 7236 |
| 7237 // We might need to create a holey array; look at the first argument. |
| 7238 // TODO(jbramley): Is x3 significant? x10 is the convention in A64. |
| 7239 __ Peek(x3, 0); |
| 7240 __ Cbz(x3, &normal_sequence); |
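| // A non-zero length means the new elements start out as holes, so a
| // packed kind would be wrong; tail-call the holey variant instead.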
| 7241 |
| 7242 InternalArraySingleArgumentConstructorStub |
| 7243 stub1_holey(GetHoleyElementsKind(kind)); |
| 7244 __ TailCallStub(&stub1_holey); |
| 7245 |
| 7246 __ Bind(&normal_sequence); |
| 7247 } |
| 7248 InternalArraySingleArgumentConstructorStub stub1(kind); |
| 7249 __ TailCallStub(&stub1); |
| 7250 |
| 7251 __ Bind(&zero_case); |
| 7252 // No arguments. |
| 7253 InternalArrayNoArgumentConstructorStub stub0(kind); |
| 7254 __ TailCallStub(&stub0); |
| 7255 |
| 7256 __ Bind(&n_case); |
| 7257 // N arguments. |
| 7258 InternalArrayNArgumentsConstructorStub stubN(kind); |
| 7259 __ TailCallStub(&stubN); |
| 7260 } |
| 7261 |
| 7262 |
| 7263 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { |
| 7264 // ----------- S t a t e ------------- |
| 7265 // -- x0 : argc |
| 7266 // -- x1 : constructor |
| 7267 // -- lr : return address
| 7268 // -- sp[0] : last argument
| 7269 // ----------------------------------- |
| 7270 Handle<Object> undefined_sentinel( |
| 7271 masm->isolate()->heap()->undefined_value(), masm->isolate()); |
| 7272 |
| 7273 Register constructor = x1; |
| 7274 |
| 7275 if (FLAG_debug_code) { |
| 7276 // The array construct code is only set for the global and natives
| 7277 // builtin Array functions, which always have maps.
| 7278 |
| 7279 Label unexpected_map, map_ok; |
| 7280 // Initial map for the builtin Array function should be a map. |
| 7281 __ Ldr(x10, FieldMemOperand(constructor, |
| 7282 JSFunction::kPrototypeOrInitialMapOffset)); |
| 7283 // A Smi check detects both a NULL pointer and a Smi.
| 7284 __ JumpIfSmi(x10, &unexpected_map); |
| 7285 __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok); |
| 7286 __ Bind(&unexpected_map); |
| 7287 __ Abort("Unexpected initial map for Array function"); |
| 7288 __ Bind(&map_ok); |
| 7289 } |
| 7290 |
| 7291 if (FLAG_optimize_constructed_arrays) { |
| 7292 Register kind = w3; |
| 7293 // Figure out the right elements kind.
| 7294 __ Ldr(x10, FieldMemOperand(constructor, |
| 7295 JSFunction::kPrototypeOrInitialMapOffset)); |
| 7296 |
| 7297 // TODO(jbramley): Add a helper function to read elements kind from an |
| 7298 // existing map. |
| 7299 // Load the map's "bit field 2" into kind.
| 7300 __ Ldr(kind, FieldMemOperand(x10, Map::kBitField2Offset)); |
| 7301 // Retrieve elements_kind from bit field 2. |
| 7302 __ Ubfx(kind, kind, Map::kElementsKindShift, Map::kElementsKindBitCount); |
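| // Ubfx is an unsigned bit-field extract: kind is set to
| // (bit_field2 >> Map::kElementsKindShift), masked to
| // Map::kElementsKindBitCount bits.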
| 7303 |
| 7304 if (FLAG_debug_code) { |
| 7306 __ Cmp(kind, FAST_ELEMENTS);
| 7307 __ Ccmp(kind, FAST_HOLEY_ELEMENTS, ZFlag, ne);
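| // Ccmp performs the second comparison only if the first one failed (ne);
| // otherwise it forces the Z flag. The Assert(eq) below therefore checks
| // kind == FAST_ELEMENTS || kind == FAST_HOLEY_ELEMENTS.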
| 7308 __ Assert(eq, |
| 7309 "Invalid ElementsKind for InternalArray or InternalPackedArray"); |
| 7310 } |
| 7311 |
| 7312 Label fast_elements_case; |
| 7313 __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case); |
| 7314 GenerateCase(masm, FAST_HOLEY_ELEMENTS); |
| 7315 |
| 7316 __ Bind(&fast_elements_case); |
| 7317 GenerateCase(masm, FAST_ELEMENTS); |
| 7318 } else { |
| 7319 Label generic_constructor; |
| 7320 // Run the native code for the Array function called as a constructor.
| 7321 ArrayNativeCode(masm, &generic_constructor); |
| 7322 |
| 7323 // Jump to the generic construct code in case the specialized code cannot |
| 7324 // handle the construction. |
| 7325 __ Bind(&generic_constructor); |
| 7326 Handle<Code> generic_construct_stub = |
| 7327 masm->isolate()->builtins()->JSConstructStubGeneric(); |
| 7328 __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); |
| 7329 } |
| 7330 } |
| 7331 |
| 7332 |
| 7333 #undef __ |
| 7334 |
| 7335 } } // namespace v8::internal |
| 7336 |
| 7337 #endif // V8_TARGET_ARCH_A64 |