| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_ARM64 | 7 #if V8_TARGET_ARCH_ARM64 |
| 8 | 8 |
| 9 #include "src/codegen.h" | 9 #include "src/codegen.h" |
| 10 #include "src/ic/ic.h" | 10 #include "src/ic/ic.h" |
| (...skipping 240 matching lines...) |
| 251 // enough. | 251 // enough. |
| 252 __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset)); | 252 __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset)); |
| 253 STATIC_ASSERT(kInternalizedTag == 0); | 253 STATIC_ASSERT(kInternalizedTag == 0); |
| 254 __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique); | 254 __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique); |
| 255 | 255 |
| 256 __ Bind(&unique); | 256 __ Bind(&unique); |
| 257 // Fall through if the key is a unique name. | 257 // Fall through if the key is a unique name. |
| 258 } | 258 } |
| 259 | 259 |
| 260 | 260 |
| 261 // Neither 'object' nor 'key' is modified by this function. | |
| 262 // | |
| 263 // If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is | |
| 264 // left with the object's elements map. Otherwise, it is used as a scratch | |
| 265 // register. | |
| 266 static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm, | |
| 267 Register object, Register key, | |
| 268 Register map, Register scratch1, | |
| 269 Register scratch2, | |
| 270 Label* unmapped_case, | |
| 271 Label* slow_case) { | |
| 272 DCHECK(!AreAliased(object, key, map, scratch1, scratch2)); | |
| 273 | |
| 274 Heap* heap = masm->isolate()->heap(); | |
| 275 | |
| 276 // Check that the receiver is a JSObject. Because of the elements | |
| 277 // map check later, we do not need to check for interceptors or | |
| 278 // whether it requires access checks. | |
| 279 __ JumpIfSmi(object, slow_case); | |
| 280 // Check that the object is some kind of JSObject. | |
| 281 __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE, slow_case, | |
| 282 lt); | |
| 283 | |
| 284 // Check that the key is a non-negative smi. | |
| 285 __ JumpIfNotSmi(key, slow_case); | |
| 286 __ Tbnz(key, kXSignBit, slow_case); | |
| 287 | |
| 288 // Load the elements object and check its map. | |
| 289 Handle<Map> arguments_map(heap->sloppy_arguments_elements_map()); | |
| 290 __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset)); | |
| 291 __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK); | |
| 292 | |
| 293 // Check if element is in the range of mapped arguments. If not, jump | |
| 294 // to the unmapped lookup. | |
| 295 __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset)); | |
| 296 __ Sub(scratch1, scratch1, Smi::FromInt(2)); | |
| 297 __ Cmp(key, scratch1); | |
| 298 __ B(hs, unmapped_case); | |
| 299 | |
| 300 // Load element index and check whether it is the hole. | |
| 301 static const int offset = | |
| 302 FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag; | |
| 303 | |
| 304 __ Add(scratch1, map, offset); | |
| 305 __ SmiUntag(scratch2, key); | |
| 306 __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2)); | |
| 307 __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case); | |
| 308 | |
| 309 // Compute the address of the context slot and return it as a MemOperand. | |
| 310 __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize)); | |
| 311 __ SmiUntag(scratch1); | |
| 312 __ Lsl(scratch1, scratch1, kPointerSizeLog2); | |
| 313 __ Add(scratch1, scratch1, Context::kHeaderSize - kHeapObjectTag); | |
| 314 // The base of the result (scratch2) is passed to RecordWrite in | |
| 315 // KeyedStoreIC::GenerateSloppyArguments and it must be a HeapObject. | |
| 316 return MemOperand(scratch2, scratch1); | |
| 317 } | |
| 318 | |
| 319 | |
| 320 // On entry, the 'parameter_map' register must hold the parameter map of | |
| 321 // the arguments object; it is overwritten. | |
| 322 static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, | |
| 323 Register key, | |
| 324 Register parameter_map, | |
| 325 Register scratch, | |
| 326 Label* slow_case) { | |
| 327 DCHECK(!AreAliased(key, parameter_map, scratch)); | |
| 328 | |
| 329 // Element is in arguments backing store, which is referenced by the | |
| 330 // second element of the parameter_map. | |
| 331 const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize; | |
| 332 Register backing_store = parameter_map; | |
| 333 __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset)); | |
| 334 Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map()); | |
| 335 __ CheckMap(backing_store, scratch, fixed_array_map, slow_case, | |
| 336 DONT_DO_SMI_CHECK); | |
| 337 __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset)); | |
| 338 __ Cmp(key, scratch); | |
| 339 __ B(hs, slow_case); | |
| 340 | |
| 341 __ Add(backing_store, backing_store, | |
| 342 FixedArray::kHeaderSize - kHeapObjectTag); | |
| 343 __ SmiUntag(scratch, key); | |
| 344 return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2); | |
| 345 } | |
| 346 | |
| 347 | |
| 348 void LoadIC::GenerateNormal(MacroAssembler* masm) { | 261 void LoadIC::GenerateNormal(MacroAssembler* masm) { |
| 349 Register dictionary = x0; | 262 Register dictionary = x0; |
| 350 DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister())); | 263 DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister())); |
| 351 DCHECK(!dictionary.is(LoadDescriptor::NameRegister())); | 264 DCHECK(!dictionary.is(LoadDescriptor::NameRegister())); |
| 352 Label slow; | 265 Label slow; |
| 353 | 266 |
| 354 __ Ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(), | 267 __ Ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(), |
| 355 JSObject::kPropertiesOffset)); | 268 JSObject::kPropertiesOffset)); |
| 356 GenerateDictionaryLoad(masm, &slow, dictionary, | 269 GenerateDictionaryLoad(masm, &slow, dictionary, |
| 357 LoadDescriptor::NameRegister(), x0, x3, x4); | 270 LoadDescriptor::NameRegister(), x0, x3, x4); |
| (...skipping 25 matching lines...) |
| 383 } | 296 } |
| 384 | 297 |
| 385 | 298 |
| 386 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { | 299 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { |
| 387 // The return address is in lr. | 300 // The return address is in lr. |
| 388 __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister()); | 301 __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister()); |
| 389 __ TailCallRuntime(Runtime::kGetProperty, 2, 1); | 302 __ TailCallRuntime(Runtime::kGetProperty, 2, 1); |
| 390 } | 303 } |
| 391 | 304 |
| 392 | 305 |
| 393 void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { | |
| 394 ASM_LOCATION("KeyedStoreIC::GenerateSloppyArguments"); | |
| 395 Label slow, notin; | |
| 396 Register value = StoreDescriptor::ValueRegister(); | |
| 397 Register key = StoreDescriptor::NameRegister(); | |
| 398 Register receiver = StoreDescriptor::ReceiverRegister(); | |
| 399 DCHECK(receiver.is(x1)); | |
| 400 DCHECK(key.is(x2)); | |
| 401 DCHECK(value.is(x0)); | |
| 402 | |
| 403 Register map = x3; | |
| 404 | |
| 405 // These registers are used by GenerateMappedArgumentsLookup to build a | |
| 406 // MemOperand. They are live for as long as the MemOperand is live. | |
| 407 Register mapped1 = x4; | |
| 408 Register mapped2 = x5; | |
| 409 | |
| 410 MemOperand mapped = GenerateMappedArgumentsLookup( | |
| 411 masm, receiver, key, map, mapped1, mapped2, ¬in, &slow); | |
| 412 Operand mapped_offset = mapped.OffsetAsOperand(); | |
| 413 __ Str(value, mapped); | |
| 414 __ Add(x10, mapped.base(), mapped_offset); | |
| 415 __ Mov(x11, value); | |
| 416 __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs); | |
| 417 __ Ret(); | |
| 418 | |
| 419 __ Bind(¬in); | |
| 420 | |
| 421 // These registers are used by GenerateUnmappedArgumentsLookup to build a | |
| 422 // MemOperand. They are live for as long as the MemOperand is live. | |
| 423 Register unmapped1 = map; // This is assumed to alias 'map'. | |
| 424 Register unmapped2 = x4; | |
| 425 MemOperand unmapped = | |
| 426 GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow); | |
| 427 Operand unmapped_offset = unmapped.OffsetAsOperand(); | |
| 428 __ Str(value, unmapped); | |
| 429 __ Add(x10, unmapped.base(), unmapped_offset); | |
| 430 __ Mov(x11, value); | |
| 431 __ RecordWrite(unmapped.base(), x10, x11, kLRHasNotBeenSaved, | |
| 432 kDontSaveFPRegs); | |
| 433 __ Ret(); | |
| 434 __ Bind(&slow); | |
| 435 GenerateMiss(masm); | |
| 436 } | |
| 437 | |
| 438 | |
| 439 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { | 306 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { |
| 440 // The return address is in lr. | 307 // The return address is in lr. |
| 441 Isolate* isolate = masm->isolate(); | 308 Isolate* isolate = masm->isolate(); |
| 442 | 309 |
| 443 DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::SlotRegister(), | 310 DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::SlotRegister(), |
| 444 LoadWithVectorDescriptor::VectorRegister())); | 311 LoadWithVectorDescriptor::VectorRegister())); |
| 445 __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11); | 312 __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11); |
| 446 | 313 |
| 447 __ Push(LoadWithVectorDescriptor::ReceiverRegister(), | 314 __ Push(LoadWithVectorDescriptor::ReceiverRegister(), |
| 448 LoadWithVectorDescriptor::NameRegister(), | 315 LoadWithVectorDescriptor::NameRegister(), |
| (...skipping 532 matching lines...) |
| 981 } else { | 848 } else { |
| 982 DCHECK(to_patch->Mask(TestBranchMask) == TBNZ); | 849 DCHECK(to_patch->Mask(TestBranchMask) == TBNZ); |
| 983 // This is JumpIfSmi(smi_reg, branch_imm). | 850 // This is JumpIfSmi(smi_reg, branch_imm). |
| 984 patcher.tbz(smi_reg, 0, branch_imm); | 851 patcher.tbz(smi_reg, 0, branch_imm); |
| 985 } | 852 } |
| 986 } | 853 } |
| 987 } | 854 } |
| 988 } // namespace v8::internal | 855 } // namespace v8::internal |
| 989 | 856 |
| 990 #endif // V8_TARGET_ARCH_ARM64 | 857 #endif // V8_TARGET_ARCH_ARM64 |
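For context, here is a hedged C++ sketch of the lookup that the deleted GenerateMappedArgumentsLookup / GenerateUnmappedArgumentsLookup pair encodes in ARM64 assembly above. It is illustrative only; the struct and function names are not V8's. For a sloppy-arguments object, elements slot 0 holds the context, slot 1 holds the FixedArray backing store, and slot 2 + i holds either a smi context-slot index (argument i is mapped) or the hole (unmapped).

#include <cstddef>
#include <cstdint>

using Object = void*;

// ARM64 V8 keeps the smi payload in the upper 32 bits of a tagged word, so
// untagging is an arithmetic shift right by 32 (kSmiShift in the real code).
constexpr int kSmiShift = 32;

struct SloppyArgumentsElements {
  size_t length;  // Slot count, including the two header slots.
  Object* slots;  // slots[0] = context, slots[1] = backing store, then entries.
};

// Returns the address a keyed store on arguments should write to, or nullptr
// where the IC would branch to the slow case.
Object* LookupSloppyArgumentSlot(const SloppyArgumentsElements* elements,
                                 Object* context_slots, Object* backing_store,
                                 size_t backing_length, size_t key,
                                 Object the_hole) {
  // Mapped arguments occupy slots 2 .. length - 1, i.e. keys 0 .. length - 3.
  if (key < elements->length - 2) {
    Object entry = elements->slots[2 + key];
    if (entry != the_hole) {
      // Mapped: 'entry' is a smi index into the function's context.
      size_t slot = static_cast<size_t>(
          reinterpret_cast<intptr_t>(entry) >> kSmiShift);
      return &context_slots[slot];
    }
  }
  // Unmapped: the value lives in the backing store, after a bounds check.
  if (key < backing_length) return &backing_store[key];
  return nullptr;
}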
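The tail of the diff shows the inline smi-check patcher. Because a smi is identified by its tag bit (bit 0 of the tagged word) being clear, JumpIfSmi assembles to "tbz reg, #0, target" and JumpIfNotSmi to "tbnz reg, #0, target"; patching an inlined check simply replaces one test-bit branch with the other. Below is a hedged sketch of the encoding-level idea, assuming the A64 rule that bit 24 distinguishes TBZ from TBNZ; the real code re-assembles the instruction through the patcher object shown above rather than flipping bits directly.

#include <cstdint>

// Flip a tbz on the smi tag bit into the equivalent tbnz, or back. Assumes
// 'instr' is already known to be a test-and-branch instruction.
inline uint32_t FlipInlinedSmiCheck(uint32_t instr) {
  constexpr uint32_t kTestBranchOpBit = 1u << 24;  // 0 = tbz, 1 = tbnz.
  return instr ^ kTestBranchOpBit;
}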