// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM64

#include "src/arm64/assembler-arm64.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/disasm.h"
#include "src/ic-inl.h"
#include "src/runtime.h"
#include "src/stub-cache.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm)


| 24 // "type" holds an instance type on entry and is not clobbered. | |
| 25 // Generated code branch on "global_object" if type is any kind of global | |
| 26 // JS object. | |
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
                                            Register type,
                                            Label* global_object) {
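  // The three candidate types are checked with a chain of conditional
  // compares: each Ccmp only performs its comparison while the previous check
  // failed (ne); once a check matches, the flags are forced to ZFlag, so the
  // single eq branch below fires if any of the three types matched.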
  __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
  __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
  __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
  __ B(eq, global_object);
}


// Helper function used from LoadIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done.
// The scratch registers need to be different from elements, name and result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm,
                                   Label* miss,
                                   Register elements,
                                   Register name,
                                   Register result,
                                   Register scratch1,
                                   Register scratch2) {
  DCHECK(!AreAliased(elements, name, scratch1, scratch2));
  DCHECK(!AreAliased(result, scratch1, scratch2));

  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss,
                                                   &done,
                                                   elements,
                                                   name,
                                                   scratch1,
                                                   scratch2);

  // If probing finds an entry, check that the value is a normal property.
  __ Bind(&done);

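  // A NameDictionary entry is a (key, value, details) triple stored inline in
  // the elements array, so the value lives one word and the details smi two
  // words past the entry's key slot. GeneratePositiveLookup has left scratch2
  // holding the dictionary base offset by the scaled index of the matching
  // entry, so those slots can be addressed relative to it.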
  static const int kElementsStartOffset = NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
  __ B(ne, miss);

  // Get the value at the masked, scaled index and return.
  __ Ldr(result,
         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store (never clobbered).
//
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm,
                                    Label* miss,
                                    Register elements,
                                    Register name,
                                    Register value,
                                    Register scratch1,
                                    Register scratch2) {
  DCHECK(!AreAliased(elements, name, value, scratch1, scratch2));

  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss,
                                                   &done,
                                                   elements,
                                                   name,
                                                   scratch1,
                                                   scratch2);

  // If probing finds an entry in the dictionary, check that the value
  // is a normal property that is not read only.
  __ Bind(&done);

  static const int kElementsStartOffset = NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  static const int kTypeAndReadOnlyMask =
      PropertyDetails::TypeField::kMask |
      PropertyDetails::AttributesField::encode(READ_ONLY);
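  // On arm64 a smi keeps its payload in the upper 32 bits, so loading the
  // high word with Ldrsw via UntagSmiFieldMemOperand yields the untagged,
  // sign-extended details value in a single instruction.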
  __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
  __ Tst(scratch1, kTypeAndReadOnlyMask);
  __ B(ne, miss);

  // Store the value at the masked, scaled index and return.
  static const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
  __ Str(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ Mov(scratch1, value);
  __ RecordWrite(
      elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
}


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS objects and returns the map of the
// receiver in 'map_scratch' if the receiver is not a smi.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver,
                                           Register map_scratch,
                                           Register scratch,
                                           int interceptor_bit,
                                           Label* slow) {
  DCHECK(!AreAliased(map_scratch, scratch));

  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
  __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
  __ Tbnz(scratch, interceptor_bit, slow);

  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object, we enter the
  // runtime system to make sure that indexing into string objects works
  // as intended.
  STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
  __ Cmp(scratch, JS_OBJECT_TYPE);
  __ B(lt, slow);
}


// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
//
// receiver     - holds the receiver on entry.
//                Unchanged unless 'result' is the same register.
//
// key          - holds the smi key on entry.
//                Unchanged unless 'result' is the same register.
//
// elements     - holds the elements of the receiver on exit.
//
// elements_map - holds the elements map on exit if the not_fast_array branch
//                is taken. Otherwise, this is used as a scratch register.
//
// result       - holds the result on exit if the load succeeded.
//                Allowed to be the same as 'receiver' or 'key'.
//                Unchanged on bailout so 'receiver' and 'key' can be safely
//                used by further computation.
static void GenerateFastArrayLoad(MacroAssembler* masm,
                                  Register receiver,
                                  Register key,
                                  Register elements,
                                  Register elements_map,
                                  Register scratch2,
                                  Register result,
                                  Label* not_fast_array,
                                  Label* slow) {
  DCHECK(!AreAliased(receiver, key, elements, elements_map, scratch2));

  // Check for fast array.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode and writable.
    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
                     not_fast_array);
  } else {
    __ AssertFastElements(elements);
  }

  // The elements_map register is only used for the not_fast_array path, which
  // was handled above. From this point onward it is a scratch register.
  Register scratch1 = elements_map;

  // Check that the key (index) is within bounds.
  __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(key, scratch1);
  __ B(hs, slow);

  // Fast case: Do the load.
  __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiUntag(scratch2, key);
  __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));

  // In case the loaded value is the_hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);

  // Move the value to the result register.
  // 'result' can alias with 'receiver' or 'key' but these two must be
  // preserved if we jump to 'slow'.
  __ Mov(result, scratch2);
}


// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
// The map of the key is returned in 'map_scratch'.
// If the jump to 'index_string' is done, the hash of the key is left
// in 'hash_scratch'.
static void GenerateKeyNameCheck(MacroAssembler* masm,
                                 Register key,
                                 Register map_scratch,
                                 Register hash_scratch,
                                 Label* index_string,
                                 Label* not_unique) {
  DCHECK(!AreAliased(key, map_scratch, hash_scratch));

  // Is the key a name?
  Label unique;
  __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
                      not_unique, hi);
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ B(eq, &unique);

  // Is the string an array index with cached numeric value?
  __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
  __ TestAndBranchIfAllClear(hash_scratch,
                             Name::kContainsCachedArrayIndexMask,
                             index_string);

  // Is the string internalized? We know it's a string, so a single bit test is
  // enough.
  __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);

  __ Bind(&unique);
  // Fall through if the key is a unique name.
}


// Neither 'object' nor 'key' is modified by this function.
//
// If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is
// left with the object's elements map. Otherwise, it is used as a scratch
// register.
static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                                Register object,
                                                Register key,
                                                Register map,
                                                Register scratch1,
                                                Register scratch2,
                                                Label* unmapped_case,
                                                Label* slow_case) {
  DCHECK(!AreAliased(object, key, map, scratch1, scratch2));

  Heap* heap = masm->isolate()->heap();

  // Check that the receiver is a JSObject. Because of the elements
  // map check later, we do not need to check for interceptors or
  // whether it requires access checks.
  __ JumpIfSmi(object, slow_case);
  // Check that the object is some kind of JSObject.
  __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE,
                      slow_case, lt);

  // Check that the key is a positive smi.
  __ JumpIfNotSmi(key, slow_case);
  __ Tbnz(key, kXSignBit, slow_case);
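  // A negative smi keeps the sign bit of the X register set, so the single
  // Tbnz above rejects negative keys without having to untag them.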

  // Load the elements object and check its map.
  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
  __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
  __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);

  // Check if element is in the range of mapped arguments. If not, jump
  // to the unmapped lookup.
  __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset));
  __ Sub(scratch1, scratch1, Smi::FromInt(2));
  __ Cmp(key, scratch1);
  __ B(hs, unmapped_case);

  // Load element index and check whether it is the hole.
  static const int offset =
      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
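  // The first two slots of a sloppy-arguments parameter map hold the context
  // and the arguments backing store; that is why two smis were subtracted
  // from the length above and why the mapped entries start at index 2 here.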

  __ Add(scratch1, map, offset);
  __ SmiUntag(scratch2, key);
  __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
  __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);

  // Load value from context and return it.
  __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize));
  __ SmiUntag(scratch1);
  __ Lsl(scratch1, scratch1, kPointerSizeLog2);
  __ Add(scratch1, scratch1, Context::kHeaderSize - kHeapObjectTag);
  // The base of the result (scratch2) is passed to RecordWrite in
  // KeyedStoreIC::GenerateSloppyArguments and it must be a HeapObject.
  return MemOperand(scratch2, scratch1);
}


// The 'parameter_map' register must be loaded with the parameter map of the
// arguments object and is overwritten.
static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  DCHECK(!AreAliased(key, parameter_map, scratch));

  // Element is in arguments backing store, which is referenced by the
  // second element of the parameter_map.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
  Register backing_store = parameter_map;
  __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
  __ CheckMap(
      backing_store, scratch, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
  __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
  __ Cmp(key, scratch);
  __ B(hs, slow_case);

  __ Add(backing_store,
         backing_store,
         FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiUntag(scratch, key);
  return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
}


void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  // The return address is in lr.
  Register receiver = ReceiverRegister();
  Register name = NameRegister();
  DCHECK(receiver.is(x1));
  DCHECK(name.is(x2));

  // Probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::LOAD_IC));
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, receiver, name, x3, x4, x5, x6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = x0;
  DCHECK(!dictionary.is(ReceiverRegister()));
  DCHECK(!dictionary.is(NameRegister()));
  Label slow;

  __ Ldr(dictionary,
         FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), x0, x3, x4);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ Bind(&slow);
  GenerateRuntimeGetProperty(masm);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();
  ASM_LOCATION("LoadIC::GenerateMiss");

  __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);

  // Perform tail call to the entry.
  __ Push(ReceiverRegister(), NameRegister());
  ExternalReference ref =
      ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.
  __ Push(ReceiverRegister(), NameRegister());
  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}


void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
  // The return address is in lr.
  Register result = x0;
  Register receiver = ReceiverRegister();
  Register key = NameRegister();
  DCHECK(receiver.is(x1));
  DCHECK(key.is(x2));

  Label miss, unmapped;

  Register map_scratch = x0;
  MemOperand mapped_location = GenerateMappedArgumentsLookup(
      masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss);
  __ Ldr(result, mapped_location);
  __ Ret();

  __ Bind(&unmapped);
  // Parameter map is left in map_scratch when a jump on unmapped is done.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss);
  __ Ldr(result, unmapped_location);
  __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &miss);
  __ Ret();

  __ Bind(&miss);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateSloppyArguments");
  Label slow, notin;
  Register value = ValueRegister();
  Register key = NameRegister();
  Register receiver = ReceiverRegister();
  DCHECK(receiver.is(x1));
  DCHECK(key.is(x2));
  DCHECK(value.is(x0));

  Register map = x3;

  // These registers are used by GenerateMappedArgumentsLookup to build a
  // MemOperand. They are live for as long as the MemOperand is live.
  Register mapped1 = x4;
  Register mapped2 = x5;

  MemOperand mapped =
      GenerateMappedArgumentsLookup(masm, receiver, key, map,
                                    mapped1, mapped2,
                                    &notin, &slow);
  Operand mapped_offset = mapped.OffsetAsOperand();
  __ Str(value, mapped);
  __ Add(x10, mapped.base(), mapped_offset);
  __ Mov(x11, value);
  __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();

  __ Bind(&notin);

  // These registers are used by GenerateUnmappedArgumentsLookup to build a
  // MemOperand. They are live for as long as the MemOperand is live.
  Register unmapped1 = map;  // This is assumed to alias 'map'.
  Register unmapped2 = x4;
  MemOperand unmapped =
      GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
  Operand unmapped_offset = unmapped.OffsetAsOperand();
  __ Str(value, unmapped);
  __ Add(x10, unmapped.base(), unmapped_offset);
  __ Mov(x11, value);
  __ RecordWrite(unmapped.base(), x10, x11,
                 kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();
  __ Bind(&slow);
  GenerateMiss(masm);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);

  __ Push(ReceiverRegister(), NameRegister());

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);

  __ TailCallExternalReference(ref, 2, 1);
}


// IC register specifications
const Register LoadIC::ReceiverRegister() { return x1; }
const Register LoadIC::NameRegister() { return x2; }

const Register LoadIC::SlotRegister() {
  DCHECK(FLAG_vector_ics);
  return x0;
}


const Register LoadIC::VectorRegister() {
  DCHECK(FLAG_vector_ics);
  return x3;
}


const Register StoreIC::ReceiverRegister() { return x1; }
const Register StoreIC::NameRegister() { return x2; }
const Register StoreIC::ValueRegister() { return x0; }


const Register KeyedStoreIC::MapRegister() {
  return x3;
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.
  __ Push(ReceiverRegister(), NameRegister());
  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}


static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm,
                                        Register key,
                                        Register receiver,
                                        Register scratch1,
                                        Register scratch2,
                                        Register scratch3,
                                        Register scratch4,
                                        Register scratch5,
                                        Label* slow) {
  DCHECK(!AreAliased(
      key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));

  Isolate* isolate = masm->isolate();
  Label check_number_dictionary;
  // If we can load the value, it should be returned in x0.
  Register result = x0;

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, scratch1, scratch2, Map::kHasIndexedInterceptor, slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);

  GenerateFastArrayLoad(
      masm, receiver, key, scratch3, scratch2, scratch1, result, NULL, slow);
  __ IncrementCounter(
      isolate->counters()->keyed_load_generic_smi(), 1, scratch1, scratch2);
  __ Ret();

  __ Bind(&check_number_dictionary);
  __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));

  // Check whether we have a number dictionary.
  __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);

  __ LoadFromNumberDictionary(
      slow, scratch3, key, result, scratch1, scratch2, scratch4, scratch5);
  __ Ret();
}

static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
                                         Register key,
                                         Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
                                         Register scratch4,
                                         Register scratch5,
                                         Label* slow) {
  DCHECK(!AreAliased(
      key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));

  Isolate* isolate = masm->isolate();
  Label probe_dictionary, property_array_property;
  // If we can load the value, it should be returned in x0.
  Register result = x0;

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, scratch1, scratch2, Map::kHasNamedInterceptor, slow);

  // If the receiver is a fast-case object, check the keyed lookup cache.
  // Otherwise probe the dictionary.
  __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);

  // We keep the map of the receiver in scratch1.
  Register receiver_map = scratch1;

  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the name hash.
  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Mov(scratch2, Operand(receiver_map, ASR, KeyedLookupCache::kMapHashShift));
  __ Ldr(scratch3.W(), FieldMemOperand(key, Name::kHashFieldOffset));
  __ Eor(scratch2, scratch2, Operand(scratch3, ASR, Name::kHashShift));
  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
  __ And(scratch2, scratch2, mask);

  // Load the key (consisting of map and unique name) from the cache and
  // check for match.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);

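  // Each bucket entry in the cache is a (map, name) pair, i.e. two pointers,
  // hence the extra shift by one when scaling the bucket index into a byte
  // offset.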
  __ Mov(scratch3, cache_keys);
  __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    // Load the map and advance scratch3 to the next entry.
    __ Ldr(scratch4, MemOperand(scratch3, kPointerSize * 2, PostIndex));
    __ Cmp(receiver_map, scratch4);
    __ B(ne, &try_next_entry);
    __ Ldr(scratch4, MemOperand(scratch3, -kPointerSize));  // Load the name.
    __ Cmp(key, scratch4);
    __ B(eq, &hit_on_nth_entry[i]);
    __ Bind(&try_next_entry);
  }

  // Last entry.
  __ Ldr(scratch4, MemOperand(scratch3, kPointerSize, PostIndex));
  __ Cmp(receiver_map, scratch4);
  __ B(ne, slow);
  __ Ldr(scratch4, MemOperand(scratch3));
  __ Cmp(key, scratch4);
  __ B(ne, slow);

  // Get field offset.
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);

  // Hit on nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ Bind(&hit_on_nth_entry[i]);
    __ Mov(scratch3, cache_field_offsets);
    if (i != 0) {
      __ Add(scratch2, scratch2, i);
    }
    __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
    __ Ldrb(scratch5,
            FieldMemOperand(receiver_map, Map::kInObjectPropertiesOffset));
    __ Subs(scratch4, scratch4, scratch5);
    __ B(ge, &property_array_property);
    if (i != 0) {
      __ B(&load_in_object_property);
    }
  }

  // Load in-object property.
  __ Bind(&load_in_object_property);
  __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
  __ Add(scratch5, scratch5, scratch4);  // Index from start of object.
  __ Sub(receiver, receiver, kHeapObjectTag);  // Remove the heap tag.
  __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1, scratch1, scratch2);
  __ Ret();

  // Load property array property.
  __ Bind(&property_array_property);
  __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1, scratch1, scratch2);
  __ Ret();

  // Do a quick inline probe of the receiver's dictionary, if it exists.
  __ Bind(&probe_dictionary);
  __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
  // Load the property.
  GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
                      1, scratch1, scratch2);
  __ Ret();
}


void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // The return address is in lr.
  Label slow, check_name, index_smi, index_name;

  Register key = NameRegister();
  Register receiver = ReceiverRegister();
  DCHECK(key.is(x2));
  DCHECK(receiver.is(x1));

  __ JumpIfNotSmi(key, &check_name);
  __ Bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.
  GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);

  // Slow case.
  __ Bind(&slow);
  __ IncrementCounter(
      masm->isolate()->counters()->keyed_load_generic_slow(), 1, x4, x3);
  GenerateRuntimeGetProperty(masm);

  __ Bind(&check_name);
  GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow);

  GenerateKeyedLoadWithNameKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);

  __ Bind(&index_name);
  __ IndexFromHash(x3, key);
  // Now jump to the place where smi keys are handled.
  __ B(&index_smi);
}


void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // Return address is in lr.
  Label miss;

  Register receiver = ReceiverRegister();
  Register index = NameRegister();
  Register result = x0;
  Register scratch = x3;
  DCHECK(!scratch.is(receiver) && !scratch.is(index));

  StringCharAtGenerator char_at_generator(receiver,
                                          index,
                                          scratch,
                                          result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ Bind(&miss);
  GenerateMiss(masm);
}


void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
  // Return address is in lr.
  Label slow;

  Register receiver = ReceiverRegister();
  Register key = NameRegister();
  Register scratch1 = x3;
  Register scratch2 = x4;
  DCHECK(!AreAliased(scratch1, scratch2, receiver, key));

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &slow);

  // Check that the key is an array index, that is, a Uint32.
  __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);

  // Get the map of the receiver.
  Register map = scratch1;
  __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));

  // Check that it has indexed interceptor and access checks
  // are not enabled for this object.
  __ Ldrb(scratch2, FieldMemOperand(map, Map::kBitFieldOffset));
  DCHECK(kSlowCaseBitFieldMask ==
      ((1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor)));
  __ Tbnz(scratch2, Map::kIsAccessCheckNeeded, &slow);
  __ Tbz(scratch2, Map::kHasIndexedInterceptor, &slow);

  // Everything is fine, call runtime.
  __ Push(receiver, key);
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(kLoadElementWithInterceptor),
                        masm->isolate()),
      2, 1);

  __ Bind(&slow);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateMiss");

  // Push receiver, key and value for runtime call.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateSlow");

  // Push receiver, key and value for runtime call.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                              StrictMode strict_mode) {
  ASM_LOCATION("KeyedStoreIC::GenerateRuntimeSetProperty");

  // Push receiver, key and value for runtime call.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  // Push strict_mode for runtime call.
  __ Mov(x10, Smi::FromInt(strict_mode));
  __ Push(x10);

  __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
}


static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm,
    Label* fast_object,
    Label* fast_double,
    Label* slow,
    KeyedStoreCheckMap check_map,
    KeyedStoreIncrementLength increment_length,
    Register value,
    Register key,
    Register receiver,
    Register receiver_map,
    Register elements_map,
    Register elements) {
  DCHECK(!AreAliased(
      value, key, receiver, receiver_map, elements_map, elements, x10, x11));

  Label transition_smi_elements;
  Label transition_double_elements;
  Label fast_double_without_map_check;
  Label non_double_value;
  Label finish_store;

  __ Bind(fast_object);
  if (check_map == kCheckMap) {
    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Cmp(elements_map,
           Operand(masm->isolate()->factory()->fixed_array_map()));
    __ B(ne, fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed;
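  // The key is a tagged smi, so Operand::UntagSmiAndScale converts it into a
  // scaled byte offset in a single operand and no separate untagging
  // instruction is needed.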
  __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Ldr(x11, MemOperand(x10));
  __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
  __ Bind(&holecheck_passed);

  // Smi stores don't require further checks.
  __ JumpIfSmi(value, &finish_store);

  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);

  __ Bind(&finish_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Smi::FromInt(1));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }

  Register address = x11;
  __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Str(value, MemOperand(address));

  Label dont_record_write;
  __ JumpIfSmi(value, &dont_record_write);

  // Update write barrier for the elements array address.
  __ Mov(x10, value);  // Preserve the value which is returned.
  __ RecordWrite(elements,
                 address,
                 x10,
                 kLRHasNotBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);

  __ Bind(&dont_record_write);
  __ Ret();


  __ Bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so go to
  // the runtime.
  __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Ldr(x11, MemOperand(x10));
  __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);

  __ Bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value,
                                 key,
                                 elements,
                                 x10,
                                 d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Smi::FromInt(1));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();


  __ Bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
  __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_DOUBLE_ELEMENTS,
                                         receiver_map,
                                         x10,
                                         x11,
                                         slow);
  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
                                                    FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&fast_double_without_map_check);

  __ Bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_ELEMENTS,
                                         receiver_map,
                                         x10,
                                         x11,
                                         slow);

  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);

  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);

  __ Bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
                                         FAST_ELEMENTS,
                                         receiver_map,
                                         x10,
                                         x11,
                                         slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);
}


void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                   StrictMode strict_mode) {
  ASM_LOCATION("KeyedStoreIC::GenerateGeneric");
  Label slow;
  Label array;
  Label fast_object;
  Label extra;
  Label fast_object_grow;
  Label fast_double_grow;
  Label fast_double;

  Register value = ValueRegister();
  Register key = NameRegister();
  Register receiver = ReceiverRegister();
  DCHECK(receiver.is(x1));
  DCHECK(key.is(x2));
  DCHECK(value.is(x0));

  Register receiver_map = x3;
  Register elements = x4;
  Register elements_map = x5;

  __ JumpIfNotSmi(key, &slow);
  __ JumpIfSmi(receiver, &slow);
  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));

  // Check that the receiver does not require access checks and is not
  // observed. The generic stub does not perform map checks or handle observed
  // objects.
  __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ TestAndBranchIfAnySet(
      x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);

  // Check if the object is a JS array or not.
  Register instance_type = x10;
  __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
  __ B(eq, &array);
  // Check that the object is some kind of JSObject.
  __ Cmp(instance_type, FIRST_JS_OBJECT_TYPE);
  __ B(lt, &slow);

  // Object case: Check key against length in the elements array.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(hi, &fast_object);


  __ Bind(&slow);
  // Slow case, handle jump to runtime.
  // Live values:
  //  x0: value
  //  x1: receiver
  //  x2: key
  GenerateRuntimeSetProperty(masm, strict_mode);


  __ Bind(&extra);
  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].

  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(ls, &slow);

  __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ B(eq, &fast_object_grow);
  __ Cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ B(eq, &fast_double_grow);
  __ B(&slow);


  __ Bind(&array);
  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.

  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(eq, &extra);  // We can handle the case where we are appending 1 element.
  __ B(lo, &slow);

  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
                                  &slow, kCheckMap, kDontIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  Register receiver = ReceiverRegister();
  Register name = NameRegister();
  DCHECK(!AreAliased(receiver, name, ValueRegister(), x3, x4, x5, x6));

  // Probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, receiver, name, x3, x4, x5, x6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  // Tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register value = ValueRegister();
  Register receiver = ReceiverRegister();
  Register name = NameRegister();
  Register dictionary = x3;
  DCHECK(!AreAliased(value, receiver, name, x3, x4, x5));

  __ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
  __ Ret();

  // Cache miss: Jump to runtime.
  __ Bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
  GenerateMiss(masm);
}


void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                         StrictMode strict_mode) {
  ASM_LOCATION("StoreIC::GenerateRuntimeSetProperty");

  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  __ Mov(x10, Smi::FromInt(strict_mode));
  __ Push(x10);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
}


void StoreIC::GenerateSlow(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- x0    : value
  //  -- x1    : receiver
  //  -- x2    : name
  //  -- lr    : return address
  // -----------------------------------

  // Push receiver, name and value for runtime call.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return al;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address info_address =
      Assembler::return_address_from_call_start(address);

  InstructionSequence* patch_info = InstructionSequence::At(info_address);
  return patch_info->IsInlineData();
}


// Activate a SMI fast-path by patching the instructions generated by
// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
// JumpPatchSite::EmitPatchInfo().
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  // The patch information is encoded in the instruction stream using
  // instructions which have no side effects, so we can safely execute them.
  // The patch information is encoded directly after the call to the helper
  // function which is requesting this patch operation.
  Address info_address =
      Assembler::return_address_from_call_start(address);
  InlineSmiCheckInfo info(info_address);

  // Check and decode the patch information instruction.
  if (!info.HasSmiCheck()) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n",
           address, info_address, reinterpret_cast<void*>(info.SmiCheck()));
  }

  // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
  // and JumpPatchSite::EmitJumpIfSmi().
  // Changing
  //   tb(n)z xzr, #0, <target>
  // to
  //   tb(!n)z test_reg, #0, <target>
  Instruction* to_patch = info.SmiCheck();
  PatchingAssembler patcher(to_patch, 1);
  DCHECK(to_patch->IsTestBranch());
  DCHECK(to_patch->ImmTestBranchBit5() == 0);
  DCHECK(to_patch->ImmTestBranchBit40() == 0);

  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagMask == 1);

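  // Because the smi tag is the low bit and zero for smis (asserted above),
  // testing bit 0 with tbz/tbnz is all an inlined smi check needs.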
  int branch_imm = to_patch->ImmTestBranch();
  Register smi_reg;
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(to_patch->Rt() == xzr.code());
    smi_reg = info.SmiRegister();
  } else {
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    DCHECK(to_patch->Rt() != xzr.code());
    smi_reg = xzr;
  }

  if (to_patch->Mask(TestBranchMask) == TBZ) {
    // This is JumpIfNotSmi(smi_reg, branch_imm).
    patcher.tbnz(smi_reg, 0, branch_imm);
  } else {
    DCHECK(to_patch->Mask(TestBranchMask) == TBNZ);
    // This is JumpIfSmi(smi_reg, branch_imm).
    patcher.tbz(smi_reg, 0, branch_imm);
  }
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM64