// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_A64

#include "a64/assembler-a64.h"
#include "code-stubs.h"
#include "codegen.h"
#include "disasm.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm)


// "type" holds an instance type on entry and is not clobbered.
// Generated code branches to "global_object" if the type is any kind of
// global JS object.
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
                                            Register type,
                                            Label* global_object) {
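  // Each Ccmp below performs its comparison only if the previous flags were
  // "ne"; otherwise it sets the flags to ZFlag, i.e. "eq". The chain thus
  // ORs the three equality tests together: on exit the flags are "eq" iff
  // the type matched any of the three global object types.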
  __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
  __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
  __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
  __ B(eq, global_object);
}


// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
//
// "receiver" holds the receiver on entry and is unchanged.
// "elements" holds the property dictionary on fall through.
static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
                                                Register receiver,
                                                Register elements,
                                                Register scratch0,
                                                Register scratch1,
                                                Label* miss) {
  ASSERT(!AreAliased(receiver, elements, scratch0, scratch1));

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss);

  // Check that the receiver is a valid JS object.
  // Let t be the object instance type, we want:
  //   FIRST_SPEC_OBJECT_TYPE <= t <= LAST_SPEC_OBJECT_TYPE.
  // Since LAST_SPEC_OBJECT_TYPE is the last possible instance type we only
  // check the lower bound.
  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);

  __ JumpIfObjectType(receiver, scratch0, scratch1, FIRST_SPEC_OBJECT_TYPE,
                      miss, lt);

  // scratch0 now contains the map of the receiver and scratch1 the object
  // type.
  Register map = scratch0;
  Register type = scratch1;

  // Check if the receiver is a global JS object.
  GenerateGlobalInstanceTypeCheck(masm, type, miss);

  // Check that the object does not require access checks.
  __ Ldrb(scratch1, FieldMemOperand(map, Map::kBitFieldOffset));
  __ Tbnz(scratch1, Map::kIsAccessCheckNeeded, miss);
  __ Tbnz(scratch1, Map::kHasNamedInterceptor, miss);

  // Check that the properties dictionary is valid.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ JumpIfNotRoot(scratch1, Heap::kHashTableMapRootIndex, miss);
}


// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done.
// The scratch registers need to be different from elements, name and result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm,
                                   Label* miss,
                                   Register elements,
                                   Register name,
                                   Register result,
                                   Register scratch1,
                                   Register scratch2) {
  ASSERT(!AreAliased(elements, name, scratch1, scratch2));
  ASSERT(!AreAliased(result, scratch1, scratch2));

  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss,
                                                   &done,
                                                   elements,
                                                   name,
                                                   scratch1,
                                                   scratch2);

  // If probing finds an entry, check that the value is a normal property.
  __ Bind(&done);

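  // GeneratePositiveLookup leaves scratch2 pointing at the found entry,
  // offset from 'elements'. Each NameDictionary entry is a (key, value,
  // details) triple of consecutive pointer-sized slots, so the value is
  // read at one pointer past kElementsStartOffset and the details word at
  // two pointers past it.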
  static const int kElementsStartOffset = NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ Tst(scratch1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  __ B(ne, miss);

  // Get the value at the masked, scaled index and return.
  __ Ldr(result,
         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store (never clobbered).
//
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm,
                                    Label* miss,
                                    Register elements,
                                    Register name,
                                    Register value,
                                    Register scratch1,
                                    Register scratch2) {
  ASSERT(!AreAliased(elements, name, value, scratch1, scratch2));

  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss,
                                                   &done,
                                                   elements,
                                                   name,
                                                   scratch1,
                                                   scratch2);

  // If probing finds an entry in the dictionary, check that the value
  // is a normal property that is not read only.
  __ Bind(&done);

  static const int kElementsStartOffset = NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  static const int kTypeAndReadOnlyMask =
      PropertyDetails::TypeField::kMask |
      PropertyDetails::AttributesField::encode(READ_ONLY);
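  // A NORMAL property has a zero type field, so any bit left after masking
  // means the property is either not NORMAL or is read-only; both cases
  // must be handled in the runtime.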
  __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
  __ Tst(scratch1, kTypeAndReadOnlyMask);
  __ B(ne, miss);

  // Store the value at the masked, scaled index and return.
  static const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
  __ Str(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ Mov(scratch1, value);
  __ RecordWrite(
      elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
}


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS objects and returns the map of the
// receiver in 'map_scratch' if the receiver is not a smi.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver,
                                           Register map_scratch,
                                           Register scratch,
                                           int interceptor_bit,
                                           Label* slow) {
  ASSERT(!AreAliased(map_scratch, scratch));

  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
  __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
  __ Tbnz(scratch, interceptor_bit, slow);

  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object, we enter the
  // runtime system to make sure that indexing into string objects works
  // as intended.
  STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
  __ Cmp(scratch, JS_OBJECT_TYPE);
  __ B(lt, slow);
}


// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
//
// receiver     - holds the receiver on entry.
//                Unchanged unless 'result' is the same register.
//
// key          - holds the smi key on entry.
//                Unchanged unless 'result' is the same register.
//
// elements     - holds the elements of the receiver on exit.
//
// elements_map - holds the elements map on exit if the not_fast_array
//                branch is taken. Otherwise, this is used as a scratch
//                register.
//
// result       - holds the result on exit if the load succeeded.
//                Allowed to be the same as 'receiver' or 'key'.
//                Unchanged on bailout so 'receiver' and 'key' can be safely
//                used by further computation.
static void GenerateFastArrayLoad(MacroAssembler* masm,
                                  Register receiver,
                                  Register key,
                                  Register elements,
                                  Register elements_map,
                                  Register scratch2,
                                  Register result,
                                  Label* not_fast_array,
                                  Label* slow) {
  ASSERT(!AreAliased(receiver, key, elements, elements_map, scratch2));

  // Check for fast array.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode and writable.
    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
                     not_fast_array);
  } else {
    __ AssertFastElements(elements);
  }

  // The elements_map register is only used for the not_fast_array path, which
  // was handled above. From this point onward it is a scratch register.
  Register scratch1 = elements_map;

  // Check that the key (index) is within bounds.
  __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(key, scratch1);
  __ B(hs, slow);
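  // Note that the bounds check uses an unsigned comparison of smis: a
  // negative key has its sign bit set, so it compares higher-or-same ("hs")
  // to any valid length and also takes the slow path.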

  // Fast case: Do the load.
  __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiUntag(scratch2, key);
  __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));

  // In case the loaded value is the_hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);

  // Move the value to the result register.
  // 'result' can alias with 'receiver' or 'key' but these two must be
  // preserved if we jump to 'slow'.
  __ Mov(result, scratch2);
}


// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
// The map of the key is returned in 'map_scratch'.
// If the jump to 'index_string' is done, the hash of the key is left
// in 'hash_scratch'.
static void GenerateKeyNameCheck(MacroAssembler* masm,
                                 Register key,
                                 Register map_scratch,
                                 Register hash_scratch,
                                 Label* index_string,
                                 Label* not_unique) {
  ASSERT(!AreAliased(key, map_scratch, hash_scratch));

  // Is the key a name?
  Label unique;
  __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
                      not_unique, hi);
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ B(eq, &unique);
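  // The "hi" case above rejected anything past LAST_UNIQUE_NAME_TYPE, and
  // the "eq" case matched LAST_UNIQUE_NAME_TYPE itself (the first
  // non-string type, i.e. a symbol, which is always unique). What falls
  // through here is therefore a string, and only its internalized bit
  // remains to be checked.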

  // Is the string an array index with cached numeric value?
  __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
  __ TestAndBranchIfAllClear(hash_scratch,
                             Name::kContainsCachedArrayIndexMask,
                             index_string);

  // Is the string internalized? We know it's a string, so a single bit test
  // is enough.
  __ Ldrb(hash_scratch,
          FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);

  __ Bind(&unique);
  // Fall through if the key is a unique name.
}


// Neither 'object' nor 'key' are modified by this function.
//
// If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is
// left with the object's elements map. Otherwise, it is used as a scratch
// register.
static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                                Register object,
                                                Register key,
                                                Register map,
                                                Register scratch1,
                                                Register scratch2,
                                                Label* unmapped_case,
                                                Label* slow_case) {
  ASSERT(!AreAliased(object, key, map, scratch1, scratch2));

  Heap* heap = masm->isolate()->heap();

  // Check that the receiver is a JSObject. Because of the elements
  // map check later, we do not need to check for interceptors or
  // whether it requires access checks.
  __ JumpIfSmi(object, slow_case);
  // Check that the object is some kind of JSObject.
  __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE,
                      slow_case, lt);

  // Check that the key is a positive smi.
  __ JumpIfNotSmi(key, slow_case);
  __ Tbnz(key, kXSignBit, slow_case);

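  // The elements of a mapped arguments object have the following layout:
  //   [0]   the context holding the mapped parameters
  //   [1]   the arguments backing store (a FixedArray)
  //   [2..] one slot per mapped parameter, holding either the context index
  //         of the parameter (as a smi) or the hole if the slot has been
  //         unmapped.
  // The length of this FixedArray is therefore the number of mapped
  // parameters plus two, which explains the "length - 2" bound check and
  // the "+ 2 * kPointerSize" offset used below.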
  // Load the elements object and check its map.
  Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
  __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
  __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);

  // Check if element is in the range of mapped arguments. If not, jump
  // to the unmapped lookup.
  __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset));
  __ Sub(scratch1, scratch1, Operand(Smi::FromInt(2)));
  __ Cmp(key, scratch1);
  __ B(hs, unmapped_case);

  // Load element index and check whether it is the hole.
  static const int offset =
      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;

  __ Add(scratch1, map, offset);
  __ SmiUntag(scratch2, key);
  __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
  __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);

  // Load value from context and return it.
  __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize));
  __ SmiUntag(scratch1);
  __ Add(scratch2, scratch2, Context::kHeaderSize - kHeapObjectTag);
  return MemOperand(scratch2, scratch1, LSL, kPointerSizeLog2);
}


// The 'parameter_map' register must be loaded with the parameter map of the
// arguments object and is overwritten.
static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  ASSERT(!AreAliased(key, parameter_map, scratch));

  // Element is in arguments backing store, which is referenced by the
  // second element of the parameter_map.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
  Register backing_store = parameter_map;
  __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
  __ CheckMap(
      backing_store, scratch, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
  __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
  __ Cmp(key, scratch);
  __ B(hs, slow_case);

  __ Add(backing_store,
         backing_store,
         FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiUntag(scratch, key);
  return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
}


void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x2    : name
  //  -- lr    : return address
  //  -- x0    : receiver
  // -----------------------------------

  // Probe the stub cache.
  Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, x0, x2, x3, x4, x5, x6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void LoadIC::GenerateNormal(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x2    : name
  //  -- lr    : return address
  //  -- x0    : receiver
  // -----------------------------------
  Label miss;

  GenerateNameDictionaryReceiverCheck(masm, x0, x1, x3, x4, &miss);

  // x1 now holds the property dictionary.
  GenerateDictionaryLoad(masm, &miss, x1, x2, x0, x3, x4);
  __ Ret();

  // Cache miss: Jump to runtime.
  __ Bind(&miss);
  GenerateMiss(masm);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x2    : name
  //  -- lr    : return address
  //  -- x0    : receiver
  // -----------------------------------
  Isolate* isolate = masm->isolate();
  ASM_LOCATION("LoadIC::GenerateMiss");

  __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);

  // TODO(jbramley): Does the target actually expect an argument in x3, or is
  // this inherited from ARM's push semantics?
  __ Mov(x3, x0);
  __ Push(x3, x2);

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- x2    : name
  //  -- lr    : return address
  //  -- x0    : receiver
  // -----------------------------------

  // TODO(jbramley): Does the target actually expect an argument in x3, or is
  // this inherited from ARM's push semantics?
  __ Mov(x3, x0);
  __ Push(x3, x2);

  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}


void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- x0    : key
  //  -- x1    : receiver
  // -----------------------------------
  Register result = x0;
  Register key = x0;
  Register receiver = x1;
  Label miss, unmapped;

  Register map_scratch = x2;
  MemOperand mapped_location = GenerateMappedArgumentsLookup(
      masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss);
  __ Ldr(result, mapped_location);
  __ Ret();

  __ Bind(&unmapped);
  // The parameter map is left in map_scratch when the jump to 'unmapped' is
  // taken.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss);
  __ Ldr(x2, unmapped_location);
  __ JumpIfRoot(x2, Heap::kTheHoleValueRootIndex, &miss);
  // Move the result into x0. x0 must be preserved on miss.
  __ Mov(result, x2);
  __ Ret();

  __ Bind(&miss);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateNonStrictArguments");
  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- x0    : value
  //  -- x1    : key
  //  -- x2    : receiver
  // -----------------------------------

  Label slow, notin;

  Register value = x0;
  Register key = x1;
  Register receiver = x2;
  Register map = x3;

  // These registers are used by GenerateMappedArgumentsLookup to build a
  // MemOperand. They are live for as long as the MemOperand is live.
  Register mapped1 = x4;
  Register mapped2 = x5;

  MemOperand mapped =
      GenerateMappedArgumentsLookup(masm, receiver, key, map,
                                    mapped1, mapped2,
                                    &notin, &slow);
  Operand mapped_offset = mapped.OffsetAsOperand();
  __ Str(value, mapped);
  __ Add(x10, mapped.base(), mapped_offset);
  __ Mov(x11, value);
  __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();

  __ Bind(&notin);

  // These registers are used by GenerateUnmappedArgumentsLookup to build a
  // MemOperand. They are live for as long as the MemOperand is live.
  Register unmapped1 = map;  // This is assumed to alias 'map'.
  Register unmapped2 = x4;
  MemOperand unmapped =
      GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
  Operand unmapped_offset = unmapped.OffsetAsOperand();
  __ Str(value, unmapped);
  __ Add(x10, unmapped.base(), unmapped_offset);
  __ Mov(x11, value);
  __ RecordWrite(unmapped.base(), x10, x11,
                 kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();
  __ Bind(&slow);
  GenerateMiss(masm);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- x0    : key
  //  -- x1    : receiver
  // -----------------------------------
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);

  __ Push(x1, x0);

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);

  __ TailCallExternalReference(ref, 2, 1);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- x0    : key
  //  -- x1    : receiver
  // -----------------------------------
  Register key = x0;
  Register receiver = x1;

  __ Push(receiver, key);
  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}


static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm,
                                        Register key,
                                        Register receiver,
                                        Register scratch1,
                                        Register scratch2,
                                        Register scratch3,
                                        Register scratch4,
                                        Register scratch5,
                                        Label* slow) {
  ASSERT(!AreAliased(
      key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));

  Isolate* isolate = masm->isolate();
  Label check_number_dictionary;
  // If we can load the value, it should be returned in x0.
  Register result = x0;

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, scratch1, scratch2, Map::kHasIndexedInterceptor, slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);

  GenerateFastArrayLoad(
      masm, receiver, key, scratch3, scratch2, scratch1, result, NULL, slow);
  __ IncrementCounter(
      isolate->counters()->keyed_load_generic_smi(), 1, scratch1, scratch2);
  __ Ret();

  __ Bind(&check_number_dictionary);
  __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));

  // Check whether we have a number dictionary.
  __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);

  __ LoadFromNumberDictionary(
      slow, scratch3, key, result, scratch1, scratch2, scratch4, scratch5);
  __ Ret();
}

static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
                                         Register key,
                                         Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
                                         Register scratch4,
                                         Register scratch5,
                                         Label* slow) {
  ASSERT(!AreAliased(
      key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));

  Isolate* isolate = masm->isolate();
  Label probe_dictionary, property_array_property;
  // If we can load the value, it should be returned in x0.
  Register result = x0;

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, scratch1, scratch2, Map::kHasNamedInterceptor, slow);

  // If the receiver is a fast-case object, check the keyed lookup cache.
  // Otherwise probe the dictionary.
  __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);

  // We keep the map of the receiver in scratch1.
  Register receiver_map = scratch1;

  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the name hash.
  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Mov(scratch2,
         Operand(receiver_map, ASR, KeyedLookupCache::kMapHashShift));
  __ Ldr(scratch3.W(), FieldMemOperand(key, Name::kHashFieldOffset));
  __ Eor(scratch2, scratch2, Operand(scratch3, ASR, Name::kHashShift));
  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
  __ And(scratch2, scratch2, mask);

  // Load the key (consisting of map and unique name) from the cache and
  // check for match.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);

  __ Mov(scratch3, Operand(cache_keys));
  __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));
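  // Each bucket entry in the cache is a (map, name) pair of pointers, hence
  // the bucket index is scaled by two pointers: LSL by kPointerSizeLog2 + 1.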

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    // Load map and advance scratch3 to point to the next entry.
    __ Ldr(scratch4, MemOperand(scratch3, kPointerSize * 2, PostIndex));
    __ Cmp(receiver_map, scratch4);
    __ B(ne, &try_next_entry);
    __ Ldr(scratch4, MemOperand(scratch3, -kPointerSize));  // Load name.
    __ Cmp(key, scratch4);
    __ B(eq, &hit_on_nth_entry[i]);
    __ Bind(&try_next_entry);
  }

  // Last entry.
  __ Ldr(scratch4, MemOperand(scratch3, kPointerSize, PostIndex));
  __ Cmp(receiver_map, scratch4);
  __ B(ne, slow);
  __ Ldr(scratch4, MemOperand(scratch3));
  __ Cmp(key, scratch4);
  __ B(ne, slow);

  // Get field offset.
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);

  // Hit on nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ Bind(&hit_on_nth_entry[i]);
    __ Mov(scratch3, Operand(cache_field_offsets));
    if (i != 0) {
      __ Add(scratch2, scratch2, i);
    }
    __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
    __ Ldrb(scratch5,
            FieldMemOperand(receiver_map, Map::kInObjectPropertiesOffset));
    __ Subs(scratch4, scratch4, scratch5);
    __ B(ge, &property_array_property);
    if (i != 0) {
      __ B(&load_in_object_property);
    }
  }

  // Load in-object property.
  __ Bind(&load_in_object_property);
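  // At this point scratch4 holds (field offset - number of in-object
  // properties), which is negative for an in-object field. Adding it to the
  // instance size (in words) gives the field's word offset from the start
  // of the object.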
  __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
  __ Add(scratch5, scratch5, scratch4);  // Index from start of object.
  __ Sub(receiver, receiver, kHeapObjectTag);  // Remove the heap tag.
  __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1, scratch1, scratch2);
  __ Ret();

  // Load property array property.
  __ Bind(&property_array_property);
  __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1, scratch1, scratch2);
  __ Ret();

  // Do a quick inline probe of the receiver's dictionary, if it exists.
  __ Bind(&probe_dictionary);
  __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
  // Load the property.
  GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
                      1, scratch1, scratch2);
  __ Ret();
}


void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- x0    : key
  //  -- x1    : receiver
  // -----------------------------------
  Label slow, check_name, index_smi, index_name;

  Register key = x0;
  Register receiver = x1;

  __ JumpIfNotSmi(key, &check_name);
  __ Bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.
  GenerateKeyedLoadWithSmiKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);

  // Slow case, key and receiver still in x0 and x1.
  __ Bind(&slow);
  __ IncrementCounter(
      masm->isolate()->counters()->keyed_load_generic_slow(), 1, x2, x3);
  GenerateRuntimeGetProperty(masm);

  __ Bind(&check_name);
  GenerateKeyNameCheck(masm, key, x2, x3, &index_name, &slow);

  GenerateKeyedLoadWithNameKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);

  __ Bind(&index_name);
  __ IndexFromHash(x3, key);
  // Now jump to the place where smi keys are handled.
  __ B(&index_smi);
}


void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- x0    : key (index)
  //  -- x1    : receiver
  // -----------------------------------
  Label miss;

  Register index = x0;
  Register receiver = x1;
  Register result = x0;
  Register scratch = x3;

  StringCharAtGenerator char_at_generator(receiver,
                                          index,
                                          scratch,
                                          result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ Bind(&miss);
  GenerateMiss(masm);
}


void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- x0    : key
  //  -- x1    : receiver
  // -----------------------------------
  Label slow;
  Register key = x0;
  Register receiver = x1;

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &slow);

  // Check that the key is an array index, that is, a non-negative smi.
  __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);

  // Get the map of the receiver.
  Register map = x2;
  __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));

  // Check that it has an indexed interceptor and that access checks
  // are not enabled for this object.
  __ Ldrb(x3, FieldMemOperand(map, Map::kBitFieldOffset));
  ASSERT(kSlowCaseBitFieldMask ==
      ((1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor)));
  __ Tbnz(x3, Map::kIsAccessCheckNeeded, &slow);
  __ Tbz(x3, Map::kHasIndexedInterceptor, &slow);

  // Everything is fine, call runtime.
  __ Push(receiver, key);
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
                        masm->isolate()),
      2,
      1);

  __ Bind(&slow);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateMiss");
  // ---------- S t a t e --------------
  //  -- x0    : value
  //  -- x1    : key
  //  -- x2    : receiver
  //  -- lr    : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(x2, x1, x0);

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateSlow");
  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- x0    : value
  //  -- x1    : key
  //  -- x2    : receiver
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(x2, x1, x0);

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                              StrictModeFlag strict_mode) {
  ASM_LOCATION("KeyedStoreIC::GenerateRuntimeSetProperty");
  // ---------- S t a t e --------------
  //  -- x0    : value
  //  -- x1    : key
  //  -- x2    : receiver
  //  -- lr    : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(x2, x1, x0);

  // Push PropertyAttributes(NONE) and strict_mode for runtime call.
  STATIC_ASSERT(NONE == 0);
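  // Since NONE == 0 (asserted above), pushing xzr pushes Smi::FromInt(NONE).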
  __ Mov(x10, Operand(Smi::FromInt(strict_mode)));
  __ Push(xzr, x10);

  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}


static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm,
    Label* fast_object,
    Label* fast_double,
    Label* slow,
    KeyedStoreCheckMap check_map,
    KeyedStoreIncrementLength increment_length,
    Register value,
    Register key,
    Register receiver,
    Register receiver_map,
    Register elements_map,
    Register elements) {
  ASSERT(!AreAliased(
      value, key, receiver, receiver_map, elements_map, elements, x10, x11));

  Label transition_smi_elements;
  Label transition_double_elements;
  Label fast_double_without_map_check;
  Label non_double_value;
  Label finish_store;

  __ Bind(fast_object);
  if (check_map == kCheckMap) {
    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Cmp(elements_map,
           Operand(masm->isolate()->factory()->fixed_array_map()));
    __ B(ne, fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed;
  // TODO(all): This address calculation is repeated later (for the store
  // itself). We should keep the result to avoid doing the work twice.
  __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Ldr(x11, MemOperand(x10));
  __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
  __ Bind(&holecheck_passed);

  // Smi stores don't require further checks.
  __ JumpIfSmi(value, &finish_store);

  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);

  __ Bind(&finish_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Operand(Smi::FromInt(1)));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }

  Register address = x11;
  __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Str(value, MemOperand(address));

  Label dont_record_write;
  __ JumpIfSmi(value, &dont_record_write);

  // Update write barrier for the elements array address.
  __ Mov(x10, value);  // Preserve the value which is returned.
  __ RecordWrite(elements,
                 address,
                 x10,
                 kLRHasNotBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);

  __ Bind(&dont_record_write);
  __ Ret();


  __ Bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so, go
  // to the runtime.
  // TODO(all): This address calculation was done earlier. We should keep the
  // result to avoid doing the work twice.
  __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Ldr(x11, MemOperand(x10));
  __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);

  __ Bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value,
                                 key,
                                 elements,
                                 x10,
                                 d0,
                                 d1,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Operand(Smi::FromInt(1)));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();


  __ Bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
  __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_DOUBLE_ELEMENTS,
                                         receiver_map,
                                         x10,
                                         slow);
  ASSERT(receiver_map.Is(x3));  // Transition code expects map in x3.
  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
                                                    FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&fast_double_without_map_check);

  __ Bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_ELEMENTS,
                                         receiver_map,
                                         x10,
                                         slow);
  ASSERT(receiver_map.Is(x3));  // Transition code expects map in x3.
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
                                                                   slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);

  __ Bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
                                         FAST_ELEMENTS,
                                         receiver_map,
                                         x10,
                                         slow);
  ASSERT(receiver_map.Is(x3));  // Transition code expects map in x3.
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);
}


void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                   StrictModeFlag strict_mode) {
  ASM_LOCATION("KeyedStoreIC::GenerateGeneric");
  // ---------- S t a t e --------------
  //  -- x0    : value
  //  -- x1    : key
  //  -- x2    : receiver
  //  -- lr    : return address
  // -----------------------------------
  Label slow;
  Label array;
  Label fast_object;
  Label extra;
  Label fast_object_grow;
  Label fast_double_grow;
  Label fast_double;

  Register value = x0;
  Register key = x1;
  Register receiver = x2;
  Register receiver_map = x3;
  Register elements = x4;
  Register elements_map = x5;

  __ JumpIfNotSmi(key, &slow);
  __ JumpIfSmi(receiver, &slow);
  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));

  // Check that the receiver does not require access checks and is not
  // observed. The generic stub does not perform map checks or handle observed
  // objects.
  __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ TestAndBranchIfAnySet(
      x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);

  // Check if the object is a JS array or not.
  Register instance_type = x10;
  __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
  __ B(eq, &array);
  // Check that the object is some kind of JSObject.
  __ Cmp(instance_type, FIRST_JS_OBJECT_TYPE);
  __ B(lt, &slow);

  // Object case: Check key against length in the elements array.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(hi, &fast_object);


  __ Bind(&slow);
  // Slow case, handle jump to runtime.
  // Live values:
  //  x0: value
  //  x1: key
  //  x2: receiver
  GenerateRuntimeSetProperty(masm, strict_mode);


  __ Bind(&extra);
  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].

  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(ls, &slow);

  __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ B(eq, &fast_object_grow);
  __ Cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ B(eq, &fast_double_grow);
  __ B(&slow);


  __ Bind(&array);
  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.

  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  // We can handle the case where we are appending 1 element.
  __ B(eq, &extra);
  __ B(lo, &slow);

  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
                                  &slow, kCheckMap, kDontIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x0    : value
  //  -- x1    : receiver
  //  -- x2    : name
  //  -- lr    : return address
  // -----------------------------------

  // Probe the stub cache.
  Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, x1, x2, x3, x4, x5, x6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x0    : value
  //  -- x1    : receiver
  //  -- x2    : name
  //  -- lr    : return address
  // -----------------------------------

  __ Push(x1, x2, x0);

  // Tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x0    : value
  //  -- x1    : receiver
  //  -- x2    : name
  //  -- lr    : return address
  // -----------------------------------
  Label miss;
  Register value = x0;
  Register receiver = x1;
  Register name = x2;
  Register dictionary = x3;

  GenerateNameDictionaryReceiverCheck(
      masm, receiver, dictionary, x4, x5, &miss);

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
  __ Ret();

  // Cache miss: Jump to runtime.
  __ Bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
  GenerateMiss(masm);
}


void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                         StrictModeFlag strict_mode) {
  ASM_LOCATION("StoreIC::GenerateRuntimeSetProperty");
  // ----------- S t a t e -------------
  //  -- x0    : value
  //  -- x1    : receiver
  //  -- x2    : name
  //  -- lr    : return address
  // -----------------------------------

  __ Push(x1, x2, x0);

  __ Mov(x11, Operand(Smi::FromInt(NONE)));  // PropertyAttributes
  __ Mov(x10, Operand(Smi::FromInt(strict_mode)));
  __ Push(x11, x10);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}


void StoreIC::GenerateSlow(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- x0    : value
  //  -- x1    : receiver
  //  -- x2    : name
  //  -- lr    : return address
  // -----------------------------------

  // Push receiver, name and value for runtime call.
  __ Push(x1, x2, x0);

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return al;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address info_address =
      Assembler::return_address_from_call_start(address);

  InstructionSequence* patch_info = InstructionSequence::At(info_address);
  return patch_info->IsInlineData();
}


// Activate a SMI fast-path by patching the instructions generated by
// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
// JumpPatchSite::EmitPatchInfo().
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  // The patch information is encoded in the instruction stream using
  // instructions which have no side effects, so we can safely execute them.
  // The patch information is encoded directly after the call to the helper
  // function which is requesting this patch operation.
  Address info_address =
      Assembler::return_address_from_call_start(address);
  InlineSmiCheckInfo info(info_address);

  // Check and decode the patch information instruction.
  if (!info.HasSmiCheck()) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n",
           address, info_address, reinterpret_cast<void*>(info.SmiCheck()));
  }

  // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
  // and JumpPatchSite::EmitJumpIfSmi().
  // Changing
  //   tb(n)z xzr, #0, <target>
  // to
  //   tb(!n)z test_reg, #0, <target>
  Instruction* to_patch = info.SmiCheck();
  PatchingAssembler patcher(to_patch, 1);
  ASSERT(to_patch->IsTestBranch());
  ASSERT(to_patch->ImmTestBranchBit5() == 0);
  ASSERT(to_patch->ImmTestBranchBit40() == 0);

  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagMask == 1);
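  // Given these assertions, testing bit 0 of a value is enough to tell a smi
  // (bit clear) from a heap object (bit set). Testing xzr instead makes the
  // branch outcome fixed, which is how a disabled check behaves.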

  int branch_imm = to_patch->ImmTestBranch();
  Register smi_reg;
  if (check == ENABLE_INLINED_SMI_CHECK) {
    ASSERT(to_patch->Rt() == xzr.code());
    smi_reg = info.SmiRegister();
  } else {
    ASSERT(check == DISABLE_INLINED_SMI_CHECK);
    ASSERT(to_patch->Rt() != xzr.code());
    smi_reg = xzr;
  }

  if (to_patch->Mask(TestBranchMask) == TBZ) {
    // This is JumpIfNotSmi(smi_reg, branch_imm).
    patcher.tbnz(smi_reg, 0, branch_imm);
  } else {
    ASSERT(to_patch->Mask(TestBranchMask) == TBNZ);
    // This is JumpIfSmi(smi_reg, branch_imm).
    patcher.tbz(smi_reg, 0, branch_imm);
  }
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_A64