// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.


#include "src/v8.h"

#if V8_TARGET_ARCH_MIPS

#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/ic-inl.h"
#include "src/runtime.h"
#include "src/stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)
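// Each '__' statement below emits MIPS instructions into the stub being
// generated, via the MacroAssembler passed in as 'masm'.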


static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
                                            Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
}


// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. Can be the same as 'elements' or 'name', in
//           which case that register is clobbered on the success path.
// The two scratch registers need to be different from 'elements', 'name' and
// 'result'.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address left in scratch2 by
// NameDictionaryLookupStub::GeneratePositiveLookup() is used.
static void GenerateDictionaryLoad(MacroAssembler* masm,
                                   Label* miss,
                                   Register elements,
                                   Register name,
                                   Register result,
                                   Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  //   scratch1: Used as temporary and to hold the capacity of the property
  //             dictionary.
  //   scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss,
                                                   &done,
                                                   elements,
                                                   name,
                                                   scratch1,
                                                   scratch2);

  // If probing finds an entry, check that the value is a normal property.
  __ bind(&done);  // scratch2 == elements + 4 * index.
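  // A NameDictionary entry is three pointers wide (key, value, details), so
  // scratch2 + kElementsStartOffset (below) addresses the entry's key slot.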
  const int kElementsStartOffset = NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at,
         scratch1,
         Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
  __ Branch(miss, ne, at, Operand(zero_reg));
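  // The details word is stored as a smi, hence the extra kSmiTagSize shift
  // above. Type NORMAL is zero, so any set type bit means the property is not
  // a plain data property and we go to the miss label.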

  // Get the value at the masked, scaled index and return.
  __ lw(result,
        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from 'elements', 'name' and
// 'value'.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address left in scratch2 by
// NameDictionaryLookupStub::GeneratePositiveLookup() is used.
static void GenerateDictionaryStore(MacroAssembler* masm,
                                    Label* miss,
                                    Register elements,
                                    Register name,
                                    Register value,
                                    Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  //   scratch1: Used as temporary and to hold the capacity of the property
  //             dictionary.
  //   scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss,
                                                   &done,
                                                   elements,
                                                   name,
                                                   scratch1,
                                                   scratch2);

  // If probing finds an entry in the dictionary, check that the value
  // is a normal property that is not read-only.
  __ bind(&done);  // scratch2 == elements + 4 * index.
  const int kElementsStartOffset = NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
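  // Same details check as in GenerateDictionaryLoad, but additionally reject
  // properties that have the READ_ONLY attribute bit set.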
  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ sw(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mov(scratch1, value);
  __ RecordWrite(
      elements, scratch2, scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs);
}


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS objects.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver,
                                           Register map,
                                           Register scratch,
                                           int interceptor_bit,
                                           Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ And(at, scratch,
         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
  __ Branch(slow, ne, at, Operand(zero_reg));
  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object,
  // we enter the runtime system to make sure that indexing into string
  // objects works as intended.
  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
}


// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm,
                                  Register receiver,
                                  Register key,
                                  Register elements,
                                  Register scratch1,
                                  Register scratch2,
                                  Register result,
                                  Label* not_fast_array,
                                  Label* out_of_range) {
  // Register use:
  //
  // receiver - holds the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // elements - holds the elements of the receiver on exit.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'receiver' or 'key'.
  //            Unchanged on bailout so 'receiver' and 'key' can be safely
  //            used by further computation.
  //
  // Scratch registers:
  //
  // scratch1 - used to hold the elements map and the elements length.
  //            Holds the elements map if the not_fast_array branch is taken.
  //
  // scratch2 - used to hold the loaded value.

  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode (not dictionary).
    __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
    __ Branch(not_fast_array, ne, scratch1, Operand(at));
  } else {
    __ AssertFastElements(elements);
  }

  // Check that the key (index) is within bounds.
  __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(out_of_range, hs, key, Operand(scratch1));

  // Fast case: Do the load.
  __ Addu(scratch1, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  // The key is a smi.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
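  // A smi is the untagged value shifted left by kSmiTagSize, so shifting the
  // key left by kPointerSizeLog2 - kSmiTagSize turns it directly into a byte
  // offset into the elements array.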
  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
  __ addu(at, at, scratch1);
  __ lw(scratch2, MemOperand(at));

  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  // In case the loaded value is the_hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ Branch(out_of_range, eq, scratch2, Operand(at));
  __ mov(result, scratch2);
}


// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm,
                                 Register key,
                                 Register map,
                                 Register hash,
                                 Label* index_string,
                                 Label* not_unique) {
  // The key is not a smi.
  Label unique;
  // Is it a name?
  __ GetObjectType(key, map, hash);
  __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));

  // Is the string an array index, with cached numeric value?
  __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
  __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
  __ Branch(index_string, eq, at, Operand(zero_reg));

  // Is the string internalized? We know it's a string, so a single
  // bit test is enough.
  // map: key map
  __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ And(at, hash, Operand(kIsNotInternalizedMask));
  __ Branch(not_unique, ne, at, Operand(zero_reg));

  __ bind(&unique);
}


void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  // The return address is in ra.
  Register receiver = ReceiverRegister();
  Register name = NameRegister();
  DCHECK(receiver.is(a1));
  DCHECK(name.is(a2));

  // Probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::LOAD_IC));
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, receiver, name, a3, t0, t1, t2);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = a0;
  DCHECK(!dictionary.is(ReceiverRegister()));
  DCHECK(!dictionary.is(NameRegister()));

  Label slow;

  __ lw(dictionary,
        FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), v0, a3, t0);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm);
}


// A register that isn't one of the parameters to the load IC.
static const Register LoadIC_TempRegister() { return a3; }


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in ra.
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->load_miss(), 1, a3, t0);

  __ mov(LoadIC_TempRegister(), ReceiverRegister());
  __ Push(LoadIC_TempRegister(), NameRegister());

  // Perform tail call to the entry.
  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in ra.

  __ mov(LoadIC_TempRegister(), ReceiverRegister());
  __ Push(LoadIC_TempRegister(), NameRegister());

  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}


static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                                Register object,
                                                Register key,
                                                Register scratch1,
                                                Register scratch2,
                                                Register scratch3,
                                                Label* unmapped_case,
                                                Label* slow_case) {
  Heap* heap = masm->isolate()->heap();

  // Check that the receiver is a JSObject. Because of the map check
  // later, we do not need to check for interceptors or whether it
  // requires access checks.
  __ JumpIfSmi(object, slow_case);
  // Check that the object is some kind of JSObject.
  __ GetObjectType(object, scratch1, scratch2);
  __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));

  // Check that the key is a positive smi.
  __ And(scratch1, key, Operand(0x80000001));
  __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
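  // 0x80000001 combines the smi tag mask with the sign bit: a non-negative
  // smi has both bits clear, so one test rejects non-smis and negative keys.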

  // Load the elements into scratch1 and check its map.
  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
  __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
  __ CheckMap(scratch1,
              scratch2,
              arguments_map,
              slow_case,
              DONT_DO_SMI_CHECK);
  // Check if element is in the range of mapped arguments. If not, jump
  // to the unmapped lookup with the parameter map in scratch1.
  __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
  __ Subu(scratch2, scratch2, Operand(Smi::FromInt(2)));
  __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));

  // Load element index and check whether it is the hole.
  const int kOffset =
      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
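  // The parameter map stores the context at index 0 and the arguments backing
  // store at index 1, so the per-parameter slots start at index 2; kOffset is
  // the untagged byte offset of that first slot.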

  __ li(scratch3, Operand(kPointerSize >> 1));
  __ Mul(scratch3, key, scratch3);
  __ Addu(scratch3, scratch3, Operand(kOffset));

  __ Addu(scratch2, scratch1, scratch3);
  __ lw(scratch2, MemOperand(scratch2));
  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
  __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));

  // Load value from context and return it. We can reuse scratch1 because
  // we do not jump to the unmapped lookup (which requires the parameter
  // map in scratch1).
  __ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  __ li(scratch3, Operand(kPointerSize >> 1));
  __ Mul(scratch3, scratch2, scratch3);
  __ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
  __ Addu(scratch2, scratch1, scratch3);
  return MemOperand(scratch2);
}


static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  // Element is in the arguments backing store, which is referenced by the
  // second element of the parameter_map. The parameter_map register
  // must be loaded with the parameter map of the arguments object and is
  // overwritten.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
  Register backing_store = parameter_map;
  __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
  __ CheckMap(backing_store,
              scratch,
              Heap::kFixedArrayMapRootIndex,
              slow_case,
              DONT_DO_SMI_CHECK);
  __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
  __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
  __ li(scratch, Operand(kPointerSize >> 1));
  __ Mul(scratch, key, scratch);
  __ Addu(scratch,
          scratch,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Addu(scratch, backing_store, scratch);
  return MemOperand(scratch);
}


void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
  // The return address is in ra.
  Register receiver = ReceiverRegister();
  Register key = NameRegister();
  DCHECK(receiver.is(a1));
  DCHECK(key.is(a2));

  Label slow, notin;
  MemOperand mapped_location =
      GenerateMappedArgumentsLookup(
          masm, receiver, key, a0, a3, t0, &notin, &slow);
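  // USE_DELAY_SLOT: the instruction following Ret executes in the branch
  // delay slot, so the load below still happens before control returns.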
  __ Ret(USE_DELAY_SLOT);
  __ lw(v0, mapped_location);
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in a0.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, key, a0, a3, &slow);
  __ lw(a0, unmapped_location);
  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
  __ Branch(&slow, eq, a0, Operand(a3));
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);
  __ bind(&slow);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
  Register receiver = ReceiverRegister();
  Register key = NameRegister();
  Register value = ValueRegister();
  DCHECK(value.is(a0));

  Label slow, notin;
  // The store address is returned in the register of the MemOperand
  // mapped_location.
  MemOperand mapped_location = GenerateMappedArgumentsLookup(
      masm, receiver, key, a3, t0, t1, &notin, &slow);
  __ sw(value, mapped_location);
  __ mov(t5, value);
  DCHECK_EQ(mapped_location.offset(), 0);
  __ RecordWrite(a3, mapped_location.rm(), t5,
                 kRAHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, value);  // In the delay slot: return the stored value in v0.
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in a3.
  // The store address is returned in the register of the MemOperand
  // unmapped_location.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, key, a3, t0, &slow);
  __ sw(value, unmapped_location);
  __ mov(t5, value);
  DCHECK_EQ(unmapped_location.offset(), 0);
  __ RecordWrite(a3, unmapped_location.rm(), t5,
                 kRAHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);  // In the delay slot: return the stored value in v0.
  __ bind(&slow);
  GenerateMiss(masm);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in ra.
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);

  __ Push(ReceiverRegister(), NameRegister());

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);

  __ TailCallExternalReference(ref, 2, 1);
}


// IC register specifications.
const Register LoadIC::ReceiverRegister() { return a1; }
const Register LoadIC::NameRegister() { return a2; }


const Register LoadIC::SlotRegister() {
  DCHECK(FLAG_vector_ics);
  return a0;
}


const Register LoadIC::VectorRegister() {
  DCHECK(FLAG_vector_ics);
  return a3;
}


const Register StoreIC::ReceiverRegister() { return a1; }
const Register StoreIC::NameRegister() { return a2; }
const Register StoreIC::ValueRegister() { return a0; }


const Register KeyedStoreIC::MapRegister() {
  return a3;
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in ra.

  __ Push(ReceiverRegister(), NameRegister());

  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}


void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // The return address is in ra.
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register key = NameRegister();
  Register receiver = ReceiverRegister();
  DCHECK(key.is(a2));
  DCHECK(receiver.is(a1));

  Isolate* isolate = masm->isolate();

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, a0, a3, Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(a0, a3, &check_number_dictionary);

  GenerateFastArrayLoad(
      masm, receiver, key, a0, a3, t0, v0, NULL, &slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, t0, a3);
  __ Ret();

  __ bind(&check_number_dictionary);
  __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));

  // Check whether the elements object is a number dictionary.
  // a3: elements map
  // t0: elements
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&slow, ne, a3, Operand(at));
  __ sra(a0, key, kSmiTagSize);
  __ LoadFromNumberDictionary(&slow, t0, key, v0, a0, a3, t1);
  __ Ret();

  // Slow case: key and receiver are still in a2 and a1.
  __ bind(&slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
                      1,
                      t0,
                      a3);
  GenerateRuntimeGetProperty(masm);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, a0, a3, Map::kHasNamedInterceptor, &slow);

  // If the receiver is a fast-case object, check the keyed lookup
  // cache. Otherwise probe the dictionary.
  __ lw(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&probe_dictionary, eq, t0, Operand(at));

  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the name hash.
  __ lw(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ sra(a3, a0, KeyedLookupCache::kMapHashShift);
  __ lw(t0, FieldMemOperand(key, Name::kHashFieldOffset));
  __ sra(at, t0, Name::kHashShift);
  __ xor_(a3, a3, at);
  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
  __ And(a3, a3, Operand(mask));

  // Load the key (consisting of map and unique name) from the cache and
  // check for match.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);
  __ li(t0, Operand(cache_keys));
  __ sll(at, a3, kPointerSizeLog2 + 1);
  __ addu(t0, t0, at);
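  // Each cache entry is a (map, name) pair, so the bucket index is scaled by
  // 2 * kPointerSize, i.e. shifted left by kPointerSizeLog2 + 1.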

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    __ lw(t1, MemOperand(t0, kPointerSize * i * 2));
    __ Branch(&try_next_entry, ne, a0, Operand(t1));
    __ lw(t1, MemOperand(t0, kPointerSize * (i * 2 + 1)));
    __ Branch(&hit_on_nth_entry[i], eq, key, Operand(t1));
    __ bind(&try_next_entry);
  }

  __ lw(t1, MemOperand(t0, kPointerSize * (kEntriesPerBucket - 1) * 2));
  __ Branch(&slow, ne, a0, Operand(t1));
  __ lw(t1, MemOperand(t0, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
  __ Branch(&slow, ne, key, Operand(t1));

  // Get the field offset.
  // a0 : receiver's map
  // a3 : lookup cache index
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);

  // Hit on nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ bind(&hit_on_nth_entry[i]);
    __ li(t0, Operand(cache_field_offsets));
    __ sll(at, a3, kPointerSizeLog2);
    __ addu(at, t0, at);
    __ lw(t1, MemOperand(at, kPointerSize * i));
    __ lbu(t2, FieldMemOperand(a0, Map::kInObjectPropertiesOffset));
    __ Subu(t1, t1, t2);
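    // The cached field offset is biased by the number of in-object
    // properties: after the subtraction, a non-negative value indexes the
    // property array, while a negative one is relative to the object's end.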
    __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
    if (i != 0) {
      __ Branch(&load_in_object_property);
    }
  }

  // Load in-object property.
  __ bind(&load_in_object_property);
  __ lbu(t2, FieldMemOperand(a0, Map::kInstanceSizeOffset));
  __ addu(t2, t2, t1);  // Index from start of object.
  __ Subu(receiver, receiver, Operand(kHeapObjectTag));  // Remove the heap tag.
  __ sll(at, t2, kPointerSizeLog2);
  __ addu(at, receiver, at);
  __ lw(v0, MemOperand(at));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1,
                      t0,
                      a3);
  __ Ret();

  // Load property array property.
  __ bind(&property_array_property);
  __ lw(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Addu(receiver, receiver, FixedArray::kHeaderSize - kHeapObjectTag);
  __ sll(v0, t1, kPointerSizeLog2);
  __ Addu(v0, v0, receiver);
  __ lw(v0, MemOperand(v0));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1,
                      t0,
                      a3);
  __ Ret();

  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);
  // a3: elements
  __ lw(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
  // Load the property to v0.
  GenerateDictionaryLoad(masm, &slow, a3, key, v0, t1, t0);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
                      1,
                      t0,
                      a3);
  __ Ret();

  __ bind(&index_name);
  __ IndexFromHash(a3, key);
  // Now jump to the place where smi keys are handled.
  __ Branch(&index_smi);
}


void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // The return address is in ra.
  Label miss;

  Register receiver = ReceiverRegister();
  Register index = NameRegister();
  Register scratch = a3;
  Register result = v0;
  DCHECK(!scratch.is(receiver) && !scratch.is(index));

  StringCharAtGenerator char_at_generator(receiver,
                                          index,
                                          scratch,
                                          result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ bind(&miss);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                              StrictMode strict_mode) {
  // Push receiver, key and value for runtime call.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
  __ li(a0, Operand(Smi::FromInt(strict_mode)));  // Strict mode.
  __ Push(a0);

  __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
}


static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm,
    Label* fast_object,
    Label* fast_double,
    Label* slow,
    KeyedStoreCheckMap check_map,
    KeyedStoreIncrementLength increment_length,
    Register value,
    Register key,
    Register receiver,
    Register receiver_map,
    Register elements_map,
    Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch_value = t0;
  Register address = t1;
  if (check_map == kCheckMap) {
    __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Branch(fast_double, ne, elements_map,
              Operand(masm->isolate()->factory()->fixed_array_map()));
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
  __ addu(address, address, at);
  __ lw(scratch_value, MemOperand(address));
  __ Branch(&holecheck_passed1, ne, scratch_value,
            Operand(masm->isolate()->factory()->the_hole_value()));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
    __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(address, address, scratch_value);
  __ sw(value, MemOperand(address));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch_value,
                             &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
    __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(address, address, scratch_value);
  __ sw(value, MemOperand(address));
  // Update the write barrier for the elements array address.
  __ mov(scratch_value, value);  // Preserve the value which is returned.
  __ RecordWrite(elements,
                 address,
                 scratch_value,
                 kRAHasNotBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for the fast double array case. If this fails, call through to
    // the runtime.
    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
    __ Branch(slow, ne, elements_map, Operand(at));
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so, go
  // to the runtime.
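  // The double hole is a NaN with a fixed bit pattern whose upper 32 bits are
  // kHoleNanUpper32, so checking that word alone identifies it.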
  __ Addu(address, elements,
          Operand(FixedDoubleArray::kHeaderSize + kHoleNanUpper32Offset
                  - kHeapObjectTag));
  __ sll(at, key, kPointerSizeLog2);
  __ addu(address, address, at);
  __ lw(scratch_value, MemOperand(address));
  __ Branch(&fast_double_without_map_check, ne, scratch_value,
            Operand(kHoleNanUpper32));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value,
                                 key,
                                 elements,  // Overwritten.
                                 a3,        // Scratch regs...
                                 t0,
                                 t1,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
    __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  __ Branch(&non_double_value, ne, t0, Operand(at));

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_DOUBLE_ELEMENTS,
                                         receiver_map,
                                         t0,
                                         slow);
  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
                                                    FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_ELEMENTS,
                                         receiver_map,
                                         t0,
                                         slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
                                         FAST_ELEMENTS,
                                         receiver_map,
                                         t0,
                                         slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}


void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                   StrictMode strict_mode) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : receiver
  //  -- a2     : key
  //  -- ra     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array;

  // Register usage.
  Register value = ValueRegister();
  Register key = NameRegister();
  Register receiver = ReceiverRegister();
  DCHECK(value.is(a0));
  Register receiver_map = a3;
  Register elements_map = t2;
  Register elements = t3;  // Elements array of the receiver.
  // t0 and t1 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &slow);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks and is not
  // observed. The generic stub does not perform map checks or handle
  // observed objects.
  __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded |
                         1 << Map::kIsObserved));
  __ Branch(&slow, ne, t0, Operand(zero_reg));
  // Check if the object is a JS array or not.
  __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
  // Check that the object is some kind of JSObject.
  __ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE));

  // Object case: Check key against length in the elements array.
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&fast_object, lo, key, Operand(t0));

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // a0: value.
  // a1: receiver.
  // a2: key.
  GenerateRuntimeSetProperty(masm, strict_mode);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // We get here from the JSArray case when key >= length; t0 still holds
  // the array length. Only support writing to array[array.length].
  __ Branch(&slow, ne, key, Operand(t0));
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&slow, hs, key, Operand(t0));
  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Branch(
      &check_if_double_array, ne, elements_map, Heap::kFixedArrayMapRootIndex);

  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is the length is always a smi.
  __ bind(&array);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Branch(&extra, hs, key, Operand(t0));

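  // The helper is emitted twice: once for in-bounds stores (with a map check
  // and no length change) and once for stores at array[array.length], which
  // also grow the length by one.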
  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
                                  &slow, kCheckMap, kDontIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
}


void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
  // The return address is in ra.
  Label slow;

  Register receiver = ReceiverRegister();
  Register key = NameRegister();
  Register scratch1 = a3;
  Register scratch2 = t0;
  DCHECK(!scratch1.is(receiver) && !scratch1.is(key));
  DCHECK(!scratch2.is(receiver) && !scratch2.is(key));

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &slow);

  // Check that the key is an array index, that is, a non-negative smi.
  __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
  __ Branch(&slow, ne, t0, Operand(zero_reg));

  // Get the map of the receiver.
  __ lw(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));

  // Check that it has an indexed interceptor and that access checks
  // are not enabled for this object.
  __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
  __ And(scratch2, scratch2, Operand(kSlowCaseBitFieldMask));
  __ Branch(&slow, ne, scratch2, Operand(1 << Map::kHasIndexedInterceptor));
  // Everything is fine, call runtime.
  __ Push(receiver, key);  // Receiver, key.

  // Perform tail call to the entry.
  __ TailCallExternalReference(ExternalReference(
      IC_Utility(kLoadElementWithInterceptor), masm->isolate()), 2, 1);

  __ bind(&slow);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  // Push receiver, key and value for runtime call.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateSlow(MacroAssembler* masm) {
  // Push receiver, key and value for runtime call.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  // Push receiver, key and value for runtime call.
  // We can't use MultiPush as the order of the registers is important.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());

  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  Register receiver = ReceiverRegister();
  Register name = NameRegister();
  DCHECK(receiver.is(a1));
  DCHECK(name.is(a2));
  DCHECK(ValueRegister().is(a0));

  // Probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, receiver, name, a3, t0, t1, t2);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
  // Perform tail call to the entry.
  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss),
                                            masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register receiver = ReceiverRegister();
  Register name = NameRegister();
  Register value = ValueRegister();
  Register dictionary = a3;
  DCHECK(receiver.is(a1));
  DCHECK(name.is(a2));
  DCHECK(value.is(a0));

  __ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, t0, t1);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, t0, t1);
  GenerateMiss(masm);
}


void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                         StrictMode strict_mode) {
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  __ li(a0, Operand(Smi::FromInt(strict_mode)));
  __ Push(a0);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
}


#undef __


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy,
  // nothing was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  return Assembler::IsAndImmediate(instr) &&
      Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
}


void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy,
  // nothing was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  if (!(Assembler::IsAndImmediate(instr) &&
        Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
    return;
  }

  // The delta to the start of the map check instruction and the
  // condition code used at the patched jump.
  int delta = Assembler::GetImmediate16(instr);
  delta += Assembler::GetRs(instr) * kImm16Mask;
  // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
  // signals that nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[ patching ic at %p, andi=%p, delta=%d\n",
           address, andi_instruction_address, delta);
  }

  Address patch_address =
      andi_instruction_address - delta * Instruction::kInstrSize;
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  // This is patching a conditional "jump if not smi/jump if smi" site.
  // Enabling by changing from
  //   andi at, rx, 0
  //   Branch <target>, eq, at, Operand(zero_reg)
  // to:
  //   andi at, rx, #kSmiTagMask
  //   Branch <target>, ne, at, Operand(zero_reg)
  // and vice-versa to be disabled again.
  CodePatcher patcher(patch_address, 2);
  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
    DCHECK_EQ(0, Assembler::GetImmediate16(instr_at_patch));
    patcher.masm()->andi(at, reg, kSmiTagMask);
  } else {
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
    patcher.masm()->andi(at, reg, 0);
  }
  DCHECK(Assembler::IsBranch(branch_instr));
  if (Assembler::IsBeq(branch_instr)) {
    patcher.ChangeBranchCondition(ne);
  } else {
    DCHECK(Assembler::IsBne(branch_instr));
    patcher.ChangeBranchCondition(eq);
  }
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS