| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_ARM | 7 #if V8_TARGET_ARCH_ARM |
| 8 | 8 |
| 9 #include "src/codegen.h" | 9 #include "src/ic/ic-compiler.h" |
| 10 #include "src/ic-inl.h" | |
| 11 #include "src/stub-cache.h" | |
| 12 | 10 |
| 13 namespace v8 { | 11 namespace v8 { |
| 14 namespace internal { | 12 namespace internal { |
| 15 | 13 |
| 16 #define __ ACCESS_MASM(masm) | 14 #define __ ACCESS_MASM(masm) |
| 17 | 15 |
| 18 | 16 |
| 19 static void ProbeTable(Isolate* isolate, | |
| 20 MacroAssembler* masm, | |
| 21 Code::Flags flags, | |
| 22 StubCache::Table table, | |
| 23 Register receiver, | |
| 24 Register name, | |
| 25 // Number of the cache entry, not scaled. | |
| 26 Register offset, | |
| 27 Register scratch, | |
| 28 Register scratch2, | |
| 29 Register offset_scratch) { | |
| 30 ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); | |
| 31 ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); | |
| 32 ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); | |
| 33 | |
| 34 uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address()); | |
| 35 uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address()); | |
| 36 uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address()); | |
| 37 | |
| 38 // Check the relative positions of the address fields. | |
| 39 DCHECK(value_off_addr > key_off_addr); | |
| 40 DCHECK((value_off_addr - key_off_addr) % 4 == 0); | |
| 41 DCHECK((value_off_addr - key_off_addr) < (256 * 4)); | |
| 42 DCHECK(map_off_addr > key_off_addr); | |
| 43 DCHECK((map_off_addr - key_off_addr) % 4 == 0); | |
| 44 DCHECK((map_off_addr - key_off_addr) < (256 * 4)); | |
| 45 | |
| 46 Label miss; | |
| 47 Register base_addr = scratch; | |
| 48 scratch = no_reg; | |
| 49 | |
| 50 // Multiply by 3 because there are 3 fields per entry (name, code, map). | |
| 51 __ add(offset_scratch, offset, Operand(offset, LSL, 1)); | |
| 52 | |
| 53 // Calculate the base address of the entry. | |
| 54 __ mov(base_addr, Operand(key_offset)); | |
| 55 __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2)); | |
| 56 | |
| 57 // Check that the key in the entry matches the name. | |
| 58 __ ldr(ip, MemOperand(base_addr, 0)); | |
| 59 __ cmp(name, ip); | |
| 60 __ b(ne, &miss); | |
| 61 | |
| 62 // Check the map matches. | |
| 63 __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr)); | |
| 64 __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset)); | |
| 65 __ cmp(ip, scratch2); | |
| 66 __ b(ne, &miss); | |
| 67 | |
| 68 // Get the code entry from the cache. | |
| 69 Register code = scratch2; | |
| 70 scratch2 = no_reg; | |
| 71 __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr)); | |
| 72 | |
| 73 // Check that the flags match what we're looking for. | |
| 74 Register flags_reg = base_addr; | |
| 75 base_addr = no_reg; | |
| 76 __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset)); | |
| 77 // It's a nice optimization if this constant is encodable in the bic insn. | |
| 78 | |
| 79 uint32_t mask = Code::kFlagsNotUsedInLookup; | |
| 80 DCHECK(__ ImmediateFitsAddrMode1Instruction(mask)); | |
| 81 __ bic(flags_reg, flags_reg, Operand(mask)); | |
| 82 __ cmp(flags_reg, Operand(flags)); | |
| 83 __ b(ne, &miss); | |
| 84 | |
| 85 #ifdef DEBUG | |
| 86 if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { | |
| 87 __ jmp(&miss); | |
| 88 } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { | |
| 89 __ jmp(&miss); | |
| 90 } | |
| 91 #endif | |
| 92 | |
| 93 // Jump to the first instruction in the code stub. | |
| 94 __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
| 95 | |
| 96 // Miss: fall through. | |
| 97 __ bind(&miss); | |
| 98 } | |
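Note: the deleted ProbeTable walks one entry of the stub cache. Each entry is three pointers (key, value, map), which is why the unscaled entry number is first multiplied by 3 and only then scaled by kPointerSizeLog2. A minimal C++ sketch of that layout and address math follows, assuming 32-bit ARM (kPointerSizeLog2 == 2); the struct mirrors the key/value/map references above and is illustrative, not V8's own declaration:

```cpp
#include <cstdint>

// One stub-cache entry as probed above (illustrative field types).
struct Entry {
  void* key;    // property name
  void* value;  // handler code object
  void* map;    // receiver map
};  // 3 * 4 = 12 bytes on 32-bit ARM

// base_addr computation from the deleted assembly:
//   offset_scratch = offset + (offset << 1);   // offset * 3
//   base_addr = key_table + (offset_scratch << kPointerSizeLog2);
uintptr_t EntryBase(uintptr_t key_table, uint32_t offset) {
  const int kPointerSizeLog2 = 2;  // 32-bit ARM assumption
  uint32_t offset_scratch = offset + (offset << 1);  // multiply by 3
  return key_table +
         (static_cast<uintptr_t>(offset_scratch) << kPointerSizeLog2);
}
```

The value and map fields are then read at fixed byte deltas from base_addr (value_off_addr - key_off_addr and map_off_addr - key_off_addr), which the DCHECKs above constrain to small word-aligned offsets.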
| 99 | |
| 100 | |
| 101 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( | 17 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( |
| 102 MacroAssembler* masm, Label* miss_label, Register receiver, | 18 MacroAssembler* masm, Label* miss_label, Register receiver, |
| 103 Handle<Name> name, Register scratch0, Register scratch1) { | 19 Handle<Name> name, Register scratch0, Register scratch1) { |
| 104 DCHECK(name->IsUniqueName()); | 20 DCHECK(name->IsUniqueName()); |
| 105 DCHECK(!receiver.is(scratch0)); | 21 DCHECK(!receiver.is(scratch0)); |
| 106 Counters* counters = masm->isolate()->counters(); | 22 Counters* counters = masm->isolate()->counters(); |
| 107 __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1); | 23 __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1); |
| 108 __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); | 24 __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); |
| 109 | 25 |
| 110 Label done; | 26 Label done; |
| (...skipping 20 matching lines...) |
| 131 __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset)); | 47 __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset)); |
| 132 Register tmp = properties; | 48 Register tmp = properties; |
| 133 __ LoadRoot(tmp, Heap::kHashTableMapRootIndex); | 49 __ LoadRoot(tmp, Heap::kHashTableMapRootIndex); |
| 134 __ cmp(map, tmp); | 50 __ cmp(map, tmp); |
| 135 __ b(ne, miss_label); | 51 __ b(ne, miss_label); |
| 136 | 52 |
| 137 // Restore the temporarily used register. | 53 // Restore the temporarily used register. |
| 138 __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 54 __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
| 139 | 55 |
| 140 | 56 |
| 141 NameDictionaryLookupStub::GenerateNegativeLookup(masm, | 57 NameDictionaryLookupStub::GenerateNegativeLookup( |
| 142 miss_label, | 58 masm, miss_label, &done, receiver, properties, name, scratch1); |
| 143 &done, | |
| 144 receiver, | |
| 145 properties, | |
| 146 name, | |
| 147 scratch1); | |
| 148 __ bind(&done); | 59 __ bind(&done); |
| 149 __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); | 60 __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); |
| 150 } | 61 } |
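GenerateDictionaryNegativeLookup proves a property is absent: it bails to the miss label unless the receiver's out-of-object properties are in dictionary mode (their map is the hash-table map) and the dictionary does not contain the name. A self-contained sketch of that decision in plain C++ (toy types of my own choosing, not V8 API):

```cpp
#include <set>
#include <string>

// Outcome of the generated fast path: fall through to &done (absence
// proven) or branch to the miss label.
enum class Outcome { kDone, kMiss };

struct PropertiesView {
  bool is_hash_table;             // the HashTableMap root check above
  std::set<std::string> entries;  // stands in for the NameDictionary
};

Outcome NegativeLookup(const PropertiesView& props, const std::string& name) {
  // A fast (non-dictionary) properties store cannot prove absence: miss.
  if (!props.is_hash_table) return Outcome::kMiss;
  // NameDictionaryLookupStub::GenerateNegativeLookup misses if the name
  // is present; otherwise absence is established.
  return props.entries.count(name) ? Outcome::kMiss : Outcome::kDone;
}
```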
| 151 | 62 |
| 152 | 63 |
| 153 void StubCache::GenerateProbe(MacroAssembler* masm, | |
| 154 Code::Flags flags, | |
| 155 Register receiver, | |
| 156 Register name, | |
| 157 Register scratch, | |
| 158 Register extra, | |
| 159 Register extra2, | |
| 160 Register extra3) { | |
| 161 Isolate* isolate = masm->isolate(); | |
| 162 Label miss; | |
| 163 | |
| 164 // Make sure that code is valid. The multiplying code relies on the | |
| 165 // entry size being 12. | |
| 166 DCHECK(sizeof(Entry) == 12); | |
| 167 | |
| 168 // Make sure the flags do not name a specific type. | |
| 169 DCHECK(Code::ExtractTypeFromFlags(flags) == 0); | |
| 170 | |
| 171 // Make sure that there are no register conflicts. | |
| 172 DCHECK(!scratch.is(receiver)); | |
| 173 DCHECK(!scratch.is(name)); | |
| 174 DCHECK(!extra.is(receiver)); | |
| 175 DCHECK(!extra.is(name)); | |
| 176 DCHECK(!extra.is(scratch)); | |
| 177 DCHECK(!extra2.is(receiver)); | |
| 178 DCHECK(!extra2.is(name)); | |
| 179 DCHECK(!extra2.is(scratch)); | |
| 180 DCHECK(!extra2.is(extra)); | |
| 181 | |
| 182 // Check scratch, extra and extra2 registers are valid. | |
| 183 DCHECK(!scratch.is(no_reg)); | |
| 184 DCHECK(!extra.is(no_reg)); | |
| 185 DCHECK(!extra2.is(no_reg)); | |
| 186 DCHECK(!extra3.is(no_reg)); | |
| 187 | |
| 188 Counters* counters = masm->isolate()->counters(); | |
| 189 __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, | |
| 190 extra2, extra3); | |
| 191 | |
| 192 // Check that the receiver isn't a smi. | |
| 193 __ JumpIfSmi(receiver, &miss); | |
| 194 | |
| 195 // Get the map of the receiver and compute the hash. | |
| 196 __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset)); | |
| 197 __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset)); | |
| 198 __ add(scratch, scratch, Operand(ip)); | |
| 199 uint32_t mask = kPrimaryTableSize - 1; | |
| 200 // We shift out the last two bits because they are not part of the hash and | |
| 201 // they are always 01 for maps. | |
| 202 __ mov(scratch, Operand(scratch, LSR, kCacheIndexShift)); | |
| 203 // Mask down the eor argument to the minimum to keep the immediate | |
| 204 // ARM-encodable. | |
| 205 __ eor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask)); | |
| 206 // Prefer and_ to ubfx here because ubfx takes 2 cycles. | |
| 207 __ and_(scratch, scratch, Operand(mask)); | |
| 208 | |
| 209 // Probe the primary table. | |
| 210 ProbeTable(isolate, | |
| 211 masm, | |
| 212 flags, | |
| 213 kPrimary, | |
| 214 receiver, | |
| 215 name, | |
| 216 scratch, | |
| 217 extra, | |
| 218 extra2, | |
| 219 extra3); | |
| 220 | |
| 221 // Primary miss: Compute hash for secondary probe. | |
| 222 __ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift)); | |
| 223 uint32_t mask2 = kSecondaryTableSize - 1; | |
| 224 __ add(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2)); | |
| 225 __ and_(scratch, scratch, Operand(mask2)); | |
| 226 | |
| 227 // Probe the secondary table. | |
| 228 ProbeTable(isolate, | |
| 229 masm, | |
| 230 flags, | |
| 231 kSecondary, | |
| 232 receiver, | |
| 233 name, | |
| 234 scratch, | |
| 235 extra, | |
| 236 extra2, | |
| 237 extra3); | |
| 238 | |
| 239 // Cache miss: Fall-through and let caller handle the miss by | |
| 240 // entering the runtime system. | |
| 241 __ bind(&miss); | |
| 242 __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, | |
| 243 extra2, extra3); | |
| 244 } | |
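For reference, the primary and secondary index math that the removed GenerateProbe emitted, restated in C++. The shift and table sizes are assumptions (the masks above are derived from kPrimaryTableSize and kSecondaryTableSize, which this diff does not show):

```cpp
#include <cstdint>

const uint32_t kPrimaryTableSize = 2048;   // assumed size
const uint32_t kSecondaryTableSize = 512;  // assumed size
const int kCacheIndexShift = 2;  // low two bits of a map word are always 01

uint32_t PrimaryIndex(uint32_t name_hash_field, uint32_t map_word,
                      uint32_t flags) {
  const uint32_t mask = kPrimaryTableSize - 1;
  uint32_t scratch = (name_hash_field + map_word) >> kCacheIndexShift;
  // The eor operand is pre-masked to keep the immediate ARM-encodable.
  scratch ^= (flags >> kCacheIndexShift) & mask;
  return scratch & mask;
}

uint32_t SecondaryIndex(uint32_t primary_scratch, uint32_t name_word,
                        uint32_t flags) {
  const uint32_t mask2 = kSecondaryTableSize - 1;
  uint32_t scratch = primary_scratch - (name_word >> kCacheIndexShift);
  scratch += (flags >> kCacheIndexShift) & mask2;
  return scratch & mask2;
}
```

Each index then fed ProbeTable, which checked the name, the receiver map, and the code flags before jumping into the cached handler.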
| 245 | |
| 246 | |
| 247 void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( | 64 void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( |
| 248 MacroAssembler* masm, int index, Register prototype, Label* miss) { | 65 MacroAssembler* masm, int index, Register prototype, Label* miss) { |
| 249 Isolate* isolate = masm->isolate(); | 66 Isolate* isolate = masm->isolate(); |
| 250 // Get the global function with the given index. | 67 // Get the global function with the given index. |
| 251 Handle<JSFunction> function( | 68 Handle<JSFunction> function( |
| 252 JSFunction::cast(isolate->native_context()->get(index))); | 69 JSFunction::cast(isolate->native_context()->get(index))); |
| 253 | 70 |
| 254 // Check we're still in the same context. | 71 // Check we're still in the same context. |
| 255 Register scratch = prototype; | 72 Register scratch = prototype; |
| 256 const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX); | 73 const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX); |
| (...skipping 202 matching lines...) |
| 459 __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow, | 276 __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow, |
| 460 TAG_RESULT, MUTABLE); | 277 TAG_RESULT, MUTABLE); |
| 461 | 278 |
| 462 __ JumpIfNotSmi(value_reg, &heap_number); | 279 __ JumpIfNotSmi(value_reg, &heap_number); |
| 463 __ SmiUntag(scratch1, value_reg); | 280 __ SmiUntag(scratch1, value_reg); |
| 464 __ vmov(s0, scratch1); | 281 __ vmov(s0, scratch1); |
| 465 __ vcvt_f64_s32(d0, s0); | 282 __ vcvt_f64_s32(d0, s0); |
| 466 __ jmp(&do_store); | 283 __ jmp(&do_store); |
| 467 | 284 |
| 468 __ bind(&heap_number); | 285 __ bind(&heap_number); |
| 469 __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, | 286 __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, miss_label, |
| 470 miss_label, DONT_DO_SMI_CHECK); | 287 DONT_DO_SMI_CHECK); |
| 471 __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); | 288 __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); |
| 472 | 289 |
| 473 __ bind(&do_store); | 290 __ bind(&do_store); |
| 474 __ vstr(d0, FieldMemOperand(storage_reg, HeapNumber::kValueOffset)); | 291 __ vstr(d0, FieldMemOperand(storage_reg, HeapNumber::kValueOffset)); |
| 475 } | 292 } |
| 476 | 293 |
| 477 // Stub never generated for objects that require access checks. | 294 // Stub never generated for objects that require access checks. |
| 478 DCHECK(!transition->is_access_check_needed()); | 295 DCHECK(!transition->is_access_check_needed()); |
| 479 | 296 |
| 480 // Perform map transition for the receiver if necessary. | 297 // Perform map transition for the receiver if necessary. |
| 481 if (details.type() == FIELD && | 298 if (details.type() == FIELD && |
| 482 Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) { | 299 Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) { |
| 483 // The properties must be extended before we can store the value. | 300 // The properties must be extended before we can store the value. |
| 484 // We jump to a runtime call that extends the properties array. | 301 // We jump to a runtime call that extends the properties array. |
| 485 __ push(receiver_reg); | 302 __ push(receiver_reg); |
| 486 __ mov(r2, Operand(transition)); | 303 __ mov(r2, Operand(transition)); |
| 487 __ Push(r2, r0); | 304 __ Push(r2, r0); |
| 488 __ TailCallExternalReference( | 305 __ TailCallExternalReference( |
| 489 ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage), | 306 ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage), |
| 490 isolate()), | 307 isolate()), |
| 491 3, 1); | 308 3, 1); |
| 492 return; | 309 return; |
| 493 } | 310 } |
| 494 | 311 |
| 495 // Update the map of the object. | 312 // Update the map of the object. |
| 496 __ mov(scratch1, Operand(transition)); | 313 __ mov(scratch1, Operand(transition)); |
| 497 __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); | 314 __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); |
| 498 | 315 |
| 499 // Update the write barrier for the map field. | 316 // Update the write barrier for the map field. |
| 500 __ RecordWriteField(receiver_reg, | 317 __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2, |
| 501 HeapObject::kMapOffset, | 318 kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, |
| 502 scratch1, | |
| 503 scratch2, | |
| 504 kLRHasNotBeenSaved, | |
| 505 kDontSaveFPRegs, | |
| 506 OMIT_REMEMBERED_SET, | |
| 507 OMIT_SMI_CHECK); | 319 OMIT_SMI_CHECK); |
| 508 | 320 |
| 509 if (details.type() == CONSTANT) { | 321 if (details.type() == CONSTANT) { |
| 510 DCHECK(value_reg.is(r0)); | 322 DCHECK(value_reg.is(r0)); |
| 511 __ Ret(); | 323 __ Ret(); |
| 512 return; | 324 return; |
| 513 } | 325 } |
| 514 | 326 |
| 515 int index = transition->instance_descriptors()->GetFieldIndex( | 327 int index = transition->instance_descriptors()->GetFieldIndex( |
| 516 transition->LastAdded()); | 328 transition->LastAdded()); |
| 517 | 329 |
| 518 // Adjust for the number of properties stored in the object. Even in the | 330 // Adjust for the number of properties stored in the object. Even in the |
| 519 // face of a transition we can use the old map here because the size of the | 331 // face of a transition we can use the old map here because the size of the |
| 520 // object and the number of in-object properties is not going to change. | 332 // object and the number of in-object properties is not going to change. |
| 521 index -= transition->inobject_properties(); | 333 index -= transition->inobject_properties(); |
| 522 | 334 |
| 523 // TODO(verwaest): Share this code as a code stub. | 335 // TODO(verwaest): Share this code as a code stub. |
| 524 SmiCheck smi_check = representation.IsTagged() | 336 SmiCheck smi_check = |
| 525 ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; | 337 representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; |
| 526 if (index < 0) { | 338 if (index < 0) { |
| 527 // Set the property straight into the object. | 339 // Set the property straight into the object. |
| 528 int offset = transition->instance_size() + (index * kPointerSize); | 340 int offset = transition->instance_size() + (index * kPointerSize); |
| 529 if (representation.IsDouble()) { | 341 if (representation.IsDouble()) { |
| 530 __ str(storage_reg, FieldMemOperand(receiver_reg, offset)); | 342 __ str(storage_reg, FieldMemOperand(receiver_reg, offset)); |
| 531 } else { | 343 } else { |
| 532 __ str(value_reg, FieldMemOperand(receiver_reg, offset)); | 344 __ str(value_reg, FieldMemOperand(receiver_reg, offset)); |
| 533 } | 345 } |
| 534 | 346 |
| 535 if (!representation.IsSmi()) { | 347 if (!representation.IsSmi()) { |
| 536 // Update the write barrier for the array address. | 348 // Update the write barrier for the array address. |
| 537 if (!representation.IsDouble()) { | 349 if (!representation.IsDouble()) { |
| 538 __ mov(storage_reg, value_reg); | 350 __ mov(storage_reg, value_reg); |
| 539 } | 351 } |
| 540 __ RecordWriteField(receiver_reg, | 352 __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1, |
| 541 offset, | 353 kLRHasNotBeenSaved, kDontSaveFPRegs, |
| 542 storage_reg, | 354 EMIT_REMEMBERED_SET, smi_check); |
| 543 scratch1, | |
| 544 kLRHasNotBeenSaved, | |
| 545 kDontSaveFPRegs, | |
| 546 EMIT_REMEMBERED_SET, | |
| 547 smi_check); | |
| 548 } | 355 } |
| 549 } else { | 356 } else { |
| 550 // Write to the properties array. | 357 // Write to the properties array. |
| 551 int offset = index * kPointerSize + FixedArray::kHeaderSize; | 358 int offset = index * kPointerSize + FixedArray::kHeaderSize; |
| 552 // Get the properties array | 359 // Get the properties array |
| 553 __ ldr(scratch1, | 360 __ ldr(scratch1, |
| 554 FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); | 361 FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); |
| 555 if (representation.IsDouble()) { | 362 if (representation.IsDouble()) { |
| 556 __ str(storage_reg, FieldMemOperand(scratch1, offset)); | 363 __ str(storage_reg, FieldMemOperand(scratch1, offset)); |
| 557 } else { | 364 } else { |
| 558 __ str(value_reg, FieldMemOperand(scratch1, offset)); | 365 __ str(value_reg, FieldMemOperand(scratch1, offset)); |
| 559 } | 366 } |
| 560 | 367 |
| 561 if (!representation.IsSmi()) { | 368 if (!representation.IsSmi()) { |
| 562 // Update the write barrier for the array address. | 369 // Update the write barrier for the array address. |
| 563 if (!representation.IsDouble()) { | 370 if (!representation.IsDouble()) { |
| 564 __ mov(storage_reg, value_reg); | 371 __ mov(storage_reg, value_reg); |
| 565 } | 372 } |
| 566 __ RecordWriteField(scratch1, | 373 __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg, |
| 567 offset, | 374 kLRHasNotBeenSaved, kDontSaveFPRegs, |
| 568 storage_reg, | 375 EMIT_REMEMBERED_SET, smi_check); |
| 569 receiver_reg, | |
| 570 kLRHasNotBeenSaved, | |
| 571 kDontSaveFPRegs, | |
| 572 EMIT_REMEMBERED_SET, | |
| 573 smi_check); | |
| 574 } | 376 } |
| 575 } | 377 } |
| 576 | 378 |
| 577 // Return the value (register r0). | 379 // Return the value (register r0). |
| 578 DCHECK(value_reg.is(r0)); | 380 DCHECK(value_reg.is(r0)); |
| 579 __ bind(&exit); | 381 __ bind(&exit); |
| 580 __ Ret(); | 382 __ Ret(); |
| 581 } | 383 } |
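Two details of the store path above are worth restating. Field addressing: a negative adjusted index is an in-object slot (addressed relative to the instance size), while a non-negative one lives in the properties FixedArray. Write barriers: smi stores never need one; double fields store the boxed HeapNumber pointer (storage_reg), so they do. A sketch of the offset computation, with parameter names of my own choosing:

```cpp
// Byte offset of field |index| after subtracting inobject_properties(),
// mirroring the two branches above.
int FieldByteOffset(int index, int instance_size, int fixed_array_header_size,
                    int pointer_size) {
  return index < 0
             ? instance_size + index * pointer_size  // in-object slot
             : fixed_array_header_size + index * pointer_size;  // properties
}
```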
| 582 | 384 |
| 583 | 385 |
| (...skipping 23 matching lines...) |
| 607 | 409 |
| 608 | 410 |
| 609 Register PropertyHandlerCompiler::CheckPrototypes( | 411 Register PropertyHandlerCompiler::CheckPrototypes( |
| 610 Register object_reg, Register holder_reg, Register scratch1, | 412 Register object_reg, Register holder_reg, Register scratch1, |
| 611 Register scratch2, Handle<Name> name, Label* miss, | 413 Register scratch2, Handle<Name> name, Label* miss, |
| 612 PrototypeCheckType check) { | 414 PrototypeCheckType check) { |
| 613 Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate())); | 415 Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate())); |
| 614 | 416 |
| 615 // Make sure there's no overlap between holder and object registers. | 417 // Make sure there's no overlap between holder and object registers. |
| 616 DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); | 418 DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); |
| 617 DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) | 419 DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) && |
| 618 && !scratch2.is(scratch1)); | 420 !scratch2.is(scratch1)); |
| 619 | 421 |
| 620 // Keep track of the current object in register reg. | 422 // Keep track of the current object in register reg. |
| 621 Register reg = object_reg; | 423 Register reg = object_reg; |
| 622 int depth = 0; | 424 int depth = 0; |
| 623 | 425 |
| 624 Handle<JSObject> current = Handle<JSObject>::null(); | 426 Handle<JSObject> current = Handle<JSObject>::null(); |
| 625 if (type()->IsConstant()) { | 427 if (type()->IsConstant()) { |
| 626 current = Handle<JSObject>::cast(type()->AsConstant()->Value()); | 428 current = Handle<JSObject>::cast(type()->AsConstant()->Value()); |
| 627 } | 429 } |
| 628 Handle<JSObject> prototype = Handle<JSObject>::null(); | 430 Handle<JSObject> prototype = Handle<JSObject>::null(); |
| (...skipping 12 matching lines...) |
| 641 prototype = handle(JSObject::cast(current_map->prototype())); | 443 prototype = handle(JSObject::cast(current_map->prototype())); |
| 642 if (current_map->is_dictionary_map() && | 444 if (current_map->is_dictionary_map() && |
| 643 !current_map->IsJSGlobalObjectMap()) { | 445 !current_map->IsJSGlobalObjectMap()) { |
| 644 DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast. | 446 DCHECK(!current_map->IsJSGlobalProxyMap()); // Proxy maps are fast. |
| 645 if (!name->IsUniqueName()) { | 447 if (!name->IsUniqueName()) { |
| 646 DCHECK(name->IsString()); | 448 DCHECK(name->IsString()); |
| 647 name = factory()->InternalizeString(Handle<String>::cast(name)); | 449 name = factory()->InternalizeString(Handle<String>::cast(name)); |
| 648 } | 450 } |
| 649 DCHECK(current.is_null() || | 451 DCHECK(current.is_null() || |
| 650 current->property_dictionary()->FindEntry(name) == | 452 current->property_dictionary()->FindEntry(name) == |
| 651 NameDictionary::kNotFound); | 453 NameDictionary::kNotFound); |
| 652 | 454 |
| 653 GenerateDictionaryNegativeLookup(masm(), miss, reg, name, | 455 GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1, |
| 654 scratch1, scratch2); | 456 scratch2); |
| 655 | 457 |
| 656 __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); | 458 __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 657 reg = holder_reg; // From now on the object will be in holder_reg. | 459 reg = holder_reg; // From now on the object will be in holder_reg. |
| 658 __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); | 460 __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); |
| 659 } else { | 461 } else { |
| 660 Register map_reg = scratch1; | 462 Register map_reg = scratch1; |
| 661 if (depth != 1 || check == CHECK_ALL_MAPS) { | 463 if (depth != 1 || check == CHECK_ALL_MAPS) { |
| 662 // CheckMap implicitly loads the map of |reg| into |map_reg|. | 464 // CheckMap implicitly loads the map of |reg| into |map_reg|. |
| 663 __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK); | 465 __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK); |
| 664 } else { | 466 } else { |
| 665 __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); | 467 __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 666 } | 468 } |
| 667 | 469 |
| 668 // Check access rights to the global object. This has to happen after | 470 // Check access rights to the global object. This has to happen after |
| 669 // the map check so that we know that the object is actually a global | 471 // the map check so that we know that the object is actually a global |
| 670 // object. | 472 // object. |
| 671 // This allows us to install generated handlers for accesses to the | 473 // This allows us to install generated handlers for accesses to the |
| 672 // global proxy (as opposed to using slow ICs). See corresponding code | 474 // global proxy (as opposed to using slow ICs). See corresponding code |
| 673 // in LookupForRead(). | 475 // in LookupForRead(). |
| 674 if (current_map->IsJSGlobalProxyMap()) { | 476 if (current_map->IsJSGlobalProxyMap()) { |
| 675 __ CheckAccessGlobalProxy(reg, scratch2, miss); | 477 __ CheckAccessGlobalProxy(reg, scratch2, miss); |
| 676 } else if (current_map->IsJSGlobalObjectMap()) { | 478 } else if (current_map->IsJSGlobalObjectMap()) { |
| 677 GenerateCheckPropertyCell( | 479 GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current), |
| 678 masm(), Handle<JSGlobalObject>::cast(current), name, | 480 name, scratch2, miss); |
| 679 scratch2, miss); | |
| 680 } | 481 } |
| 681 | 482 |
| 682 reg = holder_reg; // From now on the object will be in holder_reg. | 483 reg = holder_reg; // From now on the object will be in holder_reg. |
| 683 | 484 |
| 684 // Two possible reasons for loading the prototype from the map: | 485 // Two possible reasons for loading the prototype from the map: |
| 685 // (1) Can't store references to new space in code. | 486 // (1) Can't store references to new space in code. |
| 686 // (2) Handler is shared for all receivers with the same prototype | 487 // (2) Handler is shared for all receivers with the same prototype |
| 687 // map (but not necessarily the same prototype instance). | 488 // map (but not necessarily the same prototype instance). |
| 688 bool load_prototype_from_map = | 489 bool load_prototype_from_map = |
| 689 heap()->InNewSpace(*prototype) || depth == 1; | 490 heap()->InNewSpace(*prototype) || depth == 1; |
| (...skipping 68 matching lines...) |
| 758 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3); | 559 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3); |
| 759 STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4); | 560 STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4); |
| 760 STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5); | 561 STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5); |
| 761 STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6); | 562 STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6); |
| 762 DCHECK(!scratch2().is(reg)); | 563 DCHECK(!scratch2().is(reg)); |
| 763 DCHECK(!scratch3().is(reg)); | 564 DCHECK(!scratch3().is(reg)); |
| 764 DCHECK(!scratch4().is(reg)); | 565 DCHECK(!scratch4().is(reg)); |
| 765 __ push(receiver()); | 566 __ push(receiver()); |
| 766 if (heap()->InNewSpace(callback->data())) { | 567 if (heap()->InNewSpace(callback->data())) { |
| 767 __ Move(scratch3(), callback); | 568 __ Move(scratch3(), callback); |
| 768 __ ldr(scratch3(), FieldMemOperand(scratch3(), | 569 __ ldr(scratch3(), |
| 769 ExecutableAccessorInfo::kDataOffset)); | 570 FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset)); |
| 770 } else { | 571 } else { |
| 771 __ Move(scratch3(), Handle<Object>(callback->data(), isolate())); | 572 __ Move(scratch3(), Handle<Object>(callback->data(), isolate())); |
| 772 } | 573 } |
| 773 __ push(scratch3()); | 574 __ push(scratch3()); |
| 774 __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex); | 575 __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex); |
| 775 __ mov(scratch4(), scratch3()); | 576 __ mov(scratch4(), scratch3()); |
| 776 __ Push(scratch3(), scratch4()); | 577 __ Push(scratch3(), scratch4()); |
| 777 __ mov(scratch4(), | 578 __ mov(scratch4(), Operand(ExternalReference::isolate_address(isolate()))); |
| 778 Operand(ExternalReference::isolate_address(isolate()))); | |
| 779 __ Push(scratch4(), reg); | 579 __ Push(scratch4(), reg); |
| 780 __ mov(scratch2(), sp); // scratch2 = PropertyAccessorInfo::args_ | 580 __ mov(scratch2(), sp); // scratch2 = PropertyAccessorInfo::args_ |
| 781 __ push(name()); | 581 __ push(name()); |
| 782 | 582 |
| 783 // Abi for CallApiGetter | 583 // Abi for CallApiGetter |
| 784 Register getter_address_reg = r2; | 584 Register getter_address_reg = r2; |
| 785 | 585 |
| 786 Address getter_address = v8::ToCData<Address>(callback->getter()); | 586 Address getter_address = v8::ToCData<Address>(callback->getter()); |
| 787 ApiFunction fun(getter_address); | 587 ApiFunction fun(getter_address); |
| 788 ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL; | 588 ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL; |
| (...skipping 119 matching lines...) |
| 908 if (!setter.is_null()) { | 708 if (!setter.is_null()) { |
| 909 // Call the JavaScript setter with receiver and value on the stack. | 709 // Call the JavaScript setter with receiver and value on the stack. |
| 910 if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { | 710 if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { |
| 911 // Swap in the global receiver. | 711 // Swap in the global receiver. |
| 912 __ ldr(receiver, | 712 __ ldr(receiver, |
| 913 FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); | 713 FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); |
| 914 } | 714 } |
| 915 __ Push(receiver, value()); | 715 __ Push(receiver, value()); |
| 916 ParameterCount actual(1); | 716 ParameterCount actual(1); |
| 917 ParameterCount expected(setter); | 717 ParameterCount expected(setter); |
| 918 __ InvokeFunction(setter, expected, actual, | 718 __ InvokeFunction(setter, expected, actual, CALL_FUNCTION, |
| 919 CALL_FUNCTION, NullCallWrapper()); | 719 NullCallWrapper()); |
| 920 } else { | 720 } else { |
| 921 // If we generate a global code snippet for deoptimization only, remember | 721 // If we generate a global code snippet for deoptimization only, remember |
| 922 // the place to continue after deoptimization. | 722 // the place to continue after deoptimization. |
| 923 masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset()); | 723 masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset()); |
| 924 } | 724 } |
| 925 | 725 |
| 926 // We have to return the passed value, not the return value of the setter. | 726 // We have to return the passed value, not the return value of the setter. |
| 927 __ pop(r0); | 727 __ pop(r0); |
| 928 | 728 |
| 929 // Restore context register. | 729 // Restore context register. |
| (...skipping 18 matching lines...) |
| 948 | 748 |
| 949 // Return the generated code. | 749 // Return the generated code. |
| 950 return GetCode(kind(), Code::FAST, name); | 750 return GetCode(kind(), Code::FAST, name); |
| 951 } | 751 } |
| 952 | 752 |
| 953 | 753 |
| 954 Register* PropertyAccessCompiler::load_calling_convention() { | 754 Register* PropertyAccessCompiler::load_calling_convention() { |
| 955 // receiver, name, scratch1, scratch2, scratch3, scratch4. | 755 // receiver, name, scratch1, scratch2, scratch3, scratch4. |
| 956 Register receiver = LoadIC::ReceiverRegister(); | 756 Register receiver = LoadIC::ReceiverRegister(); |
| 957 Register name = LoadIC::NameRegister(); | 757 Register name = LoadIC::NameRegister(); |
| 958 static Register registers[] = { receiver, name, r3, r0, r4, r5 }; | 758 static Register registers[] = {receiver, name, r3, r0, r4, r5}; |
| 959 return registers; | 759 return registers; |
| 960 } | 760 } |
| 961 | 761 |
| 962 | 762 |
| 963 Register* PropertyAccessCompiler::store_calling_convention() { | 763 Register* PropertyAccessCompiler::store_calling_convention() { |
| 964 // receiver, name, scratch1, scratch2, scratch3. | 764 // receiver, name, scratch1, scratch2, scratch3. |
| 965 Register receiver = StoreIC::ReceiverRegister(); | 765 Register receiver = StoreIC::ReceiverRegister(); |
| 966 Register name = StoreIC::NameRegister(); | 766 Register name = StoreIC::NameRegister(); |
| 967 DCHECK(r3.is(KeyedStoreIC::MapRegister())); | 767 DCHECK(r3.is(KeyedStoreIC::MapRegister())); |
| 968 static Register registers[] = { receiver, name, r3, r4, r5 }; | 768 static Register registers[] = {receiver, name, r3, r4, r5}; |
| 969 return registers; | 769 return registers; |
| 970 } | 770 } |
| 971 | 771 |
| 972 | 772 |
| 973 Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); } | 773 Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); } |
| 974 | 774 |
| 975 | 775 |
| 976 #undef __ | 776 #undef __ |
| 977 #define __ ACCESS_MASM(masm) | 777 #define __ ACCESS_MASM(masm) |
| 978 | 778 |
| (...skipping 12 matching lines...) |
| 991 if (!getter.is_null()) { | 791 if (!getter.is_null()) { |
| 992 // Call the JavaScript getter with the receiver on the stack. | 792 // Call the JavaScript getter with the receiver on the stack. |
| 993 if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { | 793 if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { |
| 994 // Swap in the global receiver. | 794 // Swap in the global receiver. |
| 995 __ ldr(receiver, | 795 __ ldr(receiver, |
| 996 FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); | 796 FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); |
| 997 } | 797 } |
| 998 __ push(receiver); | 798 __ push(receiver); |
| 999 ParameterCount actual(0); | 799 ParameterCount actual(0); |
| 1000 ParameterCount expected(getter); | 800 ParameterCount expected(getter); |
| 1001 __ InvokeFunction(getter, expected, actual, | 801 __ InvokeFunction(getter, expected, actual, CALL_FUNCTION, |
| 1002 CALL_FUNCTION, NullCallWrapper()); | 802 NullCallWrapper()); |
| 1003 } else { | 803 } else { |
| 1004 // If we generate a global code snippet for deoptimization only, remember | 804 // If we generate a global code snippet for deoptimization only, remember |
| 1005 // the place to continue after deoptimization. | 805 // the place to continue after deoptimization. |
| 1006 masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset()); | 806 masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset()); |
| 1007 } | 807 } |
| 1008 | 808 |
| 1009 // Restore context register. | 809 // Restore context register. |
| 1010 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 810 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 1011 } | 811 } |
| 1012 __ Ret(); | 812 __ Ret(); |
| (...skipping 133 matching lines...) |
| 1146 DCHECK(receiver.is(r1)); | 946 DCHECK(receiver.is(r1)); |
| 1147 DCHECK(key.is(r2)); | 947 DCHECK(key.is(r2)); |
| 1148 | 948 |
| 1149 __ UntagAndJumpIfNotSmi(r6, key, &miss); | 949 __ UntagAndJumpIfNotSmi(r6, key, &miss); |
| 1150 __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 950 __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 1151 __ LoadFromNumberDictionary(&slow, r4, key, r0, r6, r3, r5); | 951 __ LoadFromNumberDictionary(&slow, r4, key, r0, r6, r3, r5); |
| 1152 __ Ret(); | 952 __ Ret(); |
| 1153 | 953 |
| 1154 __ bind(&slow); | 954 __ bind(&slow); |
| 1155 __ IncrementCounter( | 955 __ IncrementCounter( |
| 1156 masm->isolate()->counters()->keyed_load_external_array_slow(), | 956 masm->isolate()->counters()->keyed_load_external_array_slow(), 1, r2, r3); |
| 1157 1, r2, r3); | |
| 1158 | 957 |
| 1159 TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow); | 958 TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow); |
| 1160 | 959 |
| 1161 // Miss case, call the runtime. | 960 // Miss case, call the runtime. |
| 1162 __ bind(&miss); | 961 __ bind(&miss); |
| 1163 | 962 |
| 1164 TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss); | 963 TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss); |
| 1165 } | 964 } |
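The control flow of this dictionary-element load stub, summarized (a sketch of the dispatch, not V8 API): a non-smi key misses so the IC can re-specialize, a smi key absent from the number dictionary falls back to the generic slow builtin, and a hit returns the loaded value.

```cpp
enum class Tail { kReturnValue, kSlowBuiltin, kMissBuiltin };

// Mirrors UntagAndJumpIfNotSmi / LoadFromNumberDictionary above.
Tail KeyedLoadDictionaryElement(bool key_is_smi, bool found_in_dictionary) {
  if (!key_is_smi) return Tail::kMissBuiltin;           // KeyedLoadIC_Miss
  if (!found_in_dictionary) return Tail::kSlowBuiltin;  // KeyedLoadIC_Slow
  return Tail::kReturnValue;
}
```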
| 1166 | 965 |
| 1167 | 966 |
| 1168 #undef __ | 967 #undef __ |
| 1169 | 968 } |
| 1170 } } // namespace v8::internal | 969 } // namespace v8::internal |
| 1171 | 970 |
| 1172 #endif // V8_TARGET_ARCH_ARM | 971 #endif // V8_TARGET_ARCH_ARM |