
Unified Diff: src/ic/arm64/ic-compiler-arm64.cc

Issue 483683005: Move IC code into a subdir and move ic-compilation related code from stub-cache into ic-compiler (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Fix BUILD.gn (created 6 years, 4 months ago)
-// Copyright 2013 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/v8.h"

 #if V8_TARGET_ARCH_ARM64

-#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/stub-cache.h"
+#include "src/ic/ic-compiler.h"

 namespace v8 {
 namespace internal {

-
 #define __ ACCESS_MASM(masm)


 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
   DCHECK(!AreAliased(receiver, scratch0, scratch1));
   DCHECK(name->IsUniqueName());
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);

(... 16 unchanged lines skipped ...)
   __ Cmp(scratch0, FIRST_SPEC_OBJECT_TYPE);
   __ B(lt, miss_label);

   // Load properties array.
   Register properties = scratch0;
   __ Ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   // Check that the properties array is a dictionary.
   __ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
   __ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label);

-  NameDictionaryLookupStub::GenerateNegativeLookup(masm,
-                                                   miss_label,
-                                                   &done,
-                                                   receiver,
-                                                   properties,
-                                                   name,
-                                                   scratch1);
+  NameDictionaryLookupStub::GenerateNegativeLookup(
+      masm, miss_label, &done, receiver, properties, name, scratch1);
   __ Bind(&done);
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
 }


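A note on the helper above: the handler it emits stays valid only while the receiver's out-of-object properties are still a hash-table-mode dictionary that lacks `name`. A plain C++ sketch of that invariant (illustrative only; `hash_table_map()` is assumed to be the root accessor matching Heap::kHashTableMapRootIndex, and the emitted code performs the FindEntry probe inline via NameDictionaryLookupStub):

    // Sketch: the runtime invariant GenerateDictionaryNegativeLookup encodes.
    // Illustrative only; the real test is emitted as ARM64 instructions.
    bool NegativeLookupStillHolds(JSObject* receiver, Name* name, Heap* heap) {
      HeapObject* properties = HeapObject::cast(receiver->properties());
      // Miss unless the backing store is in dictionary (hash table) mode.
      if (properties->map() != heap->hash_table_map()) return false;
      // Miss as soon as 'name' gains an entry in the dictionary.
      NameDictionary* dictionary = NameDictionary::cast(properties);
      return dictionary->FindEntry(name) == NameDictionary::kNotFound;
    }
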
-// Probe primary or secondary table.
-// If the entry is found in the cache, the generated code jumps to the first
-// instruction of the stub in the cache.
-// If there is a miss, the code falls through.
-//
-// 'receiver', 'name' and 'offset' registers are preserved on miss.
-static void ProbeTable(Isolate* isolate,
-                       MacroAssembler* masm,
-                       Code::Flags flags,
-                       StubCache::Table table,
-                       Register receiver,
-                       Register name,
-                       Register offset,
-                       Register scratch,
-                       Register scratch2,
-                       Register scratch3) {
-  // Some code below relies on the fact that the Entry struct contains
-  // 3 pointers (name, code, map).
-  STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));
-
-  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
-  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
-  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
-  uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
-  uintptr_t value_off_addr =
-      reinterpret_cast<uintptr_t>(value_offset.address());
-  uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
-
-  Label miss;
-
-  DCHECK(!AreAliased(name, offset, scratch, scratch2, scratch3));
-
-  // Multiply by 3 because there are 3 fields per entry.
-  __ Add(scratch3, offset, Operand(offset, LSL, 1));
-
-  // Calculate the base address of the entry.
-  __ Mov(scratch, key_offset);
-  __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2));
-
-  // Check that the key in the entry matches the name.
-  __ Ldr(scratch2, MemOperand(scratch));
-  __ Cmp(name, scratch2);
-  __ B(ne, &miss);
-
-  // Check that the map matches.
-  __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr));
-  __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Cmp(scratch2, scratch3);
-  __ B(ne, &miss);
-
-  // Get the code entry from the cache.
-  __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));
-
-  // Check that the flags match what we're looking for.
-  __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset));
-  __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup);
-  __ Cmp(scratch2.W(), flags);
-  __ B(ne, &miss);
-
-#ifdef DEBUG
-  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-    __ B(&miss);
-  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-    __ B(&miss);
-  }
-#endif
-
-  // Jump to the first instruction in the code stub.
-  __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
-  __ Br(scratch);
-
-  // Miss: fall through.
-  __ Bind(&miss);
-}
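The addressing in the (now removed) ProbeTable is easier to follow in scalar form. A sketch of the same arithmetic, assuming only the three-pointer Entry layout asserted by the STATIC_ASSERT above (`EntryFieldAddress` is an illustrative name, not a V8 function):

    #include <cstddef>
    #include <cstdint>

    // Sketch of ProbeTable's entry addressing; not the emitted code.
    // 'index' is the masked hash; each Entry is {name, code, map}.
    uintptr_t EntryFieldAddress(uintptr_t key_column_base, uintptr_t index,
                                std::size_t field_delta) {
      // Add(scratch3, offset, Operand(offset, LSL, 1)): index * 3 fields.
      uintptr_t scaled = index * 3;
      // Add(scratch, key_base, Operand(scaled, LSL, kPointerSizeLog2)).
      uintptr_t key_address = key_column_base + scaled * sizeof(void*);
      // The map and code words are then loaded at fixed deltas from the key
      // word (map_off_addr - key_off_addr, value_off_addr - key_off_addr).
      return key_address + field_delta;
    }
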
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
-                              Code::Flags flags,
-                              Register receiver,
-                              Register name,
-                              Register scratch,
-                              Register extra,
-                              Register extra2,
-                              Register extra3) {
-  Isolate* isolate = masm->isolate();
-  Label miss;
-
-  // Make sure the flags do not name a specific type.
-  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
-
-  // Make sure that there are no register conflicts.
-  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
-  // Make sure the extra, extra2 and extra3 registers are valid.
-  DCHECK(!extra.is(no_reg));
-  DCHECK(!extra2.is(no_reg));
-  DCHECK(!extra3.is(no_reg));
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
-                      extra2, extra3);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Compute the hash for the primary table.
-  __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
-  __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Add(scratch, scratch, extra);
-  __ Eor(scratch, scratch, flags);
-  // We shift out the last two bits because they are not part of the hash.
-  __ Ubfx(scratch, scratch, kCacheIndexShift,
-          CountTrailingZeros(kPrimaryTableSize, 64));
-
-  // Probe the primary table.
-  ProbeTable(isolate, masm, flags, kPrimary, receiver, name,
-             scratch, extra, extra2, extra3);
-
-  // Primary miss: compute the hash for the secondary table.
-  __ Sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
-  __ Add(scratch, scratch, flags >> kCacheIndexShift);
-  __ And(scratch, scratch, kSecondaryTableSize - 1);
-
-  // Probe the secondary table.
-  ProbeTable(isolate, masm, flags, kSecondary, receiver, name,
-             scratch, extra, extra2, extra3);
-
-  // Cache miss: fall through and let the caller handle the miss by
-  // entering the runtime system.
-  __ Bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
-                      extra2, extra3);
-}
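The primary and secondary probes use different index functions. In scalar C++ the emitted arithmetic is roughly the following (a sketch; kCacheIndexShift == 2 follows from the "shift out the last two bits" comment, while the table sizes are illustrative placeholders for the power-of-two constants in stub-cache.h):

    #include <cstdint>

    // Sketch of the probe index math; not the emitted code.
    const int kCacheIndexShift = 2;            // low bits not part of the hash
    const uint32_t kPrimaryTableSize = 2048;   // illustrative power of two
    const uint32_t kSecondaryTableSize = 512;  // illustrative power of two

    uint32_t PrimaryIndex(uint32_t name_hash_field, uint32_t map_bits,
                          uint32_t flags) {
      // Add + Eor, then Ubfx keeps log2(kPrimaryTableSize) bits.
      uint32_t key = (name_hash_field + map_bits) ^ flags;
      return (key >> kCacheIndexShift) & (kPrimaryTableSize - 1);
    }

    uint32_t SecondaryIndex(uint32_t primary, uint32_t name_bits,
                            uint32_t flags) {
      // The Sub/Add/And sequence that follows a primary miss.
      return (primary - (name_bits >> kCacheIndexShift) +
              (flags >> kCacheIndexShift)) &
             (kSecondaryTableSize - 1);
    }
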
-
-
 void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
     MacroAssembler* masm, int index, Register prototype, Label* miss) {
   Isolate* isolate = masm->isolate();
   // Get the global function with the given index.
   Handle<JSFunction> function(
       JSFunction::cast(isolate->native_context()->get(index)));

   // Check we're still in the same context.
   Register scratch = prototype;
   __ Ldr(scratch, GlobalObjectMemOperand());

(... 160 unchanged lines skipped ...)
 // Generate StoreTransition code, value is passed in x0 register.
 // When leaving generated code after success, the receiver_reg and storage_reg
 // may be clobbered. Upon branch to miss_label, the receiver and name registers
 // have their original values.
 void NamedStoreHandlerCompiler::GenerateStoreTransition(
     Handle<Map> transition, Handle<Name> name, Register receiver_reg,
     Register storage_reg, Register value_reg, Register scratch1,
     Register scratch2, Register scratch3, Label* miss_label, Label* slow) {
   Label exit;

-  DCHECK(!AreAliased(receiver_reg, storage_reg, value_reg,
-                     scratch1, scratch2, scratch3));
+  DCHECK(!AreAliased(receiver_reg, storage_reg, value_reg, scratch1, scratch2,
+                     scratch3));

   // We don't need scratch3.
   scratch3 = NoReg;

   int descriptor = transition->LastAdded();
   DescriptorArray* descriptors = transition->instance_descriptors();
   PropertyDetails details = descriptors->GetDetails(descriptor);
   Representation representation = details.representation();
   DCHECK(!representation.IsNone());

(... 23 unchanged lines skipped ...)
       __ Bind(&do_store);
     }
   } else if (representation.IsDouble()) {
     UseScratchRegisterScope temps(masm());
     DoubleRegister temp_double = temps.AcquireD();
     __ SmiUntagToDouble(temp_double, value_reg, kSpeculativeUntag);

     Label do_store;
     __ JumpIfSmi(value_reg, &do_store);

-    __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
-                miss_label, DONT_DO_SMI_CHECK);
+    __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, miss_label,
+                DONT_DO_SMI_CHECK);
     __ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset));

     __ Bind(&do_store);
     __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2, temp_double,
                           NoReg, MUTABLE);
   }

   // Stub never generated for objects that require access checks.
   DCHECK(!transition->is_access_check_needed());

   // Perform map transition for the receiver if necessary.
   if (details.type() == FIELD &&
       Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
     // The properties must be extended before we can store the value.
     // We jump to a runtime call that extends the properties array.
     __ Mov(scratch1, Operand(transition));
     __ Push(receiver_reg, scratch1, value_reg);
     __ TailCallExternalReference(
         ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
                           isolate()),
         3, 1);
     return;
   }

   // Update the map of the object.
   __ Mov(scratch1, Operand(transition));
   __ Str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));

   // Update the write barrier for the map field.
-  __ RecordWriteField(receiver_reg,
-                      HeapObject::kMapOffset,
-                      scratch1,
-                      scratch2,
-                      kLRHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
+  __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);

   if (details.type() == CONSTANT) {
     DCHECK(value_reg.is(x0));
     __ Ret();
     return;
   }

   int index = transition->instance_descriptors()->GetFieldIndex(
       transition->LastAdded());

   // Adjust for the number of properties stored in the object. Even in the
   // face of a transition we can use the old map here because the size of the
   // object and the number of in-object properties is not going to change.
   index -= transition->inobject_properties();

   // TODO(verwaest): Share this code as a code stub.
-  SmiCheck smi_check = representation.IsTagged()
-      ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+  SmiCheck smi_check =
+      representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
   Register prop_reg = representation.IsDouble() ? storage_reg : value_reg;
   if (index < 0) {
     // Set the property straight into the object.
     int offset = transition->instance_size() + (index * kPointerSize);
     __ Str(prop_reg, FieldMemOperand(receiver_reg, offset));

     if (!representation.IsSmi()) {
       // Update the write barrier for the array address.
       if (!representation.IsDouble()) {
         __ Mov(storage_reg, value_reg);
       }
-      __ RecordWriteField(receiver_reg,
-                          offset,
-                          storage_reg,
-                          scratch1,
-                          kLRHasNotBeenSaved,
-                          kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
+      __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
+                          kLRHasNotBeenSaved, kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET, smi_check);
     }
   } else {
     // Write to the properties array.
     int offset = index * kPointerSize + FixedArray::kHeaderSize;
     // Get the properties array
     __ Ldr(scratch1,
            FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
     __ Str(prop_reg, FieldMemOperand(scratch1, offset));

     if (!representation.IsSmi()) {
       // Update the write barrier for the array address.
       if (!representation.IsDouble()) {
         __ Mov(storage_reg, value_reg);
       }
-      __ RecordWriteField(scratch1,
-                          offset,
-                          storage_reg,
-                          receiver_reg,
-                          kLRHasNotBeenSaved,
-                          kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
+      __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
+                          kLRHasNotBeenSaved, kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET, smi_check);
     }
   }

   __ Bind(&exit);
   // Return the value (register x0).
   DCHECK(value_reg.is(x0));
   __ Ret();
 }
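The sign convention on `index` above is worth spelling out: after subtracting `inobject_properties()`, negative indices address in-object slots and non-negative ones land in the properties FixedArray. A sketch of the offset computation the two branches share:

    // Sketch of the field-offset selection above; illustrative only.
    int FieldOffset(Map* transition, int adjusted_index) {
      if (adjusted_index < 0) {
        // In-object slot, counted back from the end of the instance.
        return transition->instance_size() + adjusted_index * kPointerSize;
      }
      // Out-of-object slot in the properties backing store.
      return FixedArray::kHeaderSize + adjusted_index * kPointerSize;
    }
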


(... 54 unchanged lines skipped ...)
            !current_map->is_access_check_needed());

     prototype = handle(JSObject::cast(current_map->prototype()));
     if (current_map->is_dictionary_map() &&
         !current_map->IsJSGlobalObjectMap()) {
       DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
       if (!name->IsUniqueName()) {
         DCHECK(name->IsString());
         name = factory()->InternalizeString(Handle<String>::cast(name));
       }
-      DCHECK(current.is_null() ||
-             (current->property_dictionary()->FindEntry(name) ==
-              NameDictionary::kNotFound));
+      DCHECK(current.is_null() || (current->property_dictionary()->FindEntry(
+                                      name) == NameDictionary::kNotFound));

-      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
-                                       scratch1, scratch2);
+      GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+                                       scratch2);

       __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
       reg = holder_reg;  // From now on the object will be in holder_reg.
       __ Ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
     } else {
       // Two possible reasons for loading the prototype from the map:
       // (1) Can't store references to new space in code.
       // (2) Handler is shared for all receivers with the same prototype
       //     map (but not necessarily the same prototype instance).
       bool load_prototype_from_map =
           heap()->InNewSpace(*prototype) || depth == 1;
       Register map_reg = scratch1;
       __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));

       if (depth != 1 || check == CHECK_ALL_MAPS) {
         __ CheckMap(map_reg, current_map, miss, DONT_DO_SMI_CHECK);
       }

       // Check access rights to the global object. This has to happen after
       // the map check so that we know that the object is actually a global
       // object.
       // This allows us to install generated handlers for accesses to the
       // global proxy (as opposed to using slow ICs). See corresponding code
       // in LookupForRead().
       if (current_map->IsJSGlobalProxyMap()) {
         UseScratchRegisterScope temps(masm());
         __ CheckAccessGlobalProxy(reg, scratch2, temps.AcquireX(), miss);
       } else if (current_map->IsJSGlobalObjectMap()) {
-        GenerateCheckPropertyCell(
-            masm(), Handle<JSGlobalObject>::cast(current), name,
-            scratch2, miss);
+        GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+                                  name, scratch2, miss);
       }

       reg = holder_reg;  // From now on the object will be in holder_reg.

       if (load_prototype_from_map) {
         __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
       } else {
         __ Mov(reg, Operand(prototype));
       }
     }
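The `load_prototype_from_map` flag in the else branch packs two independent constraints into one boolean; stated explicitly (a sketch, not V8 code):

    // Sketch: when must the prototype be reloaded from the map rather than
    // baked into the handler as a constant?
    bool LoadPrototypeFromMap(Heap* heap, JSObject* prototype, int depth) {
      // (1) Code objects may not embed pointers into new space.
      // (2) At depth 1 the handler is shared by every receiver with the same
      //     prototype map, so the prototype instance must come from the
      //     receiver's map at run time.
      return heap->InNewSpace(prototype) || depth == 1;
    }
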
(... 69 unchanged lines skipped ...)

   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
   STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
   STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
   STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
   STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);

   __ Push(receiver());

   if (heap()->InNewSpace(callback->data())) {
     __ Mov(scratch3(), Operand(callback));
-    __ Ldr(scratch3(), FieldMemOperand(scratch3(),
-                                       ExecutableAccessorInfo::kDataOffset));
+    __ Ldr(scratch3(),
+           FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset));
   } else {
     __ Mov(scratch3(), Operand(Handle<Object>(callback->data(), isolate())));
   }
   __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
   __ Mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
   __ Push(scratch3(), scratch4(), scratch4(), scratch2(), reg, name());

   Register args_addr = scratch2();
   __ Add(args_addr, __ StackPointer(), kPointerSize);

(... 15 unchanged lines skipped ...)

   ExternalReference ref = ExternalReference(&fun, type, isolate());
   __ Mov(getter_address_reg, ref);

   CallApiGetterStub stub(isolate());
   __ TailCallStub(&stub);
 }
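For orientation, the two pushes above build the PropertyCallbackArguments frame that `args_addr` then points at. Indices 2 through 5 are pinned by the STATIC_ASSERTs in this hunk; the holder and isolate slots at 0 and 1 are inferred from the push order and are an assumption here:

    // Sketch of the stack after
    //   __ Push(scratch3(), scratch4(), scratch4(), scratch2(), reg, name());
    // with args_addr = StackPointer() + kPointerSize:
    //   sp[0]        : name            (below args_addr; consumed separately)
    //   args_addr[0] : reg             (holder; slot index assumed)
    //   args_addr[1] : isolate address (slot index assumed)
    //   args_addr[2] : undefined       (kReturnValueDefaultValueIndex == 2)
    //   args_addr[3] : undefined       (kReturnValueOffset == 3)
    //   args_addr[4] : callback data   (kDataIndex == 4)
    //   args_addr[5] : receiver        (kThisIndex == 5)
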


 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
-  DCHECK(!AreAliased(receiver(), this->name(),
-                     scratch1(), scratch2(), scratch3()));
+  DCHECK(!AreAliased(receiver(), this->name(), scratch1(), scratch2(),
+                     scratch3()));
   DCHECK(holder()->HasNamedInterceptor());
   DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());

   // Compile the interceptor call, followed by inline code to load the
   // property from further up the prototype chain if the call fails.
   // Check that the maps haven't changed.
   DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));

   // Preserve the receiver register explicitly whenever it is different from
   // the holder and it is needed should the interceptor return without any
   // result.

(... 104 unchanged lines skipped ...)
     if (!setter.is_null()) {
       // Call the JavaScript setter with receiver and value on the stack.
       if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
         // Swap in the global receiver.
         __ Ldr(receiver,
                FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
       }
       __ Push(receiver, value());
       ParameterCount actual(1);
       ParameterCount expected(setter);
-      __ InvokeFunction(setter, expected, actual,
-                        CALL_FUNCTION, NullCallWrapper());
+      __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+                        NullCallWrapper());
     } else {
       // If we generate a global code snippet for deoptimization only, remember
       // the place to continue after deoptimization.
       masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
     }

     // We have to return the passed value, not the return value of the setter.
     __ Pop(x0);

     // Restore context register.

(... 29 unchanged lines skipped ...)
 // example, PropertyAccessCompiler::keyed_store_calling_convention()[3] (x3)
 // is actually used for KeyedStoreCompiler::transition_map(). We should
 // verify which registers are actually scratch registers, and which are
 // important. For now, we use the same assignments as ARM to remain on the
 // safe side.

 Register* PropertyAccessCompiler::load_calling_convention() {
   // receiver, name, scratch1, scratch2, scratch3, scratch4.
   Register receiver = LoadIC::ReceiverRegister();
   Register name = LoadIC::NameRegister();
-  static Register registers[] = { receiver, name, x3, x0, x4, x5 };
+  static Register registers[] = {receiver, name, x3, x0, x4, x5};
   return registers;
 }


 Register* PropertyAccessCompiler::store_calling_convention() {
   // receiver, value, scratch1, scratch2, scratch3.
   Register receiver = StoreIC::ReceiverRegister();
   Register name = StoreIC::NameRegister();
   DCHECK(x3.is(KeyedStoreIC::MapRegister()));
-  static Register registers[] = { receiver, name, x3, x4, x5 };
+  static Register registers[] = {receiver, name, x3, x4, x5};
   return registers;
 }


 Register NamedStoreHandlerCompiler::value() { return StoreIC::ValueRegister(); }


 #undef __
 #define __ ACCESS_MASM(masm)

 void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
     MacroAssembler* masm, Handle<HeapType> type, Register receiver,
     Handle<JSFunction> getter) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);

     if (!getter.is_null()) {
       // Call the JavaScript getter with the receiver on the stack.
       if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
         // Swap in the global receiver.
         __ Ldr(receiver,
                FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
       }
       __ Push(receiver);
       ParameterCount actual(0);
       ParameterCount expected(getter);
-      __ InvokeFunction(getter, expected, actual,
-                        CALL_FUNCTION, NullCallWrapper());
+      __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+                        NullCallWrapper());
     } else {
       // If we generate a global code snippet for deoptimization only, remember
       // the place to continue after deoptimization.
       masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
     }

     // Restore context register.
     __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   }
   __ Ret();

(... 141 unchanged lines skipped ...)
   __ Bind(&slow);
   __ IncrementCounter(
       masm->isolate()->counters()->keyed_load_external_array_slow(), 1, x4, x3);
   TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);

   // Miss case, call the runtime.
   __ Bind(&miss);
   TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
 }

-
-} }  // namespace v8::internal
+#undef __
+}
+}  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_ARM64