Chromium Code Reviews

Side by Side Diff: src/a64/ic-a64.cc

Issue 148293020: Merge experimental/a64 to bleeding_edge. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Remove ARM from OWNERS (created 6 years, 10 months ago)
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #if V8_TARGET_ARCH_A64
31
32 #include "a64/assembler-a64.h"
33 #include "code-stubs.h"
34 #include "codegen.h"
35 #include "disasm.h"
36 #include "ic-inl.h"
37 #include "runtime.h"
38 #include "stub-cache.h"
39
40 namespace v8 {
41 namespace internal {
42
43
44 #define __ ACCESS_MASM(masm)
45
46
47 // "type" holds an instance type on entry and is not clobbered.
48 // Generated code branches to "global_object" if type is any kind of global
49 // JS object.
50 static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
51 Register type,
52 Label* global_object) {
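// The Ccmp chain below implements a three-way equality test: each Ccmp only
// performs its comparison when the previous one failed (condition ne) and
// forces the Z flag when it already succeeded, so the final B(eq) is taken if
// 'type' matches any of the three global object types.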
53 __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
54 __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
55 __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
56 __ B(eq, global_object);
57 }
58
59
60 // Generated code falls through if the receiver is a regular non-global
61 // JS object with slow properties and no interceptors.
62 //
63 // "receiver" holds the receiver on entry and is unchanged.
64 // "elements" holds the property dictionary on fall through.
65 static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
66 Register receiver,
67 Register elements,
68 Register scratch0,
69 Register scratch1,
70 Label* miss) {
71 ASSERT(!AreAliased(receiver, elements, scratch0, scratch1));
72
73 // Check that the receiver isn't a smi.
74 __ JumpIfSmi(receiver, miss);
75
76 // Check that the receiver is a valid JS object.
77 // Let t be the object instance type; we want:
78 // FIRST_SPEC_OBJECT_TYPE <= t <= LAST_SPEC_OBJECT_TYPE.
79 // Since LAST_SPEC_OBJECT_TYPE is the last possible instance type, we only
80 // check the lower bound.
81 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
82
83 __ JumpIfObjectType(receiver, scratch0, scratch1, FIRST_SPEC_OBJECT_TYPE,
84 miss, lt);
85
86 // scratch0 now contains the map of the receiver and scratch1 the object type.
87 Register map = scratch0;
88 Register type = scratch1;
89
90 // Check if the receiver is a global JS object.
91 GenerateGlobalInstanceTypeCheck(masm, type, miss);
92
93 // Check that the object does not require access checks.
94 __ Ldrb(scratch1, FieldMemOperand(map, Map::kBitFieldOffset));
95 __ Tbnz(scratch1, Map::kIsAccessCheckNeeded, miss);
96 __ Tbnz(scratch1, Map::kHasNamedInterceptor, miss);
97
98 // Check that the properties dictionary is valid.
99 __ Ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
100 __ Ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
101 __ JumpIfNotRoot(scratch1, Heap::kHashTableMapRootIndex, miss);
102 }
103
104
105 // Helper function used from LoadIC GenerateNormal.
106 //
107 // elements: Property dictionary. It is not clobbered if a jump to the miss
108 // label is done.
109 // name: Property name. It is not clobbered if a jump to the miss label is
110 // done.
111 // result: Register for the result. It is only updated if a jump to the miss
112 // label is not done.
113 // The scratch registers need to be different from elements, name and result.
114 // The generated code assumes that the receiver has slow properties,
115 // is not a global object and does not have interceptors.
116 static void GenerateDictionaryLoad(MacroAssembler* masm,
117 Label* miss,
118 Register elements,
119 Register name,
120 Register result,
121 Register scratch1,
122 Register scratch2) {
123 ASSERT(!AreAliased(elements, name, scratch1, scratch2));
124 ASSERT(!AreAliased(result, scratch1, scratch2));
125
126 Label done;
127
128 // Probe the dictionary.
129 NameDictionaryLookupStub::GeneratePositiveLookup(masm,
130 miss,
131 &done,
132 elements,
133 name,
134 scratch1,
135 scratch2);
136
137 // If probing finds an entry check that the value is a normal property.
138 __ Bind(&done);
139
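// Each NameDictionary entry is a (key, value, details) triple: the value sits
// one pointer and the details two pointers past the entry's key. scratch2, as
// left by GeneratePositiveLookup, combines with the offsets below to address
// the fields of the matching entry.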
140 static const int kElementsStartOffset = NameDictionary::kHeaderSize +
141 NameDictionary::kElementsStartIndex * kPointerSize;
142 static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
143 __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
144 __ Tst(scratch1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
145 __ B(ne, miss);
146
147 // Get the value at the masked, scaled index and return.
148 __ Ldr(result,
149 FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
150 }
151
152
153 // Helper function used from StoreIC::GenerateNormal.
154 //
155 // elements: Property dictionary. It is not clobbered if a jump to the miss
156 // label is done.
157 // name: Property name. It is not clobbered if a jump to the miss label is
158 // done.
159 // value: The value to store (never clobbered).
160 //
161 // The generated code assumes that the receiver has slow properties,
162 // is not a global object and does not have interceptors.
163 static void GenerateDictionaryStore(MacroAssembler* masm,
164 Label* miss,
165 Register elements,
166 Register name,
167 Register value,
168 Register scratch1,
169 Register scratch2) {
170 ASSERT(!AreAliased(elements, name, value, scratch1, scratch2));
171
172 Label done;
173
174 // Probe the dictionary.
175 NameDictionaryLookupStub::GeneratePositiveLookup(masm,
176 miss,
177 &done,
178 elements,
179 name,
180 scratch1,
181 scratch2);
182
183 // If probing finds an entry in the dictionary check that the value
184 // is a normal property that is not read only.
185 __ Bind(&done);
186
187 static const int kElementsStartOffset = NameDictionary::kHeaderSize +
188 NameDictionary::kElementsStartIndex * kPointerSize;
189 static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
190 static const int kTypeAndReadOnlyMask =
191 PropertyDetails::TypeField::kMask |
192 PropertyDetails::AttributesField::encode(READ_ONLY);
193 __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
194 __ Tst(scratch1, kTypeAndReadOnlyMask);
195 __ B(ne, miss);
196
197 // Store the value at the masked, scaled index and return.
198 static const int kValueOffset = kElementsStartOffset + kPointerSize;
199 __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
200 __ Str(value, MemOperand(scratch2));
201
202 // Update the write barrier. Make sure not to clobber the value.
203 __ Mov(scratch1, value);
204 __ RecordWrite(
205 elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
206 }
207
208
209 // Checks the receiver for special cases (value type, slow case bits).
210 // Falls through for regular JS objects and returns the map of the
211 // receiver in 'map_scratch' if the receiver is not a smi.
212 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
213 Register receiver,
214 Register map_scratch,
215 Register scratch,
216 int interceptor_bit,
217 Label* slow) {
218 ASSERT(!AreAliased(map_scratch, scratch));
219
220 // Check that the object isn't a smi.
221 __ JumpIfSmi(receiver, slow);
222 // Get the map of the receiver.
223 __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
224 // Check bit field.
225 __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
226 __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
227 __ Tbnz(scratch, interceptor_bit, slow);
228
229 // Check that the object is some kind of JS object EXCEPT JS Value type.
230 // In the case that the object is a value-wrapper object, we enter the
231 // runtime system to make sure that indexing into string objects works
232 // as intended.
233 STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
234 __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
235 __ Cmp(scratch, JS_OBJECT_TYPE);
236 __ B(lt, slow);
237 }
238
239
240 // Loads an indexed element from a fast case array.
241 // If not_fast_array is NULL, doesn't perform the elements map check.
242 //
243 // receiver - holds the receiver on entry.
244 // Unchanged unless 'result' is the same register.
245 //
246 // key - holds the smi key on entry.
247 // Unchanged unless 'result' is the same register.
248 //
249 // elements - holds the elements of the receiver on exit.
250 //
251 // elements_map - holds the elements map on exit if the not_fast_array branch is
252 // taken. Otherwise, this is used as a scratch register.
253 //
254 // result - holds the result on exit if the load succeeded.
255 // Allowed to be the same as 'receiver' or 'key'.
256 // Unchanged on bailout so 'receiver' and 'key' can be safely
257 // used by further computation.
258 static void GenerateFastArrayLoad(MacroAssembler* masm,
259 Register receiver,
260 Register key,
261 Register elements,
262 Register elements_map,
263 Register scratch2,
264 Register result,
265 Label* not_fast_array,
266 Label* slow) {
267 ASSERT(!AreAliased(receiver, key, elements, elements_map, scratch2));
268
269 // Check for fast array.
270 __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
271 if (not_fast_array != NULL) {
272 // Check that the object is in fast mode and writable.
273 __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
274 __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
275 not_fast_array);
276 } else {
277 __ AssertFastElements(elements);
278 }
279
280 // The elements_map register is only used for the not_fast_array path, which
281 // was handled above. From this point onward it is a scratch register.
282 Register scratch1 = elements_map;
283
284 // Check that the key (index) is within bounds.
285 __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
286 __ Cmp(key, scratch1);
287 __ B(hs, slow);
288
289 // Fast case: Do the load.
290 __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
291 __ SmiUntag(scratch2, key);
292 __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
293
294 // In case the loaded value is the_hole we have to consult GetProperty
295 // to ensure the prototype chain is searched.
296 __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);
297
298 // Move the value to the result register.
299 // 'result' can alias with 'receiver' or 'key' but these two must be
300 // preserved if we jump to 'slow'.
301 __ Mov(result, scratch2);
302 }
303
304
305 // Checks whether a key is an array index string or a unique name.
306 // Falls through if the key is a unique name.
307 // The map of the key is returned in 'map_scratch'.
308 // If the jump to 'index_string' is taken, the hash of the key is left
309 // in 'hash_scratch'.
310 static void GenerateKeyNameCheck(MacroAssembler* masm,
311 Register key,
312 Register map_scratch,
313 Register hash_scratch,
314 Label* index_string,
315 Label* not_unique) {
316 ASSERT(!AreAliased(key, map_scratch, hash_scratch));
317
318 // Is the key a name?
319 Label unique;
320 __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
321 not_unique, hi);
322 STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
323 __ B(eq, &unique);
324
325 // Is the string an array index with cached numeric value?
326 __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
327 __ TestAndBranchIfAllClear(hash_scratch,
328 Name::kContainsCachedArrayIndexMask,
329 index_string);
330
331 // Is the string internalized? We know it's a string, so a single bit test is
332 // enough.
333 __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
334 STATIC_ASSERT(kInternalizedTag == 0);
335 __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);
336
337 __ Bind(&unique);
338 // Fall through if the key is a unique name.
339 }
340
341
342 // Neither 'object' nor 'key' is modified by this function.
343 //
344 // If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is
345 // left with the object's elements map. Otherwise, it is used as a scratch
346 // register.
347 static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
348 Register object,
349 Register key,
350 Register map,
351 Register scratch1,
352 Register scratch2,
353 Label* unmapped_case,
354 Label* slow_case) {
355 ASSERT(!AreAliased(object, key, map, scratch1, scratch2));
356
357 Heap* heap = masm->isolate()->heap();
358
359 // Check that the receiver is a JSObject. Because of the elements
360 // map check later, we do not need to check for interceptors or
361 // whether it requires access checks.
362 __ JumpIfSmi(object, slow_case);
363 // Check that the object is some kind of JSObject.
364 __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE,
365 slow_case, lt);
366
367 // Check that the key is a positive smi.
368 __ JumpIfNotSmi(key, slow_case);
369 __ Tbnz(key, kXSignBit, slow_case);
370
371 // Load the elements object and check its map.
372 Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
373 __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
374 __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
375
376 // Check if element is in the range of mapped arguments. If not, jump
377 // to the unmapped lookup.
378 __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset));
379 __ Sub(scratch1, scratch1, Operand(Smi::FromInt(2)));
380 __ Cmp(key, scratch1);
381 __ B(hs, unmapped_case);
382
383 // Load element index and check whether it is the hole.
384 static const int offset =
385 FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
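// The first two elements of the parameter map hold the context and the
// arguments backing store (see GenerateUnmappedArgumentsLookup below), which
// is why the length check above subtracts 2 and this offset skips two
// pointers.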
386
387 __ Add(scratch1, map, offset);
388 __ SmiUntag(scratch2, key);
389 __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
390 __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);
391
392 // Load value from context and return it.
393 __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize));
394 __ SmiUntag(scratch1);
395 __ Add(scratch2, scratch2, Context::kHeaderSize - kHeapObjectTag);
396 return MemOperand(scratch2, scratch1, LSL, kPointerSizeLog2);
397 }
398
399
400 // The 'parameter_map' register must be loaded with the parameter map of the
401 // arguments object and is overwritten.
402 static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
403 Register key,
404 Register parameter_map,
405 Register scratch,
406 Label* slow_case) {
407 ASSERT(!AreAliased(key, parameter_map, scratch));
408
409 // The element is in the arguments backing store, which is referenced by the
410 // second element of the parameter_map.
411 const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
412 Register backing_store = parameter_map;
413 __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
414 Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
415 __ CheckMap(
416 backing_store, scratch, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
417 __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
418 __ Cmp(key, scratch);
419 __ B(hs, slow_case);
420
421 __ Add(backing_store,
422 backing_store,
423 FixedArray::kHeaderSize - kHeapObjectTag);
424 __ SmiUntag(scratch, key);
425 return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
426 }
427
428
429 void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
430 ExtraICState extra_state) {
431 // ----------- S t a t e -------------
432 // -- x2 : name
433 // -- lr : return address
434 // -- x0 : receiver
435 // -----------------------------------
436
437 // Probe the stub cache.
438 Code::Flags flags = Code::ComputeFlags(
439 Code::HANDLER, MONOMORPHIC, extra_state,
440 Code::NORMAL, Code::LOAD_IC);
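// x0 holds the receiver and x2 the name (see the state comment above); the
// remaining registers are handed to the probe as scratch registers.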
441 masm->isolate()->stub_cache()->GenerateProbe(
442 masm, flags, x0, x2, x3, x4, x5, x6);
443
444 // Cache miss: Jump to runtime.
445 GenerateMiss(masm);
446 }
447
448
449 void LoadIC::GenerateNormal(MacroAssembler* masm) {
450 // ----------- S t a t e -------------
451 // -- x2 : name
452 // -- lr : return address
453 // -- x0 : receiver
454 // -----------------------------------
455 Label miss;
456
457 GenerateNameDictionaryReceiverCheck(masm, x0, x1, x3, x4, &miss);
458
459 // x1 now holds the property dictionary.
460 GenerateDictionaryLoad(masm, &miss, x1, x2, x0, x3, x4);
461 __ Ret();
462
463 // Cache miss: Jump to runtime.
464 __ Bind(&miss);
465 GenerateMiss(masm);
466 }
467
468
469 void LoadIC::GenerateMiss(MacroAssembler* masm) {
470 // ----------- S t a t e -------------
471 // -- x2 : name
472 // -- lr : return address
473 // -- x0 : receiver
474 // -----------------------------------
475 Isolate* isolate = masm->isolate();
476 ASM_LOCATION("LoadIC::GenerateMiss");
477
478 __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);
479
480 // TODO(jbramley): Does the target actually expect an argument in x3, or is
481 // this inherited from ARM's push semantics?
482 __ Mov(x3, x0);
483 __ Push(x3, x2);
484
485 // Perform tail call to the entry.
486 ExternalReference ref =
487 ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
488 __ TailCallExternalReference(ref, 2, 1);
489 }
490
491
492 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
493 // ---------- S t a t e --------------
494 // -- x2 : name
495 // -- lr : return address
496 // -- x0 : receiver
497 // -----------------------------------
498
499 // TODO(jbramley): Does the target actually expect an argument in x3, or is
500 // this inherited from ARM's push semantics?
501 __ Mov(x3, x0);
502 __ Push(x3, x2);
503
504 __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
505 }
506
507
508 void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
509 // ---------- S t a t e --------------
510 // -- lr : return address
511 // -- x0 : key
512 // -- x1 : receiver
513 // -----------------------------------
514 Register result = x0;
515 Register key = x0;
516 Register receiver = x1;
517 Label miss, unmapped;
518
519 Register map_scratch = x2;
520 MemOperand mapped_location = GenerateMappedArgumentsLookup(
521 masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss);
522 __ Ldr(result, mapped_location);
523 __ Ret();
524
525 __ Bind(&unmapped);
526 // The parameter map is left in map_scratch when the jump to &unmapped is taken.
527 MemOperand unmapped_location =
528 GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss);
529 __ Ldr(x2, unmapped_location);
530 __ JumpIfRoot(x2, Heap::kTheHoleValueRootIndex, &miss);
531 // Move the result into x0. x0 must be preserved on miss.
532 __ Mov(result, x2);
533 __ Ret();
534
535 __ Bind(&miss);
536 GenerateMiss(masm);
537 }
538
539
540 void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
541 ASM_LOCATION("KeyedStoreIC::GenerateNonStrictArguments");
542 // ---------- S t a t e --------------
543 // -- lr : return address
544 // -- x0 : value
545 // -- x1 : key
546 // -- x2 : receiver
547 // -----------------------------------
548
549 Label slow, notin;
550
551 Register value = x0;
552 Register key = x1;
553 Register receiver = x2;
554 Register map = x3;
555
556 // These registers are used by GenerateMappedArgumentsLookup to build a
557 // MemOperand. They are live for as long as the MemOperand is live.
558 Register mapped1 = x4;
559 Register mapped2 = x5;
560
561 MemOperand mapped =
562 GenerateMappedArgumentsLookup(masm, receiver, key, map,
563 mapped1, mapped2,
564 &notin, &slow);
565 Operand mapped_offset = mapped.OffsetAsOperand();
566 __ Str(value, mapped);
567 __ Add(x10, mapped.base(), mapped_offset);
568 __ Mov(x11, value);
569 __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
570 __ Ret();
571
572 __ Bind(&notin);
573
574 // These registers are used by GenerateUnmappedArgumentsLookup to build a
575 // MemOperand. They are live for as long as the MemOperand is live.
576 Register unmapped1 = map; // This is assumed to alias 'map'.
577 Register unmapped2 = x4;
578 MemOperand unmapped =
579 GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
580 Operand unmapped_offset = unmapped.OffsetAsOperand();
581 __ Str(value, unmapped);
582 __ Add(x10, unmapped.base(), unmapped_offset);
583 __ Mov(x11, value);
584 __ RecordWrite(unmapped.base(), x10, x11,
585 kLRHasNotBeenSaved, kDontSaveFPRegs);
586 __ Ret();
587 __ Bind(&slow);
588 GenerateMiss(masm);
589 }
590
591
592 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
593 // ---------- S t a t e --------------
594 // -- lr : return address
595 // -- x0 : key
596 // -- x1 : receiver
597 // -----------------------------------
598 Isolate* isolate = masm->isolate();
599
600 __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);
601
602 __ Push(x1, x0);
603
604 // Perform tail call to the entry.
605 ExternalReference ref =
606 ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
607
608 __ TailCallExternalReference(ref, 2, 1);
609 }
610
611
612 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
613 // ---------- S t a t e --------------
614 // -- lr : return address
615 // -- x0 : key
616 // -- x1 : receiver
617 // -----------------------------------
618 Register key = x0;
619 Register receiver = x1;
620
621 __ Push(receiver, key);
622 __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
623 }
624
625
626 static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm,
627 Register key,
628 Register receiver,
629 Register scratch1,
630 Register scratch2,
631 Register scratch3,
632 Register scratch4,
633 Register scratch5,
634 Label *slow) {
635 ASSERT(!AreAliased(
636 key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
637
638 Isolate* isolate = masm->isolate();
639 Label check_number_dictionary;
640 // If we can load the value, it should be returned in x0.
641 Register result = x0;
642
643 GenerateKeyedLoadReceiverCheck(
644 masm, receiver, scratch1, scratch2, Map::kHasIndexedInterceptor, slow);
645
646 // Check the receiver's map to see if it has fast elements.
647 __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
648
649 GenerateFastArrayLoad(
650 masm, receiver, key, scratch3, scratch2, scratch1, result, NULL, slow);
651 __ IncrementCounter(
652 isolate->counters()->keyed_load_generic_smi(), 1, scratch1, scratch2);
653 __ Ret();
654
655 __ Bind(&check_number_dictionary);
656 __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
657 __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));
658
659 // Check whether we have a number dictionary.
660 __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);
661
662 __ LoadFromNumberDictionary(
663 slow, scratch3, key, result, scratch1, scratch2, scratch4, scratch5);
664 __ Ret();
665 }
666
667 static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
668 Register key,
669 Register receiver,
670 Register scratch1,
671 Register scratch2,
672 Register scratch3,
673 Register scratch4,
674 Register scratch5,
675 Label *slow) {
676 ASSERT(!AreAliased(
677 key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
678
679 Isolate* isolate = masm->isolate();
680 Label probe_dictionary, property_array_property;
681 // If we can load the value, it should be returned in x0.
682 Register result = x0;
683
684 GenerateKeyedLoadReceiverCheck(
685 masm, receiver, scratch1, scratch2, Map::kHasNamedInterceptor, slow);
686
687 // If the receiver is a fast-case object, check the keyed lookup cache.
688 // Otherwise probe the dictionary.
689 __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
690 __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
691 __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);
692
693 // We keep the map of the receiver in scratch1.
694 Register receiver_map = scratch1;
695
696 // Load the map of the receiver, compute the keyed lookup cache hash
697 // based on 32 bits of the map pointer and the name hash.
698 __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
699 __ Mov(scratch2, Operand(receiver_map, ASR, KeyedLookupCache::kMapHashShift));
700 __ Ldr(scratch3.W(), FieldMemOperand(key, Name::kHashFieldOffset));
701 __ Eor(scratch2, scratch2, Operand(scratch3, ASR, Name::kHashShift));
702 int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
703 __ And(scratch2, scratch2, mask);
704
705 // Load the key (consisting of map and unique name) from the cache and
706 // check for match.
707 Label load_in_object_property;
708 static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
709 Label hit_on_nth_entry[kEntriesPerBucket];
710 ExternalReference cache_keys =
711 ExternalReference::keyed_lookup_cache_keys(isolate);
712
713 __ Mov(scratch3, Operand(cache_keys));
714 __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));
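// Each bucket entry in cache_keys holds a (map, name) pair, hence the
// doubled pointer-size scaling (kPointerSizeLog2 + 1) applied to the hash.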
715
716 for (int i = 0; i < kEntriesPerBucket - 1; i++) {
717 Label try_next_entry;
718 // Load the map and make scratch3 point to the next entry.
719 __ Ldr(scratch4, MemOperand(scratch3, kPointerSize * 2, PostIndex));
720 __ Cmp(receiver_map, scratch4);
721 __ B(ne, &try_next_entry);
722 __ Ldr(scratch4, MemOperand(scratch3, -kPointerSize)); // Load name
723 __ Cmp(key, scratch4);
724 __ B(eq, &hit_on_nth_entry[i]);
725 __ Bind(&try_next_entry);
726 }
727
728 // Last entry.
729 __ Ldr(scratch4, MemOperand(scratch3, kPointerSize, PostIndex));
730 __ Cmp(receiver_map, scratch4);
731 __ B(ne, slow);
732 __ Ldr(scratch4, MemOperand(scratch3));
733 __ Cmp(key, scratch4);
734 __ B(ne, slow);
735
736 // Get field offset.
737 ExternalReference cache_field_offsets =
738 ExternalReference::keyed_lookup_cache_field_offsets(isolate);
739
740 // Hit on nth entry.
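// The cached field offset counts in-object properties first: after the
// subtraction below, a negative result selects an in-object slot (relative to
// the instance size), while a non-negative result indexes the properties
// backing store.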
741 for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
742 __ Bind(&hit_on_nth_entry[i]);
743 __ Mov(scratch3, Operand(cache_field_offsets));
744 if (i != 0) {
745 __ Add(scratch2, scratch2, i);
746 }
747 __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
748 __ Ldrb(scratch5,
749 FieldMemOperand(receiver_map, Map::kInObjectPropertiesOffset));
750 __ Subs(scratch4, scratch4, scratch5);
751 __ B(ge, &property_array_property);
752 if (i != 0) {
753 __ B(&load_in_object_property);
754 }
755 }
756
757 // Load in-object property.
758 __ Bind(&load_in_object_property);
759 __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
760 __ Add(scratch5, scratch5, scratch4); // Index from start of object.
761 __ Sub(receiver, receiver, kHeapObjectTag); // Remove the heap tag.
762 __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
763 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
764 1, scratch1, scratch2);
765 __ Ret();
766
767 // Load property array property.
768 __ Bind(&property_array_property);
769 __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
770 __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
771 __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
772 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
773 1, scratch1, scratch2);
774 __ Ret();
775
776 // Do a quick inline probe of the receiver's dictionary, if it exists.
777 __ Bind(&probe_dictionary);
778 __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
779 __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
780 GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
781 // Load the property.
782 GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
783 __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
784 1, scratch1, scratch2);
785 __ Ret();
786 }
787
788
789 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
790 // ---------- S t a t e --------------
791 // -- lr : return address
792 // -- x0 : key
793 // -- x1 : receiver
794 // -----------------------------------
795 Label slow, check_name, index_smi, index_name;
796
797 Register key = x0;
798 Register receiver = x1;
799
800 __ JumpIfNotSmi(key, &check_name);
801 __ Bind(&index_smi);
802 // Now the key is known to be a smi. This place is also jumped to from below
803 // where a numeric string is converted to a smi.
804 GenerateKeyedLoadWithSmiKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);
805
806 // Slow case, key and receiver still in x0 and x1.
807 __ Bind(&slow);
808 __ IncrementCounter(
809 masm->isolate()->counters()->keyed_load_generic_slow(), 1, x2, x3);
810 GenerateRuntimeGetProperty(masm);
811
812 __ Bind(&check_name);
813 GenerateKeyNameCheck(masm, key, x2, x3, &index_name, &slow);
814
815 GenerateKeyedLoadWithNameKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);
816
817 __ Bind(&index_name);
818 __ IndexFromHash(x3, key);
819 // Now jump to the place where smi keys are handled.
820 __ B(&index_smi);
821 }
822
823
824 void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
825 // ---------- S t a t e --------------
826 // -- lr : return address
827 // -- x0 : key (index)
828 // -- x1 : receiver
829 // -----------------------------------
830 Label miss;
831
832 Register index = x0;
833 Register receiver = x1;
834 Register result = x0;
835 Register scratch = x3;
836
837 StringCharAtGenerator char_at_generator(receiver,
838 index,
839 scratch,
840 result,
841 &miss, // When not a string.
842 &miss, // When not a number.
843 &miss, // When index out of range.
844 STRING_INDEX_IS_ARRAY_INDEX);
845 char_at_generator.GenerateFast(masm);
846 __ Ret();
847
848 StubRuntimeCallHelper call_helper;
849 char_at_generator.GenerateSlow(masm, call_helper);
850
851 __ Bind(&miss);
852 GenerateMiss(masm);
853 }
854
855
856 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
857 // ---------- S t a t e --------------
858 // -- lr : return address
859 // -- x0 : key
860 // -- x1 : receiver
861 // -----------------------------------
862 Label slow;
863 Register key = x0;
864 Register receiver = x1;
865
866 // Check that the receiver isn't a smi.
867 __ JumpIfSmi(receiver, &slow);
868
869 // Check that the key is an array index, that is, a Uint32.
870 __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
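// (kSmiTagMask covers the smi tag bit and kSmiSignMask the sign bit, so the
// branch to 'slow' is taken unless the key is a non-negative smi.)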
871
872 // Get the map of the receiver.
873 Register map = x2;
874 __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
875
876 // Check that it has an indexed interceptor and that access checks
877 // are not enabled for this object.
878 __ Ldrb(x3, FieldMemOperand(map, Map::kBitFieldOffset));
879 ASSERT(kSlowCaseBitFieldMask ==
880 ((1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor)));
881 __ Tbnz(x3, Map::kIsAccessCheckNeeded, &slow);
882 __ Tbz(x3, Map::kHasIndexedInterceptor, &slow);
883
884 // Everything is fine, call runtime.
885 __ Push(receiver, key);
886 __ TailCallExternalReference(
887 ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
888 masm->isolate()),
889 2,
890 1);
891
892 __ Bind(&slow);
893 GenerateMiss(masm);
894 }
895
896
897 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
898 ASM_LOCATION("KeyedStoreIC::GenerateMiss");
899 // ---------- S t a t e --------------
900 // -- x0 : value
901 // -- x1 : key
902 // -- x2 : receiver
903 // -- lr : return address
904 // -----------------------------------
905
906 // Push receiver, key and value for runtime call.
907 __ Push(x2, x1, x0);
908
909 ExternalReference ref =
910 ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
911 __ TailCallExternalReference(ref, 3, 1);
912 }
913
914
915 void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
916 ASM_LOCATION("KeyedStoreIC::GenerateSlow");
917 // ---------- S t a t e --------------
918 // -- lr : return address
919 // -- x0 : value
920 // -- x1 : key
921 // -- x2 : receiver
922 // -----------------------------------
923
924 // Push receiver, key and value for runtime call.
925 __ Push(x2, x1, x0);
926
927 // The slow case calls into the runtime to complete the store without causing
928 // an IC miss that would otherwise cause a transition to the generic stub.
929 ExternalReference ref =
930 ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
931 __ TailCallExternalReference(ref, 3, 1);
932 }
933
934
935 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
936 StrictModeFlag strict_mode) {
937 ASM_LOCATION("KeyedStoreIC::GenerateRuntimeSetProperty");
938 // ---------- S t a t e --------------
939 // -- x0 : value
940 // -- x1 : key
941 // -- x2 : receiver
942 // -- lr : return address
943 // -----------------------------------
944
945 // Push receiver, key and value for runtime call.
946 __ Push(x2, x1, x0);
947
948 // Push PropertyAttributes(NONE) and strict_mode for runtime call.
949 STATIC_ASSERT(NONE == 0);
950 __ Mov(x10, Operand(Smi::FromInt(strict_mode)));
951 __ Push(xzr, x10);
952
953 __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
954 }
955
956
957 static void KeyedStoreGenerateGenericHelper(
958 MacroAssembler* masm,
959 Label* fast_object,
960 Label* fast_double,
961 Label* slow,
962 KeyedStoreCheckMap check_map,
963 KeyedStoreIncrementLength increment_length,
964 Register value,
965 Register key,
966 Register receiver,
967 Register receiver_map,
968 Register elements_map,
969 Register elements) {
970 ASSERT(!AreAliased(
971 value, key, receiver, receiver_map, elements_map, elements, x10, x11));
972
973 Label transition_smi_elements;
974 Label transition_double_elements;
975 Label fast_double_without_map_check;
976 Label non_double_value;
977 Label finish_store;
978
979 __ Bind(fast_object);
980 if (check_map == kCheckMap) {
981 __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
982 __ Cmp(elements_map,
983 Operand(masm->isolate()->factory()->fixed_array_map()));
984 __ B(ne, fast_double);
985 }
986
987 // HOLECHECK: guards "A[i] = V"
988 // We have to go to the runtime if the current value is the hole because there
989 // may be a callback on the element.
990 Label holecheck_passed;
991 // TODO(all): This address calculation is repeated later (for the store
992 // itself). We should keep the result to avoid doing the work twice.
993 __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
994 __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
995 __ Ldr(x11, MemOperand(x10));
996 __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
997 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
998 __ bind(&holecheck_passed);
999
1000 // Smi stores don't require further checks.
1001 __ JumpIfSmi(value, &finish_store);
1002
1003 // Escape to elements kind transition case.
1004 __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);
1005
1006 __ Bind(&finish_store);
1007 if (increment_length == kIncrementLength) {
1008 // Add 1 to receiver->length.
1009 __ Add(x10, key, Operand(Smi::FromInt(1)));
1010 __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
1011 }
1012
1013 Register address = x11;
1014 __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
1015 __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
1016 __ Str(value, MemOperand(address));
1017
1018 Label dont_record_write;
1019 __ JumpIfSmi(value, &dont_record_write);
1020
1021 // Update write barrier for the elements array address.
1022 __ Mov(x10, value); // Preserve the value which is returned.
1023 __ RecordWrite(elements,
1024 address,
1025 x10,
1026 kLRHasNotBeenSaved,
1027 kDontSaveFPRegs,
1028 EMIT_REMEMBERED_SET,
1029 OMIT_SMI_CHECK);
1030
1031 __ Bind(&dont_record_write);
1032 __ Ret();
1033
1034
1035 __ Bind(fast_double);
1036 if (check_map == kCheckMap) {
1037 // Check for fast double array case. If this fails, call through to the
1038 // runtime.
1039 __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
1040 }
1041
1042 // HOLECHECK: guards "A[i] double hole?"
1043 // We have to see if the double version of the hole is present. If so, go to
1044 // the runtime.
1045 // TODO(all): This address calculation was done earlier. We should keep the
1046 // result to avoid doing the work twice.
1047 __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
1048 __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
1049 __ Ldr(x11, MemOperand(x10));
1050 __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
1051 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
1052
1053 __ Bind(&fast_double_without_map_check);
1054 __ StoreNumberToDoubleElements(value,
1055 key,
1056 elements,
1057 x10,
1058 d0,
1059 d1,
1060 &transition_double_elements);
1061 if (increment_length == kIncrementLength) {
1062 // Add 1 to receiver->length.
1063 __ Add(x10, key, Operand(Smi::FromInt(1)));
1064 __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
1065 }
1066 __ Ret();
1067
1068
1069 __ Bind(&transition_smi_elements);
1070 // Transition the array appropriately depending on the value type.
1071 __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
1072 __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);
1073
1074 // Value is a double. Transition FAST_SMI_ELEMENTS ->
1075 // FAST_DOUBLE_ELEMENTS and complete the store.
1076 __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
1077 FAST_DOUBLE_ELEMENTS,
1078 receiver_map,
1079 x10,
1080 slow);
1081 ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
1082 AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
1083 FAST_DOUBLE_ELEMENTS);
1084 ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
1085 __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
1086 __ B(&fast_double_without_map_check);
1087
1088 __ Bind(&non_double_value);
1089 // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
1090 __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
1091 FAST_ELEMENTS,
1092 receiver_map,
1093 x10,
1094 slow);
1095 ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
1096 mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
1097 ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
1098 slow);
1099 __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
1100 __ B(&finish_store);
1101
1102 __ Bind(&transition_double_elements);
1103 // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
1104 // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
1105 // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
1106 __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
1107 FAST_ELEMENTS,
1108 receiver_map,
1109 x10,
1110 slow);
1111 ASSERT(receiver_map.Is(x3)); // Transition code expects map in x3.
1112 mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
1113 ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
1114 __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
1115 __ B(&finish_store);
1116 }
1117
1118
1119 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
1120 StrictModeFlag strict_mode) {
1121 ASM_LOCATION("KeyedStoreIC::GenerateGeneric");
1122 // ---------- S t a t e --------------
1123 // -- x0 : value
1124 // -- x1 : key
1125 // -- x2 : receiver
1126 // -- lr : return address
1127 // -----------------------------------
1128 Label slow;
1129 Label array;
1130 Label fast_object;
1131 Label extra;
1132 Label fast_object_grow;
1133 Label fast_double_grow;
1134 Label fast_double;
1135
1136 Register value = x0;
1137 Register key = x1;
1138 Register receiver = x2;
1139 Register receiver_map = x3;
1140 Register elements = x4;
1141 Register elements_map = x5;
1142
1143 __ JumpIfNotSmi(key, &slow);
1144 __ JumpIfSmi(receiver, &slow);
1145 __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
1146
1147 // Check that the receiver does not require access checks and is not observed.
1148 // The generic stub does not perform map checks or handle observed objects.
1149 __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
1150 __ TestAndBranchIfAnySet(
1151 x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);
1152
1153 // Check if the object is a JS array or not.
1154 Register instance_type = x10;
1155 __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
1156 __ B(eq, &array);
1157 // Check that the object is some kind of JSObject.
1158 __ Cmp(instance_type, FIRST_JS_OBJECT_TYPE);
1159 __ B(lt, &slow);
1160
1161 // Object case: Check key against length in the elements array.
1162 __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
1163 // Check array bounds. Both the key and the length of FixedArray are smis.
1164 __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
1165 __ Cmp(x10, Operand::UntagSmi(key));
1166 __ B(hi, &fast_object);
1167
1168
1169 __ Bind(&slow);
1170 // Slow case, handle jump to runtime.
1171 // Live values:
1172 // x0: value
1173 // x1: key
1174 // x2: receiver
1175 GenerateRuntimeSetProperty(masm, strict_mode);
1176
1177
1178 __ Bind(&extra);
1179 // Extra capacity case: Check if there is extra capacity to
1180 // perform the store and update the length. Used for adding one
1181 // element to the array by writing to array[array.length].
1182
1183 // Check for room in the elements backing store.
1184 // Both the key and the length of FixedArray are smis.
1185 __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
1186 __ Cmp(x10, Operand::UntagSmi(key));
1187 __ B(ls, &slow);
1188
1189 __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
1190 __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
1191 __ B(eq, &fast_object_grow);
1192 __ Cmp(elements_map,
1193 Operand(masm->isolate()->factory()->fixed_double_array_map()));
1194 __ B(eq, &fast_double_grow);
1195 __ B(&slow);
1196
1197
1198 __ Bind(&array);
1199 // Array case: Get the length and the elements array from the JS
1200 // array. Check that the array is in fast mode (and writable); if it
1201 // is, the length is always a smi.
1202
1203 __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
1204
1205 // Check the key against the length in the array.
1206 __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
1207 __ Cmp(x10, Operand::UntagSmi(key));
1208 __ B(eq, &extra); // We can handle the case where we are appending 1 element.
1209 __ B(lo, &slow);
1210
1211 KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
1212 &slow, kCheckMap, kDontIncrementLength,
1213 value, key, receiver, receiver_map,
1214 elements_map, elements);
1215 KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
1216 &slow, kDontCheckMap, kIncrementLength,
1217 value, key, receiver, receiver_map,
1218 elements_map, elements);
1219 }
1220
1221
1222 void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
1223 ExtraICState extra_ic_state) {
1224 // ----------- S t a t e -------------
1225 // -- x0 : value
1226 // -- x1 : receiver
1227 // -- x2 : name
1228 // -- lr : return address
1229 // -----------------------------------
1230
1231 // Probe the stub cache.
1232 Code::Flags flags = Code::ComputeFlags(
1233 Code::HANDLER, MONOMORPHIC, extra_ic_state,
1234 Code::NORMAL, Code::STORE_IC);
1235
1236 masm->isolate()->stub_cache()->GenerateProbe(
1237 masm, flags, x1, x2, x3, x4, x5, x6);
1238
1239 // Cache miss: Jump to runtime.
1240 GenerateMiss(masm);
1241 }
1242
1243
1244 void StoreIC::GenerateMiss(MacroAssembler* masm) {
1245 // ----------- S t a t e -------------
1246 // -- x0 : value
1247 // -- x1 : receiver
1248 // -- x2 : name
1249 // -- lr : return address
1250 // -----------------------------------
1251
1252 __ Push(x1, x2, x0);
1253
1254 // Tail call to the entry.
1255 ExternalReference ref =
1256 ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
1257 __ TailCallExternalReference(ref, 3, 1);
1258 }
1259
1260
1261 void StoreIC::GenerateNormal(MacroAssembler* masm) {
1262 // ----------- S t a t e -------------
1263 // -- x0 : value
1264 // -- x1 : receiver
1265 // -- x2 : name
1266 // -- lr : return address
1267 // -----------------------------------
1268 Label miss;
1269 Register value = x0;
1270 Register receiver = x1;
1271 Register name = x2;
1272 Register dictionary = x3;
1273
1274 GenerateNameDictionaryReceiverCheck(
1275 masm, receiver, dictionary, x4, x5, &miss);
1276
1277 GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
1278 Counters* counters = masm->isolate()->counters();
1279 __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
1280 __ Ret();
1281
1282 // Cache miss: Jump to runtime.
1283 __ Bind(&miss);
1284 __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
1285 GenerateMiss(masm);
1286 }
1287
1288
1289 void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
1290 StrictModeFlag strict_mode) {
1291 ASM_LOCATION("StoreIC::GenerateRuntimeSetProperty");
1292 // ----------- S t a t e -------------
1293 // -- x0 : value
1294 // -- x1 : receiver
1295 // -- x2 : name
1296 // -- lr : return address
1297 // -----------------------------------
1298
1299 __ Push(x1, x2, x0);
1300
1301 __ Mov(x11, Operand(Smi::FromInt(NONE))); // PropertyAttributes
1302 __ Mov(x10, Operand(Smi::FromInt(strict_mode)));
1303 __ Push(x11, x10);
1304
1305 // Do tail-call to runtime routine.
1306 __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
1307 }
1308
1309
1310 void StoreIC::GenerateSlow(MacroAssembler* masm) {
1311 // ---------- S t a t e --------------
1312 // -- x0 : value
1313 // -- x1 : receiver
1314 // -- x2 : name
1315 // -- lr : return address
1316 // -----------------------------------
1317
1318 // Push receiver, name and value for runtime call.
1319 __ Push(x1, x2, x0);
1320
1321 // The slow case calls into the runtime to complete the store without causing
1322 // an IC miss that would otherwise cause a transition to the generic stub.
1323 ExternalReference ref =
1324 ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
1325 __ TailCallExternalReference(ref, 3, 1);
1326 }
1327
1328
1329 Condition CompareIC::ComputeCondition(Token::Value op) {
1330 switch (op) {
1331 case Token::EQ_STRICT:
1332 case Token::EQ:
1333 return eq;
1334 case Token::LT:
1335 return lt;
1336 case Token::GT:
1337 return gt;
1338 case Token::LTE:
1339 return le;
1340 case Token::GTE:
1341 return ge;
1342 default:
1343 UNREACHABLE();
1344 return al;
1345 }
1346 }
1347
1348
1349 bool CompareIC::HasInlinedSmiCode(Address address) {
1350 // The address of the instruction following the call.
1351 Address info_address =
1352 Assembler::return_address_from_call_start(address);
1353
1354 InstructionSequence* patch_info = InstructionSequence::At(info_address);
1355 return patch_info->IsInlineData();
1356 }
1357
1358
1359 // Activate a SMI fast-path by patching the instructions generated by
1360 // JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
1361 // JumpPatchSite::EmitPatchInfo().
1362 void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
1363 // The patch information is encoded in the instruction stream using
1364 // instructions which have no side effects, so we can safely execute them.
1365 // The patch information is encoded directly after the call to the helper
1366 // function which is requesting this patch operation.
1367 Address info_address =
1368 Assembler::return_address_from_call_start(address);
1369 InlineSmiCheckInfo info(info_address);
1370
1371 // Check and decode the patch information instruction.
1372 if (!info.HasSmiCheck()) {
1373 return;
1374 }
1375
1376 if (FLAG_trace_ic) {
1377 PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n",
1378 address, info_address, reinterpret_cast<void*>(info.SmiCheck()));
1379 }
1380
1381 // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
1382 // and JumpPatchSite::EmitJumpIfSmi().
1383 // Changing
1384 // tb(n)z xzr, #0, <target>
1385 // to
1386 // tb(!n)z test_reg, #0, <target>
1387 Instruction* to_patch = info.SmiCheck();
1388 PatchingAssembler patcher(to_patch, 1);
1389 ASSERT(to_patch->IsTestBranch());
1390 ASSERT(to_patch->ImmTestBranchBit5() == 0);
1391 ASSERT(to_patch->ImmTestBranchBit40() == 0);
1392
1393 STATIC_ASSERT(kSmiTag == 0);
1394 STATIC_ASSERT(kSmiTagMask == 1);
1395
1396 int branch_imm = to_patch->ImmTestBranch();
1397 Register smi_reg;
1398 if (check == ENABLE_INLINED_SMI_CHECK) {
1399 ASSERT(to_patch->Rt() == xzr.code());
1400 smi_reg = info.SmiRegister();
1401 } else {
1402 ASSERT(check == DISABLE_INLINED_SMI_CHECK);
1403 ASSERT(to_patch->Rt() != xzr.code());
1404 smi_reg = xzr;
1405 }
1406
1407 if (to_patch->Mask(TestBranchMask) == TBZ) {
1408 // This is JumpIfNotSmi(smi_reg, branch_imm).
1409 patcher.tbnz(smi_reg, 0, branch_imm);
1410 } else {
1411 ASSERT(to_patch->Mask(TestBranchMask) == TBNZ);
1412 // This is JumpIfSmi(smi_reg, branch_imm).
1413 patcher.tbz(smi_reg, 0, branch_imm);
1414 }
1415 }
1416
1417
1418 } } // namespace v8::internal
1419
1420 #endif // V8_TARGET_ARCH_A64