Chromium Code Reviews

Side by Side Diff: src/arm/ic-arm.cc

Issue 483683005: Move IC code into a subdir and move ic-compilation related code from stub-cache into ic-compiler (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Fix BUILD.gn Created 6 years, 4 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/v8.h"
6
7 #if V8_TARGET_ARCH_ARM
8
9 #include "src/arm/assembler-arm.h"
10 #include "src/code-stubs.h"
11 #include "src/codegen.h"
12 #include "src/disasm.h"
13 #include "src/ic-inl.h"
14 #include "src/runtime.h"
15 #include "src/stub-cache.h"
16
17 namespace v8 {
18 namespace internal {
19
20
21 // ----------------------------------------------------------------------------
22 // Static IC stub generators.
23 //
24
25 #define __ ACCESS_MASM(masm)
26
27
28 static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
29 Register type,
30 Label* global_object) {
31 // Register usage:
32 // type: holds the receiver instance type on entry.
33 __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
34 __ b(eq, global_object);
35 __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
36 __ b(eq, global_object);
37 __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
38 __ b(eq, global_object);
39 }
40
41
42 // Helper function used from LoadIC GenerateNormal.
43 //
44 // elements: Property dictionary. It is not clobbered if a jump to the miss
45 // label is done.
46 // name: Property name. It is not clobbered if a jump to the miss label is
47 // done.
48 // result: Register for the result. It is only updated if a jump to the miss
49 // label is not done. It can be the same as elements or name, clobbering
50 // one of these when the miss label is not jumped to.
51 // The two scratch registers need to be different from elements, name and
52 // result.
53 // The generated code assumes that the receiver has slow properties,
54 // is not a global object and does not have interceptors.
55 static void GenerateDictionaryLoad(MacroAssembler* masm,
56 Label* miss,
57 Register elements,
58 Register name,
59 Register result,
60 Register scratch1,
61 Register scratch2) {
62 // Main use of the scratch registers.
63 // scratch1: Used as temporary and to hold the capacity of the property
64 // dictionary.
65 // scratch2: Used as temporary.
66 Label done;
67
68 // Probe the dictionary.
69 NameDictionaryLookupStub::GeneratePositiveLookup(masm,
70 miss,
71 &done,
72 elements,
73 name,
74 scratch1,
75 scratch2);
76
77 // If probing finds an entry check that the value is a normal
78 // property.
79 __ bind(&done); // scratch2 == elements + 4 * index
80 const int kElementsStartOffset = NameDictionary::kHeaderSize +
81 NameDictionary::kElementsStartIndex * kPointerSize;
82 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
83 __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
84 __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
85 __ b(ne, miss);
86
87 // Get the value at the masked, scaled index and return.
88 __ ldr(result,
89 FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
90 }
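A note on the offset arithmetic above: each NameDictionary entry is a (key, value, details) triple of consecutive pointers, so the value sits one pointer and the details word two pointers past the entry's key slot. A minimal C++ sketch of the type check, assuming that layout (IsNormalProperty is illustrative, not part of the patch):

  static inline bool IsNormalProperty(uint32_t details_smi) {
    // The details word is stored as a smi and a NORMAL property has type
    // bits of zero, hence the extra kSmiTagSize shift on the mask.
    return (details_smi &
            (PropertyDetails::TypeField::kMask << kSmiTagSize)) == 0;
  }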
91
92
93 // Helper function used from StoreIC::GenerateNormal.
94 //
95 // elements: Property dictionary. It is not clobbered if a jump to the miss
96 // label is done.
97 // name: Property name. It is not clobbered if a jump to the miss label is
98 // done.
99 // value: The value to store.
100 // The two scratch registers need to be different from elements, name and
101 // value.
102 // The generated code assumes that the receiver has slow properties,
103 // is not a global object and does not have interceptors.
104 static void GenerateDictionaryStore(MacroAssembler* masm,
105 Label* miss,
106 Register elements,
107 Register name,
108 Register value,
109 Register scratch1,
110 Register scratch2) {
111 // Main use of the scratch registers.
112 // scratch1: Used as temporary and to hold the capacity of the property
113 // dictionary.
114 // scratch2: Used as temporary.
115 Label done;
116
117 // Probe the dictionary.
118 NameDictionaryLookupStub::GeneratePositiveLookup(masm,
119 miss,
120 &done,
121 elements,
122 name,
123 scratch1,
124 scratch2);
125
126 // If probing finds an entry in the dictionary check that the value
127 // is a normal property that is not read only.
128 __ bind(&done); // scratch2 == elements + 4 * index
129 const int kElementsStartOffset = NameDictionary::kHeaderSize +
130 NameDictionary::kElementsStartIndex * kPointerSize;
131 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
132 const int kTypeAndReadOnlyMask =
133 (PropertyDetails::TypeField::kMask |
134 PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
135 __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
136 __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
137 __ b(ne, miss);
138
139 // Store the value at the masked, scaled index and return.
140 const int kValueOffset = kElementsStartOffset + kPointerSize;
141 __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
142 __ str(value, MemOperand(scratch2));
143
144 // Update the write barrier. Make sure not to clobber the value.
145 __ mov(scratch1, value);
146 __ RecordWrite(
147 elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
148 }
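A hedged companion sketch for the store path: besides being a normal property, the details word must not carry the READ_ONLY attribute bit (IsWritableNormalProperty is illustrative only; the mask is the one built above):

  static inline bool IsWritableNormalProperty(uint32_t details_smi) {
    const uint32_t kTypeAndReadOnlyMask =
        (PropertyDetails::TypeField::kMask |
         PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
    // A zero result here corresponds to the tst above falling through.
    return (details_smi & kTypeAndReadOnlyMask) == 0;
  }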
149
150
151 // Checks the receiver for special cases (value type, slow case bits).
152 // Falls through for regular JS object.
153 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
154 Register receiver,
155 Register map,
156 Register scratch,
157 int interceptor_bit,
158 Label* slow) {
159 // Check that the object isn't a smi.
160 __ JumpIfSmi(receiver, slow);
161 // Get the map of the receiver.
162 __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
163 // Check bit field.
164 __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
165 __ tst(scratch,
166 Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
167 __ b(ne, slow);
168 // Check that the object is some kind of JS object EXCEPT JS Value type.
169 // In the case that the object is a value-wrapper object,
170 // we enter the runtime system to make sure that indexing into string
171 // objects works as intended.
172 DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
173 __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
174 __ cmp(scratch, Operand(JS_OBJECT_TYPE));
175 __ b(lt, slow);
176 }
177
178
179 // Loads an indexed element from a fast case array.
180 // If not_fast_array is NULL, doesn't perform the elements map check.
181 static void GenerateFastArrayLoad(MacroAssembler* masm,
182 Register receiver,
183 Register key,
184 Register elements,
185 Register scratch1,
186 Register scratch2,
187 Register result,
188 Label* not_fast_array,
189 Label* out_of_range) {
190 // Register use:
191 //
192 // receiver - holds the receiver on entry.
193 // Unchanged unless 'result' is the same register.
194 //
195 // key - holds the smi key on entry.
196 // Unchanged unless 'result' is the same register.
197 //
198 // elements - holds the elements of the receiver on exit.
199 //
200 // result - holds the result on exit if the load succeeded.
201 // Allowed to be the same as 'receiver' or 'key'.
202 // Unchanged on bailout so 'receiver' and 'key' can be safely
203 // used by further computation.
204 //
205 // Scratch registers:
206 //
207 // scratch1 - used to hold elements map and elements length.
208 // Holds the elements map if not_fast_array branch is taken.
209 //
210 // scratch2 - used to hold the loaded value.
211
212 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
213 if (not_fast_array != NULL) {
214 // Check that the object is in fast mode and writable.
215 __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
216 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
217 __ cmp(scratch1, ip);
218 __ b(ne, not_fast_array);
219 } else {
220 __ AssertFastElements(elements);
221 }
222 // Check that the key (index) is within bounds.
223 __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
224 __ cmp(key, Operand(scratch1));
225 __ b(hs, out_of_range);
226 // Fast case: Do the load.
227 __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
228 __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key));
229 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
230 __ cmp(scratch2, ip);
231 // In case the loaded value is the_hole we have to consult GetProperty
232 // to ensure the prototype chain is searched.
233 __ b(eq, out_of_range);
234 __ mov(result, scratch2);
235 }
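The smi-scaled addressing in the fast path relies on the 32-bit smi encoding: a smi holds its value shifted left by kSmiTagSize (1), so with 4-byte pointers the element offset is the smi shifted left one more bit:

  element_address = elements + FixedArray::kHeaderSize - kHeapObjectTag
                    + (key_smi << (kPointerSizeLog2 - kSmiTagSize));

For example (illustrative values), index 3 is the smi 6, and 6 << 1 == 12 == 3 * kPointerSize.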
236
237
238 // Checks whether the key is an array index string or a unique name.
239 // Falls through if the key is a unique name.
240 static void GenerateKeyNameCheck(MacroAssembler* masm,
241 Register key,
242 Register map,
243 Register hash,
244 Label* index_string,
245 Label* not_unique) {
246 // The key is not a smi.
247 Label unique;
248 // Is it a name?
249 __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
250 __ b(hi, not_unique);
251 STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
252 __ b(eq, &unique);
253
254 // Is the string an array index, with cached numeric value?
255 __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
256 __ tst(hash, Operand(Name::kContainsCachedArrayIndexMask));
257 __ b(eq, index_string);
258
259 // Is the string internalized? We know it's a string, so a single
260 // bit test is enough.
261 // map: key map
262 __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
263 STATIC_ASSERT(kInternalizedTag == 0);
264 __ tst(hash, Operand(kIsNotInternalizedMask));
265 __ b(ne, not_unique);
266
267 __ bind(&unique);
268 }
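The control flow above amounts to the following classification, restated as a hedged C++ sketch (ClassifyKey and KeyKind are illustrative names; the constants are the ones used above):

  enum KeyKind { UNIQUE_NAME, INDEX_STRING, NOT_UNIQUE };

  static KeyKind ClassifyKey(uint32_t instance_type, uint32_t hash_field) {
    if (instance_type > LAST_UNIQUE_NAME_TYPE) return NOT_UNIQUE;
    if (instance_type == LAST_UNIQUE_NAME_TYPE) return UNIQUE_NAME;  // Symbol.
    // Otherwise the key is a string.
    if ((hash_field & Name::kContainsCachedArrayIndexMask) == 0)
      return INDEX_STRING;  // Its numeric value is cached in the hash field.
    if (instance_type & kIsNotInternalizedMask) return NOT_UNIQUE;
    return UNIQUE_NAME;  // Internalized string.
  }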
269
270
271 void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
272 // The return address is in lr.
273 Register receiver = ReceiverRegister();
274 Register name = NameRegister();
275 DCHECK(receiver.is(r1));
276 DCHECK(name.is(r2));
277
278 // Probe the stub cache.
279 Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
280 Code::ComputeHandlerFlags(Code::LOAD_IC));
281 masm->isolate()->stub_cache()->GenerateProbe(
282 masm, flags, receiver, name, r3, r4, r5, r6);
283
284 // Cache miss: Jump to runtime.
285 GenerateMiss(masm);
286 }
287
288
289 void LoadIC::GenerateNormal(MacroAssembler* masm) {
290 Register dictionary = r0;
291 DCHECK(!dictionary.is(ReceiverRegister()));
292 DCHECK(!dictionary.is(NameRegister()));
293
294 Label slow;
295
296 __ ldr(dictionary,
297 FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
298 GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), r0, r3, r4);
299 __ Ret();
300
301 // Dictionary load failed, go slow (but don't miss).
302 __ bind(&slow);
303 GenerateRuntimeGetProperty(masm);
304 }
305
306
307 // A register that isn't one of the parameters to the load IC.
308 static const Register LoadIC_TempRegister() { return r3; }
309
310
311 void LoadIC::GenerateMiss(MacroAssembler* masm) {
312 // The return address is in lr.
313 Isolate* isolate = masm->isolate();
314
315 __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);
316
317 __ mov(LoadIC_TempRegister(), ReceiverRegister());
318 __ Push(LoadIC_TempRegister(), NameRegister());
319
320 // Perform tail call to the entry.
321 ExternalReference ref =
322 ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
323 __ TailCallExternalReference(ref, 2, 1);
324 }
325
326
327 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
328 // The return address is in lr.
329
330 __ mov(LoadIC_TempRegister(), ReceiverRegister());
331 __ Push(LoadIC_TempRegister(), NameRegister());
332
333 __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
334 }
335
336
337 static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
338 Register object,
339 Register key,
340 Register scratch1,
341 Register scratch2,
342 Register scratch3,
343 Label* unmapped_case,
344 Label* slow_case) {
345 Heap* heap = masm->isolate()->heap();
346
347 // Check that the receiver is a JSObject. Because of the map check
348 // later, we do not need to check for interceptors or whether it
349 // requires access checks.
350 __ JumpIfSmi(object, slow_case);
351 // Check that the object is some kind of JSObject.
352 __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE);
353 __ b(lt, slow_case);
354
355 // Check that the key is a positive smi.
356 __ tst(key, Operand(0x80000001));
357 __ b(ne, slow_case);
358
359 // Load the elements into scratch1 and check its map.
360 Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
361 __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
362 __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
363
364 // Check if element is in the range of mapped arguments. If not, jump
365 // to the unmapped lookup with the parameter map in scratch1.
366 __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
367 __ sub(scratch2, scratch2, Operand(Smi::FromInt(2)));
368 __ cmp(key, Operand(scratch2));
369 __ b(cs, unmapped_case);
370
371 // Load element index and check whether it is the hole.
372 const int kOffset =
373 FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
374
375 __ mov(scratch3, Operand(kPointerSize >> 1));
376 __ mul(scratch3, key, scratch3);
377 __ add(scratch3, scratch3, Operand(kOffset));
378
379 __ ldr(scratch2, MemOperand(scratch1, scratch3));
380 __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
381 __ cmp(scratch2, scratch3);
382 __ b(eq, unmapped_case);
383
384 // Load value from context and return it. We can reuse scratch1 because
385 // we do not jump to the unmapped lookup (which requires the parameter
386 // map in scratch1).
387 __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
388 __ mov(scratch3, Operand(kPointerSize >> 1));
389 __ mul(scratch3, scratch2, scratch3);
390 __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
391 return MemOperand(scratch1, scratch3);
392 }
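For reference, the two arguments-lookup helpers assume the standard sloppy-arguments elements layout; a sketch of the invariants the offsets encode (not new code in the patch):

  // parameter_map (FixedArray with sloppy_arguments_elements_map):
  //   slot 0:          context
  //   slot 1:          backing store (FixedArray of unmapped arguments)
  //   slots 2..len-1:  one per mapped parameter: a smi index into the
  //                    context, or the_hole if that argument is unmapped.
  //
  // Hence there are length - 2 mapped entries (the Smi::FromInt(2) above),
  // and the first mapped slot lives at kHeaderSize + 2 * kPointerSize.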
393
394
395 static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
396 Register key,
397 Register parameter_map,
398 Register scratch,
399 Label* slow_case) {
400 // Element is in arguments backing store, which is referenced by the
401 // second element of the parameter_map. The parameter_map register
402 // must be loaded with the parameter map of the arguments object and is
403 // overwritten.
404 const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
405 Register backing_store = parameter_map;
406 __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
407 Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
408 __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
409 DONT_DO_SMI_CHECK);
410 __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
411 __ cmp(key, Operand(scratch));
412 __ b(cs, slow_case);
413 __ mov(scratch, Operand(kPointerSize >> 1));
414 __ mul(scratch, key, scratch);
415 __ add(scratch,
416 scratch,
417 Operand(FixedArray::kHeaderSize - kHeapObjectTag));
418 return MemOperand(backing_store, scratch);
419 }
420
421
422 void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
423 // The return address is in lr.
424 Register receiver = ReceiverRegister();
425 Register key = NameRegister();
426 DCHECK(receiver.is(r1));
427 DCHECK(key.is(r2));
428
429 Label slow, notin;
430 MemOperand mapped_location =
431 GenerateMappedArgumentsLookup(
432 masm, receiver, key, r0, r3, r4, &notin, &slow);
433 __ ldr(r0, mapped_location);
434 __ Ret();
435 __ bind(&notin);
436 // The unmapped lookup expects that the parameter map is in r0.
437 MemOperand unmapped_location =
438 GenerateUnmappedArgumentsLookup(masm, key, r0, r3, &slow);
439 __ ldr(r0, unmapped_location);
440 __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
441 __ cmp(r0, r3);
442 __ b(eq, &slow);
443 __ Ret();
444 __ bind(&slow);
445 GenerateMiss(masm);
446 }
447
448
449 void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
450 Register receiver = ReceiverRegister();
451 Register key = NameRegister();
452 Register value = ValueRegister();
453 DCHECK(receiver.is(r1));
454 DCHECK(key.is(r2));
455 DCHECK(value.is(r0));
456
457 Label slow, notin;
458 MemOperand mapped_location = GenerateMappedArgumentsLookup(
459 masm, receiver, key, r3, r4, r5, &notin, &slow);
460 __ str(value, mapped_location);
461 __ add(r6, r3, r5);
462 __ mov(r9, value);
463 __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
464 __ Ret();
465 __ bind(&notin);
466 // The unmapped lookup expects that the parameter map is in r3.
467 MemOperand unmapped_location =
468 GenerateUnmappedArgumentsLookup(masm, key, r3, r4, &slow);
469 __ str(value, unmapped_location);
470 __ add(r6, r3, r4);
471 __ mov(r9, value);
472 __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
473 __ Ret();
474 __ bind(&slow);
475 GenerateMiss(masm);
476 }
477
478
479 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
480 // The return address is in lr.
481 Isolate* isolate = masm->isolate();
482
483 __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);
484
485 __ Push(ReceiverRegister(), NameRegister());
486
487 // Perform tail call to the entry.
488 ExternalReference ref =
489 ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
490
491 __ TailCallExternalReference(ref, 2, 1);
492 }
493
494
495 // IC register specifications
496 const Register LoadIC::ReceiverRegister() { return r1; }
497 const Register LoadIC::NameRegister() { return r2; }
498
499
500 const Register LoadIC::SlotRegister() {
501 DCHECK(FLAG_vector_ics);
502 return r0;
503 }
504
505
506 const Register LoadIC::VectorRegister() {
507 DCHECK(FLAG_vector_ics);
508 return r3;
509 }
510
511
512 const Register StoreIC::ReceiverRegister() { return r1; }
513 const Register StoreIC::NameRegister() { return r2; }
514 const Register StoreIC::ValueRegister() { return r0; }
515
516
517 const Register KeyedStoreIC::MapRegister() {
518 return r3;
519 }
520
521
522 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
523 // The return address is in lr.
524
525 __ Push(ReceiverRegister(), NameRegister());
526
527 __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
528 }
529
530
531 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
532 // The return address is in lr.
533 Label slow, check_name, index_smi, index_name, property_array_property;
534 Label probe_dictionary, check_number_dictionary;
535
536 Register key = NameRegister();
537 Register receiver = ReceiverRegister();
538 DCHECK(key.is(r2));
539 DCHECK(receiver.is(r1));
540
541 Isolate* isolate = masm->isolate();
542
543 // Check that the key is a smi.
544 __ JumpIfNotSmi(key, &check_name);
545 __ bind(&index_smi);
546 // Now the key is known to be a smi. This place is also jumped to from below
547 // where a numeric string is converted to a smi.
548
549 GenerateKeyedLoadReceiverCheck(
550 masm, receiver, r0, r3, Map::kHasIndexedInterceptor, &slow);
551
552 // Check the receiver's map to see if it has fast elements.
553 __ CheckFastElements(r0, r3, &check_number_dictionary);
554
555 GenerateFastArrayLoad(
556 masm, receiver, key, r0, r3, r4, r0, NULL, &slow);
557 __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r4, r3);
558 __ Ret();
559
560 __ bind(&check_number_dictionary);
561 __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
562 __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));
563
564 // Check whether the elements object is a number dictionary.
565 // r3: elements map
566 // r4: elements
567 __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
568 __ cmp(r3, ip);
569 __ b(ne, &slow);
570 __ SmiUntag(r0, key);
571 __ LoadFromNumberDictionary(&slow, r4, key, r0, r0, r3, r5);
572 __ Ret();
573
574 // Slow case, key and receiver still in r2 and r1.
575 __ bind(&slow);
576 __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
577 1, r4, r3);
578 GenerateRuntimeGetProperty(masm);
579
580 __ bind(&check_name);
581 GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow);
582
583 GenerateKeyedLoadReceiverCheck(
584 masm, receiver, r0, r3, Map::kHasNamedInterceptor, &slow);
585
586 // If the receiver is a fast-case object, check the keyed lookup
587 // cache. Otherwise probe the dictionary.
588 __ ldr(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
589 __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
590 __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
591 __ cmp(r4, ip);
592 __ b(eq, &probe_dictionary);
593
594 // Load the map of the receiver, compute the keyed lookup cache hash
595 // based on 32 bits of the map pointer and the name hash.
596 __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
597 __ mov(r3, Operand(r0, ASR, KeyedLookupCache::kMapHashShift));
598 __ ldr(r4, FieldMemOperand(key, Name::kHashFieldOffset));
599 __ eor(r3, r3, Operand(r4, ASR, Name::kHashShift));
600 int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
601 __ And(r3, r3, Operand(mask));
602
603 // Load the key (consisting of map and unique name) from the cache and
604 // check for match.
605 Label load_in_object_property;
606 static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
607 Label hit_on_nth_entry[kEntriesPerBucket];
608 ExternalReference cache_keys =
609 ExternalReference::keyed_lookup_cache_keys(isolate);
610
611 __ mov(r4, Operand(cache_keys));
612 __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
613
614 for (int i = 0; i < kEntriesPerBucket - 1; i++) {
615 Label try_next_entry;
616 // Load map and move r4 to next entry.
617 __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
618 __ cmp(r0, r5);
619 __ b(ne, &try_next_entry);
620 __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load name
621 __ cmp(key, r5);
622 __ b(eq, &hit_on_nth_entry[i]);
623 __ bind(&try_next_entry);
624 }
625
626 // Last entry: Load map and move r4 to name.
627 __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
628 __ cmp(r0, r5);
629 __ b(ne, &slow);
630 __ ldr(r5, MemOperand(r4));
631 __ cmp(key, r5);
632 __ b(ne, &slow);
633
634 // Get field offset.
635 // r0 : receiver's map
636 // r3 : lookup cache index
637 ExternalReference cache_field_offsets =
638 ExternalReference::keyed_lookup_cache_field_offsets(isolate);
639
640 // Hit on nth entry.
641 for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
642 __ bind(&hit_on_nth_entry[i]);
643 __ mov(r4, Operand(cache_field_offsets));
644 if (i != 0) {
645 __ add(r3, r3, Operand(i));
646 }
647 __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
648 __ ldrb(r6, FieldMemOperand(r0, Map::kInObjectPropertiesOffset));
649 __ sub(r5, r5, r6, SetCC);
650 __ b(ge, &property_array_property);
651 if (i != 0) {
652 __ jmp(&load_in_object_property);
653 }
654 }
655
656 // Load in-object property.
657 __ bind(&load_in_object_property);
658 __ ldrb(r6, FieldMemOperand(r0, Map::kInstanceSizeOffset));
659 __ add(r6, r6, r5); // Index from start of object.
660 __ sub(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag.
661 __ ldr(r0, MemOperand(receiver, r6, LSL, kPointerSizeLog2));
662 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
663 1, r4, r3);
664 __ Ret();
665
666 // Load property array property.
667 __ bind(&property_array_property);
668 __ ldr(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
669 __ add(receiver, receiver, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
670 __ ldr(r0, MemOperand(receiver, r5, LSL, kPointerSizeLog2));
671 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
672 1, r4, r3);
673 __ Ret();
674
675 // Do a quick inline probe of the receiver's dictionary, if it
676 // exists.
677 __ bind(&probe_dictionary);
678 // r3: elements
679 __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
680 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
681 GenerateGlobalInstanceTypeCheck(masm, r0, &slow);
682 // Load the property to r0.
683 GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4);
684 __ IncrementCounter(
685 isolate->counters()->keyed_load_generic_symbol(), 1, r4, r3);
686 __ Ret();
687
688 __ bind(&index_name);
689 __ IndexFromHash(r3, key);
690 // Now jump to the place where smi keys are handled.
691 __ jmp(&index_smi);
692 }
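The lookup-cache probe above mixes the receiver's map pointer with the name's hash field; a hedged C++ restatement of the index computation (LookupCacheIndex is an illustrative name; the shifts mirror the ASR/eor sequence above):

  static int LookupCacheIndex(uint32_t map_bits, uint32_t hash_field) {
    int hash = static_cast<int>(map_bits >> KeyedLookupCache::kMapHashShift) ^
               static_cast<int>(hash_field >> Name::kHashShift);
    // Each bucket holds kEntriesPerBucket (map, name) pairs, with a parallel
    // table of field offsets indexed the same way.
    return hash &
           (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
  }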
693
694
695 void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
696 // Return address is in lr.
697 Label miss;
698
699 Register receiver = ReceiverRegister();
700 Register index = NameRegister();
701 Register scratch = r3;
702 Register result = r0;
703 DCHECK(!scratch.is(receiver) && !scratch.is(index));
704
705 StringCharAtGenerator char_at_generator(receiver,
706 index,
707 scratch,
708 result,
709 &miss, // When not a string.
710 &miss, // When not a number.
711 &miss, // When index out of range.
712 STRING_INDEX_IS_ARRAY_INDEX);
713 char_at_generator.GenerateFast(masm);
714 __ Ret();
715
716 StubRuntimeCallHelper call_helper;
717 char_at_generator.GenerateSlow(masm, call_helper);
718
719 __ bind(&miss);
720 GenerateMiss(masm);
721 }
722
723
724 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
725 // Return address is in lr.
726 Label slow;
727
728 Register receiver = ReceiverRegister();
729 Register key = NameRegister();
730 Register scratch1 = r3;
731 Register scratch2 = r4;
732 DCHECK(!scratch1.is(receiver) && !scratch1.is(key));
733 DCHECK(!scratch2.is(receiver) && !scratch2.is(key));
734
735 // Check that the receiver isn't a smi.
736 __ JumpIfSmi(receiver, &slow);
737
738 // Check that the key is an array index, that is Uint32.
739 __ NonNegativeSmiTst(key);
740 __ b(ne, &slow);
741
742 // Get the map of the receiver.
743 __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
744
745 // Check that it has indexed interceptor and access checks
746 // are not enabled for this object.
747 __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
748 __ and_(scratch2, scratch2, Operand(kSlowCaseBitFieldMask));
749 __ cmp(scratch2, Operand(1 << Map::kHasIndexedInterceptor));
750 __ b(ne, &slow);
751
752 // Everything is fine, call runtime.
753 __ Push(receiver, key); // Receiver, key.
754
755 // Perform tail call to the entry.
756 __ TailCallExternalReference(
757 ExternalReference(IC_Utility(kLoadElementWithInterceptor),
758 masm->isolate()),
759 2, 1);
760
761 __ bind(&slow);
762 GenerateMiss(masm);
763 }
764
765
766 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
767 // Push receiver, key and value for runtime call.
768 __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
769
770 ExternalReference ref =
771 ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
772 __ TailCallExternalReference(ref, 3, 1);
773 }
774
775
776 void StoreIC::GenerateSlow(MacroAssembler* masm) {
777 // Push receiver, key and value for runtime call.
778 __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
779
780 // The slow case calls into the runtime to complete the store without causing
781 // an IC miss that would otherwise cause a transition to the generic stub.
782 ExternalReference ref =
783 ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
784 __ TailCallExternalReference(ref, 3, 1);
785 }
786
787
788 void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
789 // Push receiver, key and value for runtime call.
790 __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
791
792 // The slow case calls into the runtime to complete the store without causing
793 // an IC miss that would otherwise cause a transition to the generic stub.
794 ExternalReference ref =
795 ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
796 __ TailCallExternalReference(ref, 3, 1);
797 }
798
799
800 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
801 StrictMode strict_mode) {
802 // Push receiver, key and value for runtime call.
803 __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
804
805 __ mov(r0, Operand(Smi::FromInt(strict_mode))); // Strict mode.
806 __ Push(r0);
807
808 __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
809 }
810
811
812 static void KeyedStoreGenerateGenericHelper(
813 MacroAssembler* masm,
814 Label* fast_object,
815 Label* fast_double,
816 Label* slow,
817 KeyedStoreCheckMap check_map,
818 KeyedStoreIncrementLength increment_length,
819 Register value,
820 Register key,
821 Register receiver,
822 Register receiver_map,
823 Register elements_map,
824 Register elements) {
825 Label transition_smi_elements;
826 Label finish_object_store, non_double_value, transition_double_elements;
827 Label fast_double_without_map_check;
828
829 // Fast case: Do the store, could be either Object or double.
830 __ bind(fast_object);
831 Register scratch_value = r4;
832 Register address = r5;
833 if (check_map == kCheckMap) {
834 __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
835 __ cmp(elements_map,
836 Operand(masm->isolate()->factory()->fixed_array_map()));
837 __ b(ne, fast_double);
838 }
839
840 // HOLECHECK: guards "A[i] = V"
841 // We have to go to the runtime if the current value is the hole because
842 // there may be a callback on the element
843 Label holecheck_passed1;
844 __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
845 __ ldr(scratch_value,
846 MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
847 __ cmp(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()));
848 __ b(ne, &holecheck_passed1);
849 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
850 slow);
851
852 __ bind(&holecheck_passed1);
853
854 // Smi stores don't require further checks.
855 Label non_smi_value;
856 __ JumpIfNotSmi(value, &non_smi_value);
857
858 if (increment_length == kIncrementLength) {
859 // Add 1 to receiver->length.
860 __ add(scratch_value, key, Operand(Smi::FromInt(1)));
861 __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
862 }
863 // It's irrelevant whether array is smi-only or not when writing a smi.
864 __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
865 __ str(value, MemOperand::PointerAddressFromSmiKey(address, key));
866 __ Ret();
867
868 __ bind(&non_smi_value);
869 // Escape to elements kind transition case.
870 __ CheckFastObjectElements(receiver_map, scratch_value,
871 &transition_smi_elements);
872
873 // Fast elements array, store the value to the elements backing store.
874 __ bind(&finish_object_store);
875 if (increment_length == kIncrementLength) {
876 // Add 1 to receiver->length.
877 __ add(scratch_value, key, Operand(Smi::FromInt(1)));
878 __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
879 }
880 __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
881 __ add(address, address, Operand::PointerOffsetFromSmiKey(key));
882 __ str(value, MemOperand(address));
883 // Update write barrier for the elements array address.
884 __ mov(scratch_value, value); // Preserve the value which is returned.
885 __ RecordWrite(elements,
886 address,
887 scratch_value,
888 kLRHasNotBeenSaved,
889 kDontSaveFPRegs,
890 EMIT_REMEMBERED_SET,
891 OMIT_SMI_CHECK);
892 __ Ret();
893
894 __ bind(fast_double);
895 if (check_map == kCheckMap) {
896 // Check for fast double array case. If this fails, call through to the
897 // runtime.
898 __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
899 __ b(ne, slow);
900 }
901
902 // HOLECHECK: guards "A[i] double hole?"
903 // We have to see if the double version of the hole is present. If so
904 // go to the runtime.
905 __ add(address, elements,
906 Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32))
907 - kHeapObjectTag));
908 __ ldr(scratch_value,
909 MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
910 __ cmp(scratch_value, Operand(kHoleNanUpper32));
911 __ b(ne, &fast_double_without_map_check);
912 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
913 slow);
914
915 __ bind(&fast_double_without_map_check);
916 __ StoreNumberToDoubleElements(value, key, elements, r3, d0,
917 &transition_double_elements);
918 if (increment_length == kIncrementLength) {
919 // Add 1 to receiver->length.
920 __ add(scratch_value, key, Operand(Smi::FromInt(1)));
921 __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
922 }
923 __ Ret();
924
925 __ bind(&transition_smi_elements);
926 // Transition the array appropriately depending on the value type.
927 __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
928 __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
929 __ b(ne, &non_double_value);
930
931 // Value is a double. Transition FAST_SMI_ELEMENTS ->
932 // FAST_DOUBLE_ELEMENTS and complete the store.
933 __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
934 FAST_DOUBLE_ELEMENTS,
935 receiver_map,
936 r4,
937 slow);
938 AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
939 FAST_DOUBLE_ELEMENTS);
940 ElementsTransitionGenerator::GenerateSmiToDouble(
941 masm, receiver, key, value, receiver_map, mode, slow);
942 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
943 __ jmp(&fast_double_without_map_check);
944
945 __ bind(&non_double_value);
946 // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
947 __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
948 FAST_ELEMENTS,
949 receiver_map,
950 r4,
951 slow);
952 mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
953 ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
954 masm, receiver, key, value, receiver_map, mode, slow);
955 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
956 __ jmp(&finish_object_store);
957
958 __ bind(&transition_double_elements);
959 // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
960 // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
961 // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
962 __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
963 FAST_ELEMENTS,
964 receiver_map,
965 r4,
966 slow);
967 mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
968 ElementsTransitionGenerator::GenerateDoubleToObject(
969 masm, receiver, key, value, receiver_map, mode, slow);
970 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
971 __ jmp(&finish_object_store);
972 }
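The three transition blocks above implement the usual one-way elements-kind lattice (sketch):

  FAST_SMI_ELEMENTS    --store HeapNumber-----> FAST_DOUBLE_ELEMENTS
  FAST_SMI_ELEMENTS    --store other object---> FAST_ELEMENTS
  FAST_DOUBLE_ELEMENTS --store non-number-----> FAST_ELEMENTS

Once transitioned, the store is retried against the new backing store; there is no path back to a more specialized kind.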
973
974
975 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
976 StrictMode strict_mode) {
977 // ---------- S t a t e --------------
978 // -- r0 : value
979 // -- r1 : key
980 // -- r2 : receiver
981 // -- lr : return address
982 // -----------------------------------
983 Label slow, fast_object, fast_object_grow;
984 Label fast_double, fast_double_grow;
985 Label array, extra, check_if_double_array;
986
987 // Register usage.
988 Register value = ValueRegister();
989 Register key = NameRegister();
990 Register receiver = ReceiverRegister();
991 DCHECK(receiver.is(r1));
992 DCHECK(key.is(r2));
993 DCHECK(value.is(r0));
994 Register receiver_map = r3;
995 Register elements_map = r6;
996 Register elements = r9; // Elements array of the receiver.
997 // r4 and r5 are used as general scratch registers.
998
999 // Check that the key is a smi.
1000 __ JumpIfNotSmi(key, &slow);
1001 // Check that the object isn't a smi.
1002 __ JumpIfSmi(receiver, &slow);
1003 // Get the map of the object.
1004 __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
1005 // Check that the receiver does not require access checks and is not observed.
1006 // The generic stub does not perform map checks or handle observed objects.
1007 __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
1008 __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
1009 __ b(ne, &slow);
1010 // Check if the object is a JS array or not.
1011 __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
1012 __ cmp(r4, Operand(JS_ARRAY_TYPE));
1013 __ b(eq, &array);
1014 // Check that the object is some kind of JSObject.
1015 __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
1016 __ b(lt, &slow);
1017
1018 // Object case: Check key against length in the elements array.
1019 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
1020 // Check array bounds. Both the key and the length of FixedArray are smis.
1021 __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
1022 __ cmp(key, Operand(ip));
1023 __ b(lo, &fast_object);
1024
1025 // Slow case, handle jump to runtime.
1026 __ bind(&slow);
1027 // Entry registers are intact.
1028 // r0: value.
1029 // r1: key.
1030 // r2: receiver.
1031 GenerateRuntimeSetProperty(masm, strict_mode);
1032
1033 // Extra capacity case: Check if there is extra capacity to
1034 // perform the store and update the length. Used for adding one
1035 // element to the array by writing to array[array.length].
1036 __ bind(&extra);
1037 // Condition code from comparing key and array length is still available.
1038 __ b(ne, &slow); // Only support writing to array[array.length].
1039 // Check for room in the elements backing store.
1040 // Both the key and the length of FixedArray are smis.
1041 __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
1042 __ cmp(key, Operand(ip));
1043 __ b(hs, &slow);
1044 __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
1045 __ cmp(elements_map,
1046 Operand(masm->isolate()->factory()->fixed_array_map()));
1047 __ b(ne, &check_if_double_array);
1048 __ jmp(&fast_object_grow);
1049
1050 __ bind(&check_if_double_array);
1051 __ cmp(elements_map,
1052 Operand(masm->isolate()->factory()->fixed_double_array_map()));
1053 __ b(ne, &slow);
1054 __ jmp(&fast_double_grow);
1055
1056 // Array case: Get the length and the elements array from the JS
1057 // array. Check that the array is in fast mode (and writable); if it
1058 // is, the length is always a smi.
1059 __ bind(&array);
1060 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
1061
1062 // Check the key against the length in the array.
1063 __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
1064 __ cmp(key, Operand(ip));
1065 __ b(hs, &extra);
1066
1067 KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
1068 &slow, kCheckMap, kDontIncrementLength,
1069 value, key, receiver, receiver_map,
1070 elements_map, elements);
1071 KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
1072 &slow, kDontCheckMap, kIncrementLength,
1073 value, key, receiver, receiver_map,
1074 elements_map, elements);
1075 }
1076
1077
1078 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
1079 Register receiver = ReceiverRegister();
1080 Register name = NameRegister();
1081 DCHECK(receiver.is(r1));
1082 DCHECK(name.is(r2));
1083 DCHECK(ValueRegister().is(r0));
1084
1085 // Probe the stub cache.
1086 Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
1087 Code::ComputeHandlerFlags(Code::STORE_IC));
1088
1089 masm->isolate()->stub_cache()->GenerateProbe(
1090 masm, flags, receiver, name, r3, r4, r5, r6);
1091
1092 // Cache miss: Jump to runtime.
1093 GenerateMiss(masm);
1094 }
1095
1096
1097 void StoreIC::GenerateMiss(MacroAssembler* masm) {
1098 __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
1099
1100 // Perform tail call to the entry.
1101 ExternalReference ref =
1102 ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
1103 __ TailCallExternalReference(ref, 3, 1);
1104 }
1105
1106
1107 void StoreIC::GenerateNormal(MacroAssembler* masm) {
1108 Label miss;
1109 Register receiver = ReceiverRegister();
1110 Register name = NameRegister();
1111 Register value = ValueRegister();
1112 Register dictionary = r3;
1113 DCHECK(receiver.is(r1));
1114 DCHECK(name.is(r2));
1115 DCHECK(value.is(r0));
1116
1117 __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
1118
1119 GenerateDictionaryStore(masm, &miss, dictionary, name, value, r4, r5);
1120 Counters* counters = masm->isolate()->counters();
1121 __ IncrementCounter(counters->store_normal_hit(),
1122 1, r4, r5);
1123 __ Ret();
1124
1125 __ bind(&miss);
1126 __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5);
1127 GenerateMiss(masm);
1128 }
1129
1130
1131 void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
1132 StrictMode strict_mode) {
1133 __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
1134
1135 __ mov(r0, Operand(Smi::FromInt(strict_mode)));
1136 __ Push(r0);
1137
1138 // Do tail-call to runtime routine.
1139 __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
1140 }
1141
1142
1143 #undef __
1144
1145
1146 Condition CompareIC::ComputeCondition(Token::Value op) {
1147 switch (op) {
1148 case Token::EQ_STRICT:
1149 case Token::EQ:
1150 return eq;
1151 case Token::LT:
1152 return lt;
1153 case Token::GT:
1154 return gt;
1155 case Token::LTE:
1156 return le;
1157 case Token::GTE:
1158 return ge;
1159 default:
1160 UNREACHABLE();
1161 return kNoCondition;
1162 }
1163 }
1164
1165
1166 bool CompareIC::HasInlinedSmiCode(Address address) {
1167 // The address of the instruction following the call.
1168 Address cmp_instruction_address =
1169 Assembler::return_address_from_call_start(address);
1170
1171 // If the instruction following the call is not a cmp rx, #yyy, nothing
1172 // was inlined.
1173 Instr instr = Assembler::instr_at(cmp_instruction_address);
1174 return Assembler::IsCmpImmediate(instr);
1175 }
1176
1177
1178 void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
1179 Address cmp_instruction_address =
1180 Assembler::return_address_from_call_start(address);
1181
1182 // If the instruction following the call is not a cmp rx, #yyy, nothing
1183 // was inlined.
1184 Instr instr = Assembler::instr_at(cmp_instruction_address);
1185 if (!Assembler::IsCmpImmediate(instr)) {
1186 return;
1187 }
1188
1189 // The delta to the start of the map check instruction and the
1190 // condition code to use at the patched jump.
1191 int delta = Assembler::GetCmpImmediateRawImmediate(instr);
1192 delta +=
1193 Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
1194 // If the delta is 0 the instruction is cmp r0, #0 which also signals that
1195 // nothing was inlined.
1196 if (delta == 0) {
1197 return;
1198 }
1199
1200 if (FLAG_trace_ic) {
1201 PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
1202 address, cmp_instruction_address, delta);
1203 }
1204
1205 Address patch_address =
1206 cmp_instruction_address - delta * Instruction::kInstrSize;
1207 Instr instr_at_patch = Assembler::instr_at(patch_address);
1208 Instr branch_instr =
1209 Assembler::instr_at(patch_address + Instruction::kInstrSize);
1210 // This is patching a conditional "jump if not smi/jump if smi" site.
1211 // Enabling by changing from
1212 // cmp rx, rx
1213 // b eq/ne, <target>
1214 // to
1215 // tst rx, #kSmiTagMask
1216 // b ne/eq, <target>
1217 // and vice-versa to be disabled again.
1218 CodePatcher patcher(patch_address, 2);
1219 Register reg = Assembler::GetRn(instr_at_patch);
1220 if (check == ENABLE_INLINED_SMI_CHECK) {
1221 DCHECK(Assembler::IsCmpRegister(instr_at_patch));
1222 DCHECK_EQ(Assembler::GetRn(instr_at_patch).code(),
1223 Assembler::GetRm(instr_at_patch).code());
1224 patcher.masm()->tst(reg, Operand(kSmiTagMask));
1225 } else {
1226 DCHECK(check == DISABLE_INLINED_SMI_CHECK);
1227 DCHECK(Assembler::IsTstImmediate(instr_at_patch));
1228 patcher.masm()->cmp(reg, reg);
1229 }
1230 DCHECK(Assembler::IsBranch(branch_instr));
1231 if (Assembler::GetCondition(branch_instr) == eq) {
1232 patcher.EmitCondition(ne);
1233 } else {
1234 DCHECK(Assembler::GetCondition(branch_instr) == ne);
1235 patcher.EmitCondition(eq);
1236 }
1237 }
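A worked example of the delta encoding recovered above, assuming kOff12Mask == (1 << 12) - 1 (the values below are illustrative): the code generator plants a cmp rx, #imm after the call such that delta == imm + rx.code() * kOff12Mask, measured in instructions. So cmp r1, #8 would encode delta = 8 + 1 * 4095 = 4103, and the patch site lies 4103 instructions before the cmp.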
1238
1239
1240 } } // namespace v8::internal
1241
1242 #endif // V8_TARGET_ARCH_ARM