Chromium Code Reviews

Side by Side Diff: src/sh4/ic-sh4.cc

Issue 11275184: First draft of the sh4 port Base URL: http://github.com/v8/v8.git@master
Patch Set: Use GYP and fix some typos Created 8 years, 1 month ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2011-2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution. 11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its 12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived 13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission. 14 // from this software without specific prior written permission.
15 // 15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #include "v8.h" 28 #include "v8.h"
29 29
30 #if defined(V8_TARGET_ARCH_ARM) 30 #if defined(V8_TARGET_ARCH_SH4)
31 31
32 #include "assembler-arm.h" 32 #include "assembler-sh4.h"
33 #include "code-stubs.h" 33 #include "code-stubs.h"
34 #include "codegen.h" 34 #include "codegen.h"
35 #include "disasm.h" 35 #include "disasm.h"
36 #include "ic-inl.h" 36 #include "ic-inl.h"
37 #include "runtime.h" 37 #include "runtime.h"
38 #include "stub-cache.h" 38 #include "stub-cache.h"
39 39
40 namespace v8 { 40 namespace v8 {
41 namespace internal { 41 namespace internal {
42 42
43 43
44 // ---------------------------------------------------------------------------- 44 // ----------------------------------------------------------------------------
45 // Static IC stub generators. 45 // Static IC stub generators.
46 // 46 //
47 47
48 #define __ ACCESS_MASM(masm) 48 #define __ ACCESS_MASM(masm)
49 49
50 // ARM to SH4 mapping
51 #include "map-sh4.h"
50 52
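The map-sh4.h header pulled in here is not part of this file's diff. Presumably it maps the ARM-style register and condition names used throughout the file onto their SH4 equivalents, which is what lets the port keep the shape of the ARM code. A hypothetical flavor of such a mapping, not the actual header contents:

    // Hypothetical sketch only; the real map-sh4.h is not shown in this CL.
    // #define r0 sh4_r0   // keep ARM-style general-purpose register names
    // #define ip sh4_ip   // scratch register alias used by the macro assembler
    // #define lr sh4_pr   // ARM link register maps onto the SH4 procedure register PR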
51 static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, 53 static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
52 Register type, 54 Register type,
53 Label* global_object) { 55 Label* global_object) {
54 // Register usage: 56 // Register usage:
55 // type: holds the receiver instance type on entry. 57 // type: holds the receiver instance type on entry.
56 __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE)); 58 __ cmpeq(type, Operand(JS_GLOBAL_OBJECT_TYPE));
57 __ b(eq, global_object); 59 __ bt(global_object);
58 __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE)); 60 __ cmpeq(type, Operand(JS_BUILTINS_OBJECT_TYPE));
59 __ b(eq, global_object); 61 __ bt(global_object);
60 __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE)); 62 __ cmpeq(type, Operand(JS_GLOBAL_PROXY_TYPE));
61 __ b(eq, global_object); 63 __ bt(global_object);
62 } 64 }
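This first hunk shows the core translation rule of the port: ARM encodes the condition at each branch (cmp then b eq), while SH4 has a single T bit that compare instructions set and that bt/bf branch on when it is true/false. A minimal, self-contained C++ model of that semantics (names illustrative, not V8 API):

    #include <cstdio>

    static bool T;  // models the SH4 status-register T bit

    static void cmpeq(int rn, int rm) { T = (rn == rm); }            // equality
    static void cmpge(int rn, int rm) { T = (rn >= rm); }            // signed >=
    static void cmphs(unsigned rn, unsigned rm) { T = (rn >= rm); }  // unsigned >=

    int main() {
      cmpeq(3, 3);
      if (T) std::puts("bt: branch taken, T set");     // __ bt(label)
      cmpge(1, 2);
      if (!T) std::puts("bf: branch taken, T clear");  // __ bf(label)
      return 0;
    }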
63 65
64 66
65 // Generated code falls through if the receiver is a regular non-global 67 // Generated code falls through if the receiver is a regular non-global
66 // JS object with slow properties and no interceptors. 68 // JS object with slow properties and no interceptors.
67 static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm, 69 static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
68 Register receiver, 70 Register receiver,
69 Register elements, 71 Register elements,
70 Register t0, 72 Register t0,
71 Register t1, 73 Register t1,
72 Label* miss) { 74 Label* miss) {
73 // Register usage: 75 // Register usage:
74 // receiver: holds the receiver on entry and is unchanged. 76 // receiver: holds the receiver on entry and is unchanged.
75 // elements: holds the property dictionary on fall through. 77 // elements: holds the property dictionary on fall through.
76 // Scratch registers: 78 // Scratch registers:
77 // t0: used to hold the receiver map. 79 // t0: used to hold the receiver map.
78 // t1: used to hold the receiver instance type, receiver bit mask and 80 // t1: used to hold the receiver instance type, receiver bit mask and
79 // elements map. 81 // elements map.
80 82
81 // Check that the receiver isn't a smi. 83 // Check that the receiver isn't a smi.
82 __ JumpIfSmi(receiver, miss); 84 __ JumpIfSmi(receiver, miss);
83 85
84 // Check that the receiver is a valid JS object. 86 // Check that the receiver is a valid JS object.
85 __ CompareObjectType(receiver, t0, t1, FIRST_SPEC_OBJECT_TYPE); 87 __ CompareObjectType(receiver, t0, t1, FIRST_SPEC_OBJECT_TYPE, ge);
86 __ b(lt, miss); 88 __ bf(miss);
87 89
88 // If this assert fails, we have to check upper bound too. 90 // If this assert fails, we have to check upper bound too.
89 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); 91 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
90 92
91 GenerateGlobalInstanceTypeCheck(masm, t1, miss); 93 GenerateGlobalInstanceTypeCheck(masm, t1, miss);
92 94
93 // Check that the global object does not require access checks. 95 // Check that the global object does not require access checks.
94 __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset)); 96 __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
95 __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) | 97 __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
96 (1 << Map::kHasNamedInterceptor))); 98 (1 << Map::kHasNamedInterceptor)));
97 __ b(ne, miss); 99 __ bf(miss);
98 100
99 __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); 101 __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
100 __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset)); 102 __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
101 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); 103 __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
102 __ cmp(t1, ip); 104 __ cmpeq(t1, ip);
103 __ b(ne, miss); 105 __ bf(miss);
104 } 106 }
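One subtlety in the tst conversion above: ARM's tst sets the flags from rn & mask, and b(ne, miss) branches when some bit is set; SH4's tst instead sets T when the AND is zero, so the matching branch is bf(miss). A sketch of the test this helper performs, with the constants treated as illustrative:

    // SH4: tst sets T = ((rn & mask) == 0); bf branches on T false,
    // i.e. exactly when one of the tested bits is set (ARM's b(ne)).
    static bool tst(unsigned rn, unsigned mask) { return (rn & mask) == 0; }

    bool NeedsMiss(unsigned bit_field, int access_bit, int interceptor_bit) {
      unsigned mask = (1u << access_bit) | (1u << interceptor_bit);
      return !tst(bit_field, mask);  // bf(miss) taken when a bit is set
    }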
105 107
106 108
107 // Helper function used from LoadIC/CallIC GenerateNormal. 109 // Helper function used from LoadIC/CallIC GenerateNormal.
108 // 110 //
109 // elements: Property dictionary. It is not clobbered if a jump to the miss 111 // elements: Property dictionary. It is not clobbered if a jump to the miss
110 // label is done. 112 // label is done.
111 // name: Property name. It is not clobbered if a jump to the miss label is 113 // name: Property name. It is not clobbered if a jump to the miss label is
112 // done. 114 // done.
113 // result: Register for the result. It is only updated if a jump to the miss 115 // result: Register for the result. It is only updated if a jump to the miss
(...skipping 78 matching lines...)
192 // is a normal property that is not read only. 194 // is a normal property that is not read only.
193 __ bind(&done); // scratch2 == elements + 4 * index 195 __ bind(&done); // scratch2 == elements + 4 * index
194 const int kElementsStartOffset = StringDictionary::kHeaderSize + 196 const int kElementsStartOffset = StringDictionary::kHeaderSize +
195 StringDictionary::kElementsStartIndex * kPointerSize; 197 StringDictionary::kElementsStartIndex * kPointerSize;
196 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; 198 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
197 const int kTypeAndReadOnlyMask = 199 const int kTypeAndReadOnlyMask =
198 (PropertyDetails::TypeField::kMask | 200 (PropertyDetails::TypeField::kMask |
199 PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize; 201 PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
200 __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); 202 __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
201 __ tst(scratch1, Operand(kTypeAndReadOnlyMask)); 203 __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
202 __ b(ne, miss); 204 __ bf(miss);
203 205
204 // Store the value at the masked, scaled index and return. 206 // Store the value at the masked, scaled index and return.
205 const int kValueOffset = kElementsStartOffset + kPointerSize; 207 const int kValueOffset = kElementsStartOffset + kPointerSize;
206 __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag)); 208 __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
207 __ str(value, MemOperand(scratch2)); 209 __ str(value, MemOperand(scratch2));
208 210
209 // Update the write barrier. Make sure not to clobber the value. 211 // Update the write barrier. Make sure not to clobber the value.
210 __ mov(scratch1, value); 212 __ mov(scratch1, value);
211 __ RecordWrite( 213 __ RecordWrite(
212 elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs); 214 elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
(...skipping 63 matching lines...)
276 __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); 278 __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
277 __ tst(scratch, 279 __ tst(scratch,
278 Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit))); 280 Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
279 __ b(ne, slow); 281 __ b(ne, slow);
280 // Check that the object is some kind of JS object EXCEPT JS Value type. 282 // Check that the object is some kind of JS object EXCEPT JS Value type.
281 // In the case that the object is a value-wrapper object, 283 // In the case that the object is a value-wrapper object,
282 // we enter the runtime system to make sure that indexing into string 284 // we enter the runtime system to make sure that indexing into string
283 // objects works as intended. 285 // objects works as intended.
284 ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE); 286 ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
285 __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); 287 __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
286 __ cmp(scratch, Operand(JS_OBJECT_TYPE)); 288 __ cmpge(scratch, Operand(JS_OBJECT_TYPE));
287 __ b(lt, slow); 289 __ bf(slow);
288 } 290 }
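Both forms of the lower-bound check seen so far, the raw cmpge/bf pair just above and CompareObjectType gaining a trailing condition argument earlier in the file, materialize ARM's b(lt, ...) the same way: the condition is computed into T at the compare, and the caller branches on its complement with bf. In effect:

    // ARM: flags = compare(type, JS_OBJECT_TYPE); branch to slow if lt.
    // SH4: T = (type >= JS_OBJECT_TYPE); bf(slow) branches when T is false.
    bool TAfterCompareObjectType(int instance_type, int lower_bound) {
      return instance_type >= lower_bound;  // the 'ge' passed to the helper
    }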
289 291
290 292
291 // Loads an indexed element from a fast case array. 293 // Loads an indexed element from a fast case array.
292 // If not_fast_array is NULL, doesn't perform the elements map check. 294 // If not_fast_array is NULL, doesn't perform the elements map check.
293 static void GenerateFastArrayLoad(MacroAssembler* masm, 295 static void GenerateFastArrayLoad(MacroAssembler* masm,
294 Register receiver, 296 Register receiver,
295 Register key, 297 Register key,
296 Register elements, 298 Register elements,
297 Register scratch1, 299 Register scratch1,
(...skipping 28 matching lines...)
326 // Check that the object is in fast mode and writable. 328 // Check that the object is in fast mode and writable.
327 __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset)); 329 __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
328 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); 330 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
329 __ cmp(scratch1, ip); 331 __ cmp(scratch1, ip);
330 __ b(ne, not_fast_array); 332 __ b(ne, not_fast_array);
331 } else { 333 } else {
332 __ AssertFastElements(elements); 334 __ AssertFastElements(elements);
333 } 335 }
334 // Check that the key (index) is within bounds. 336 // Check that the key (index) is within bounds.
335 __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset)); 337 __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
336 __ cmp(key, Operand(scratch1)); 338 __ cmphs(key, scratch1);
337 __ b(hs, out_of_range); 339 __ bt(out_of_range);
338 // Fast case: Do the load. 340 // Fast case: Do the load.
339 __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); 341 __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
340 // The key is a smi. 342 // The key is a smi.
341 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); 343 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
342 __ ldr(scratch2, 344 __ lsl(scratch2, key, Operand(kPointerSizeLog2 - kSmiTagSize));
343 MemOperand(scratch1, key, LSL, kPointerSizeLog2 - kSmiTagSize)); 345 __ ldr(scratch2, MemOperand(scratch1, scratch2));
344 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 346 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
345 __ cmp(scratch2, ip); 347 __ cmp(scratch2, ip);
346 // In case the loaded value is the_hole we have to consult GetProperty 348 // In case the loaded value is the_hole we have to consult GetProperty
347 // to ensure the prototype chain is searched. 349 // to ensure the prototype chain is searched.
348 __ b(eq, out_of_range); 350 __ b(eq, out_of_range);
349 __ mov(result, scratch2); 351 __ mov(result, scratch2);
350 } 352 }
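The two-instruction load that ends this hunk replaces ARM's single scaled load: ARM folds the shift into the address operand (MemOperand(base, key, LSL, shift)), but SH4 has no scaled addressing mode, so the port shifts into a scratch register first. The arithmetic being performed, as a sketch:

    // A smi stores value << kSmiTagSize (kSmiTagSize == 1 on 32-bit V8), so
    // shifting it left by (kPointerSizeLog2 - kSmiTagSize) gives the byte
    // offset of element 'value' in an array of pointers.
    const int kSmiTagSize = 1;
    const int kPointerSizeLog2 = 2;  // 32-bit build

    int ElementByteOffset(int smi_key) {
      return smi_key << (kPointerSizeLog2 - kSmiTagSize);  // == value * 4
    }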
351 353
352 354
353 // Checks whether a key is an array index string or a symbol string. 355 // Checks whether a key is an array index string or a symbol string.
354 // Falls through if a key is a symbol. 356 // Falls through if a key is a symbol.
355 static void GenerateKeyStringCheck(MacroAssembler* masm, 357 static void GenerateKeyStringCheck(MacroAssembler* masm,
356 Register key, 358 Register key,
357 Register map, 359 Register map,
358 Register hash, 360 Register hash,
359 Label* index_string, 361 Label* index_string,
360 Label* not_symbol) { 362 Label* not_symbol) {
361 // The key is not a smi. 363 // The key is not a smi.
362 // Is it a string? 364 // Is it a string?
363 __ CompareObjectType(key, map, hash, FIRST_NONSTRING_TYPE); 365 __ CompareObjectType(key, map, hash, FIRST_NONSTRING_TYPE, ge);
364 __ b(ge, not_symbol); 366 __ bt(not_symbol);
365 367
366 // Is the string an array index, with cached numeric value? 368 // Is the string an array index, with cached numeric value?
367 __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset)); 369 __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
368 __ tst(hash, Operand(String::kContainsCachedArrayIndexMask)); 370 __ tst(hash, Operand(String::kContainsCachedArrayIndexMask));
369 __ b(eq, index_string); 371 __ b(eq, index_string);
370 372
371 // Is the string a symbol? 373 // Is the string a symbol?
372 // map: key map 374 // map: key map
373 __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset)); 375 __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
374 STATIC_ASSERT(kSymbolTag != 0); 376 STATIC_ASSERT(kSymbolTag != 0);
(...skipping 25 matching lines...)
400 argc); 402 argc);
401 Isolate::Current()->stub_cache()->GenerateProbe( 403 Isolate::Current()->stub_cache()->GenerateProbe(
402 masm, flags, r1, r2, r3, r4, r5, r6); 404 masm, flags, r1, r2, r3, r4, r5, r6);
403 405
404 // If the stub cache probing failed, the receiver might be a value. 406 // If the stub cache probing failed, the receiver might be a value.
405 // For value objects, we use the map of the prototype object for 407 // For value objects, we use the map of the prototype object for
406 // the corresponding JSValue for the cache and that is what we need 408 // the corresponding JSValue for the cache and that is what we need
407 // to probe. 409 // to probe.
408 // 410 //
409 // Check for number. 411 // Check for number.
410 __ JumpIfSmi(r1, &number); 412 __ JumpIfSmi(r1, &number, Label::kNear);
411 __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE); 413 __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE, eq);
412 __ b(ne, &non_number); 414 __ bf_near(&non_number);
413 __ bind(&number); 415 __ bind(&number);
414 StubCompiler::GenerateLoadGlobalFunctionPrototype( 416 StubCompiler::GenerateLoadGlobalFunctionPrototype(
415 masm, Context::NUMBER_FUNCTION_INDEX, r1); 417 masm, Context::NUMBER_FUNCTION_INDEX, r1);
416 __ b(&probe); 418 __ b(&probe);
417 419
418 // Check for string. 420 // Check for string.
419 __ bind(&non_number); 421 __ bind(&non_number);
420 __ cmp(r3, Operand(FIRST_NONSTRING_TYPE)); 422 __ cmphs(r3, Operand(FIRST_NONSTRING_TYPE));
421 __ b(hs, &non_string); 423 __ bt_near(&non_string);
422 StubCompiler::GenerateLoadGlobalFunctionPrototype( 424 StubCompiler::GenerateLoadGlobalFunctionPrototype(
423 masm, Context::STRING_FUNCTION_INDEX, r1); 425 masm, Context::STRING_FUNCTION_INDEX, r1);
424 __ b(&probe); 426 __ b(&probe);
425 427
426 // Check for boolean. 428 // Check for boolean.
427 __ bind(&non_string); 429 __ bind(&non_string);
428 __ LoadRoot(ip, Heap::kTrueValueRootIndex); 430 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
429 __ cmp(r1, ip); 431 __ cmp(r1, ip);
430 __ b(eq, &boolean); 432 __ b(eq, &boolean);
431 __ LoadRoot(ip, Heap::kFalseValueRootIndex); 433 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
(...skipping 15 matching lines...)
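The Label::kNear hints and *_near branches introduced in this hunk reflect SH4's short conditional branches: bt/bf encode a signed 8-bit displacement counted in 2-byte instruction units, so only targets within roughly -256..+254 bytes can use the single-instruction form; farther targets need the inverted-branch-plus-bra expansion. A rough model of the reachability test (exact PC base simplified):

    bool FitsNearConditionalBranch(int displacement_bytes) {
      int units = displacement_bytes / 2;    // bt/bf displacements count
      return units >= -128 && units <= 127;  // 2-byte instruction units
    }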
447 static void GenerateFunctionTailCall(MacroAssembler* masm, 449 static void GenerateFunctionTailCall(MacroAssembler* masm,
448 int argc, 450 int argc,
449 Label* miss, 451 Label* miss,
450 Register scratch) { 452 Register scratch) {
451 // r1: function 453 // r1: function
452 454
453 // Check that the value isn't a smi. 455 // Check that the value isn't a smi.
454 __ JumpIfSmi(r1, miss); 456 __ JumpIfSmi(r1, miss);
455 457
456 // Check that the value is a JSFunction. 458 // Check that the value is a JSFunction.
457 __ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE); 459 __ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE, eq);
458 __ b(ne, miss); 460 __ bf(miss);
459 461
460 // Invoke the function. 462 // Invoke the function.
461 ParameterCount actual(argc); 463 ParameterCount actual(argc);
462 __ InvokeFunction(r1, actual, JUMP_FUNCTION, 464 __ InvokeFunction(r1, actual, JUMP_FUNCTION,
463 NullCallWrapper(), CALL_AS_METHOD); 465 NullCallWrapper(), CALL_AS_METHOD);
464 } 466 }
465 467
466 468
467 void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) { 469 void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
468 // ----------- S t a t e ------------- 470 // ----------- S t a t e -------------
(...skipping 43 matching lines...)
512 __ Push(r3, r2); 514 __ Push(r3, r2);
513 515
514 // Call the entry. 516 // Call the entry.
515 __ mov(r0, Operand(2)); 517 __ mov(r0, Operand(2));
516 __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate))); 518 __ mov(r1, Operand(ExternalReference(IC_Utility(id), isolate)));
517 519
518 CEntryStub stub(1); 520 CEntryStub stub(1);
519 __ CallStub(&stub); 521 __ CallStub(&stub);
520 522
521 // Move result to r1 and leave the internal frame. 523 // Move result to r1 and leave the internal frame.
522 __ mov(r1, Operand(r0)); 524 __ mov(r1, r0);
523 } 525 }
524 526
525 // Check if the receiver is a global object of some sort. 527 // Check if the receiver is a global object of some sort.
526 // This can happen only for regular CallIC but not KeyedCallIC. 528 // This can happen only for regular CallIC but not KeyedCallIC.
527 if (id == IC::kCallIC_Miss) { 529 if (id == IC::kCallIC_Miss) {
528 Label invoke, global; 530 Label invoke, global;
529 __ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver 531 __ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver
530 __ JumpIfSmi(r2, &invoke); 532 __ JumpIfSmi(r2, &invoke, Label::kNear);
531 __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE); 533 __ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE, eq);
532 __ b(eq, &global); 534 __ bt_near(&global);
533 __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE)); 535 __ cmpeq(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
534 __ b(ne, &invoke); 536 __ bf_near(&invoke);
535 537
536 // Patch the receiver on the stack. 538 // Patch the receiver on the stack.
537 __ bind(&global); 539 __ bind(&global);
538 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset)); 540 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
539 __ str(r2, MemOperand(sp, argc * kPointerSize)); 541 __ str(r2, MemOperand(sp, argc * kPointerSize));
540 __ bind(&invoke); 542 __ bind(&invoke);
541 } 543 }
542 544
543 // Invoke the function. 545 // Invoke the function.
544 CallKind call_kind = CallICBase::Contextual::decode(extra_state) 546 CallKind call_kind = CallICBase::Contextual::decode(extra_state)
(...skipping 57 matching lines...)
602 GenerateFunctionTailCall(masm, argc, &slow_call, r0); 604 GenerateFunctionTailCall(masm, argc, &slow_call, r0);
603 605
604 __ bind(&check_number_dictionary); 606 __ bind(&check_number_dictionary);
605 // r2: key 607 // r2: key
606 // r3: elements map 608 // r3: elements map
607 // r4: elements 609 // r4: elements
608 // Check whether the elements is a number dictionary. 610 // Check whether the elements is a number dictionary.
609 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); 611 __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
610 __ cmp(r3, ip); 612 __ cmp(r3, ip);
611 __ b(ne, &slow_load); 613 __ b(ne, &slow_load);
612 __ mov(r0, Operand(r2, ASR, kSmiTagSize)); 614 __ asr(r0, r2, Operand(kSmiTagSize));
613 // r0: untagged index 615 // r0: untagged index
614 __ LoadFromNumberDictionary(&slow_load, r4, r2, r1, r0, r3, r5); 616 __ LoadFromNumberDictionary(&slow_load, r4, r2, r1, r0, r3, r5);
615 __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3); 617 __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3);
616 __ jmp(&do_call); 618 __ jmp(&do_call);
617 619
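The asr above is the SH4 spelling of ARM's mov with a folded ASR operand: SH4 has no flexible second operand, so untagging the smi key takes a dedicated shift. For reference:

    // 32-bit V8 smi encoding: tag bit 0 is clear, value sits in the upper bits.
    const int kSmiTagSize = 1;
    int SmiUntag(int smi) { return smi >> kSmiTagSize; }    // asr(r0, r2, #kSmiTagSize)
    int SmiTag(int value) { return value << kSmiTagSize; }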
618 __ bind(&slow_load); 620 __ bind(&slow_load);
619 // This branch is taken when calling KeyedCallIC_Miss is neither required 621 // This branch is taken when calling KeyedCallIC_Miss is neither required
620 // nor beneficial. 622 // nor beneficial.
621 __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3); 623 __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, r0, r3);
622 { 624 {
(...skipping 140 matching lines...)
763 Register scratch3, 765 Register scratch3,
764 Label* unmapped_case, 766 Label* unmapped_case,
765 Label* slow_case) { 767 Label* slow_case) {
766 Heap* heap = masm->isolate()->heap(); 768 Heap* heap = masm->isolate()->heap();
767 769
768 // Check that the receiver is a JSObject. Because of the map check 770 // Check that the receiver is a JSObject. Because of the map check
769 // later, we do not need to check for interceptors or whether it 771 // later, we do not need to check for interceptors or whether it
770 // requires access checks. 772 // requires access checks.
771 __ JumpIfSmi(object, slow_case); 773 __ JumpIfSmi(object, slow_case);
772 // Check that the object is some kind of JSObject. 774 // Check that the object is some kind of JSObject.
773 __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE); 775 __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE, ge);
774 __ b(lt, slow_case); 776 __ bf(slow_case);
775 777
776 // Check that the key is a positive smi. 778 // Check that the key is a positive smi.
777 __ tst(key, Operand(0x80000001)); 779 __ tst(key, Operand(0x80000001));
778 __ b(ne, slow_case); 780 __ b(ne, slow_case);
779 781
780 // Load the elements into scratch1 and check its map. 782 // Load the elements into scratch1 and check its map.
781 Handle<Map> arguments_map(heap->non_strict_arguments_elements_map()); 783 Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
782 __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset)); 784 __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
783 __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK); 785 __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
784 786
785 // Check if element is in the range of mapped arguments. If not, jump 787 // Check if element is in the range of mapped arguments. If not, jump
786 // to the unmapped lookup with the parameter map in scratch1. 788 // to the unmapped lookup with the parameter map in scratch1.
787 __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset)); 789 __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
788 __ sub(scratch2, scratch2, Operand(Smi::FromInt(2))); 790 __ sub(scratch2, scratch2, Operand(Smi::FromInt(2)));
789 __ cmp(key, Operand(scratch2)); 791 __ cmphs(key, scratch2);
790 __ b(cs, unmapped_case); 792 __ b(t, unmapped_case);
791 793
792 // Load element index and check whether it is the hole. 794 // Load element index and check whether it is the hole.
793 const int kOffset = 795 const int kOffset =
794 FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag; 796 FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
795 797
796 __ mov(scratch3, Operand(kPointerSize >> 1)); 798 __ mov(scratch3, Operand(kPointerSize >> 1));
797 __ mul(scratch3, key, scratch3); 799 __ mul(scratch3, key, scratch3);
798 __ add(scratch3, scratch3, Operand(kOffset)); 800 __ add(scratch3, scratch3, Operand(kOffset));
799 801
800 __ ldr(scratch2, MemOperand(scratch1, scratch3)); 802 __ ldr(scratch2, MemOperand(scratch1, scratch3));
(...skipping 21 matching lines...)
822 // second element of the parameter_map. The parameter_map register 824 // second element of the parameter_map. The parameter_map register
823 // must be loaded with the parameter map of the arguments object and is 825 // must be loaded with the parameter map of the arguments object and is
824 // overwritten. 826 // overwritten.
825 const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize; 827 const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
826 Register backing_store = parameter_map; 828 Register backing_store = parameter_map;
827 __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset)); 829 __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
828 Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map()); 830 Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
829 __ CheckMap(backing_store, scratch, fixed_array_map, slow_case, 831 __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
830 DONT_DO_SMI_CHECK); 832 DONT_DO_SMI_CHECK);
831 __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset)); 833 __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
832 __ cmp(key, Operand(scratch)); 834 __ cmphs(key, scratch);
833 __ b(cs, slow_case); 835 __ b(t, slow_case);
834 __ mov(scratch, Operand(kPointerSize >> 1)); 836 __ mov(scratch, Operand(kPointerSize >> 1));
835 __ mul(scratch, key, scratch); 837 __ mul(scratch, key, scratch);
836 __ add(scratch, 838 __ add(scratch,
837 scratch, 839 scratch,
838 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); 840 Operand(FixedArray::kHeaderSize - kHeapObjectTag));
839 return MemOperand(backing_store, scratch); 841 return MemOperand(backing_store, scratch);
840 } 842 }
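Both argument-lookup helpers above compute an element address from the smi key with mul rather than a shift: multiplying the tagged key by (kPointerSize >> 1) is the same as multiplying the untagged value by kPointerSize. The arithmetic, with illustrative constants:

    const int kPointerSize = 4;
    const int kHeapObjectTag = 1;

    // Offset of mapped element 'key' (a smi) from the parameter map pointer:
    // array header, plus the two leading slots, minus the heap-object tag.
    int MappedElementOffset(int smi_key, int fixed_array_header_size) {
      int kOffset = fixed_array_header_size + 2 * kPointerSize - kHeapObjectTag;
      return smi_key * (kPointerSize >> 1) + kOffset;  // == value * 4 + kOffset
    }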
841 843
842 844
843 void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) { 845 void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
(...skipping 148 matching lines...)
992 994
993 __ bind(&check_number_dictionary); 995 __ bind(&check_number_dictionary);
994 __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset)); 996 __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
995 __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset)); 997 __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));
996 998
997 // Check whether the elements is a number dictionary. 999 // Check whether the elements is a number dictionary.
998 // r0: key 1000 // r0: key
999 // r3: elements map 1001 // r3: elements map
1000 // r4: elements 1002 // r4: elements
1001 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); 1003 __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
1002 __ cmp(r3, ip); 1004 __ cmpeq(r3, ip);
1003 __ b(ne, &slow); 1005 __ bf(&slow);
1004 __ mov(r2, Operand(r0, ASR, kSmiTagSize)); 1006 __ asr(r2, r0, Operand(kSmiTagSize));
1005 __ LoadFromNumberDictionary(&slow, r4, r0, r0, r2, r3, r5); 1007 __ LoadFromNumberDictionary(&slow, r4, r0, r0, r2, r3, r5);
1006 __ Ret(); 1008 __ Ret();
1007 1009
1008 // Slow case, key and receiver still in r0 and r1. 1010 // Slow case, key and receiver still in r0 and r1.
1009 __ bind(&slow); 1011 __ bind(&slow);
1010 __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1012 __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
1011 1, r2, r3); 1013 1, r2, r3);
1012 GenerateRuntimeGetProperty(masm); 1014 GenerateRuntimeGetProperty(masm);
1013 1015
1014 __ bind(&check_string); 1016 __ bind(&check_string);
1015 GenerateKeyStringCheck(masm, key, r2, r3, &index_string, &slow); 1017 GenerateKeyStringCheck(masm, key, r2, r3, &index_string, &slow);
1016 1018
1017 GenerateKeyedLoadReceiverCheck( 1019 GenerateKeyedLoadReceiverCheck(
1018 masm, receiver, r2, r3, Map::kHasNamedInterceptor, &slow); 1020 masm, receiver, r2, r3, Map::kHasNamedInterceptor, &slow);
1019 1021
1020 // If the receiver is a fast-case object, check the keyed lookup 1022 // If the receiver is a fast-case object, check the keyed lookup
1021 // cache. Otherwise probe the dictionary. 1023 // cache. Otherwise probe the dictionary.
1022 __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset)); 1024 __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
1023 __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset)); 1025 __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
1024 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); 1026 __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
1025 __ cmp(r4, ip); 1027 __ cmp(r4, ip);
1026 __ b(eq, &probe_dictionary); 1028 __ bt(&probe_dictionary);
1027 1029
1028 // Load the map of the receiver, compute the keyed lookup cache hash 1030 // Load the map of the receiver, compute the keyed lookup cache hash
1029 // based on 32 bits of the map pointer and the string hash. 1031 // based on 32 bits of the map pointer and the string hash.
1030 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); 1032 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
1031 __ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift)); 1033 __ asr(r3, r2, Operand(KeyedLookupCache::kMapHashShift));
1032 __ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset)); 1034 __ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
1033 __ eor(r3, r3, Operand(r4, ASR, String::kHashShift)); 1035 __ asr(r4, r4, Operand(String::kHashShift));
1036 __ eor(r3, r3, r4);
1034 int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask; 1037 int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
1035 __ And(r3, r3, Operand(mask)); 1038 __ land(r3, r3, Operand(mask));
1036 1039
1037 // Load the key (consisting of map and symbol) from the cache and 1040 // Load the key (consisting of map and symbol) from the cache and
1038 // check for match. 1041 // check for match.
1039 Label load_in_object_property; 1042 Label load_in_object_property;
1040 static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket; 1043 static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
1041 Label hit_on_nth_entry[kEntriesPerBucket]; 1044 Label hit_on_nth_entry[kEntriesPerBucket];
1042 ExternalReference cache_keys = 1045 ExternalReference cache_keys =
1043 ExternalReference::keyed_lookup_cache_keys(isolate); 1046 ExternalReference::keyed_lookup_cache_keys(isolate);
1044 1047
1045 __ mov(r4, Operand(cache_keys)); 1048 __ mov(r4, Operand(cache_keys));
1046 __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1)); 1049 __ lsl(r5, r3, Operand(kPointerSizeLog2 + 1));
1050 __ add(r4, r4, r5);
1047 1051
1048 for (int i = 0; i < kEntriesPerBucket - 1; i++) { 1052 for (int i = 0; i < kEntriesPerBucket - 1; i++) {
1049 Label try_next_entry; 1053 Label try_next_entry;
1050 // Load map and move r4 to next entry. 1054 // Load map and move r4 to next entry.
1051 __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex)); 1055 __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
1052 __ cmp(r2, r5); 1056 __ cmp(r2, r5);
1053 __ b(ne, &try_next_entry); 1057 __ b(ne, &try_next_entry);
1054 __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load symbol 1058 __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load symbol
1055 __ cmp(r0, r5); 1059 __ cmp(r0, r5);
1056 __ b(eq, &hit_on_nth_entry[i]); 1060 __ b(eq, &hit_on_nth_entry[i]);
1057 __ bind(&try_next_entry); 1061 __ bind(&try_next_entry);
1058 } 1062 }
1059 1063
1060 // Last entry: Load map and move r4 to symbol. 1064 // Last entry: Load map and move r4 to symbol.
1061 __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex)); 1065 __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
1062 __ cmp(r2, r5); 1066 __ cmp(r2, r5);
1063 __ b(ne, &slow); 1067 __ b(ne, &slow);
1064 __ ldr(r5, MemOperand(r4)); 1068 __ ldr(r5, MemOperand(r4));
1065 __ cmp(r0, r5); 1069 __ cmpeq(r0, r5);
1066 __ b(ne, &slow); 1070 __ bf(&slow);
1067 1071
1068 // Get field offset. 1072 // Get field offset.
1069 // r0 : key 1073 // r0 : key
1070 // r1 : receiver 1074 // r1 : receiver
1071 // r2 : receiver's map 1075 // r2 : receiver's map
1072 // r3 : lookup cache index 1076 // r3 : lookup cache index
1073 ExternalReference cache_field_offsets = 1077 ExternalReference cache_field_offsets =
1074 ExternalReference::keyed_lookup_cache_field_offsets(isolate); 1078 ExternalReference::keyed_lookup_cache_field_offsets(isolate);
1075 1079
1076 // Hit on nth entry. 1080 // Hit on nth entry.
1077 for (int i = kEntriesPerBucket - 1; i >= 0; i--) { 1081 for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
1078 __ bind(&hit_on_nth_entry[i]); 1082 __ bind(&hit_on_nth_entry[i]);
1079 __ mov(r4, Operand(cache_field_offsets)); 1083 __ mov(r4, Operand(cache_field_offsets));
1080 if (i != 0) { 1084 if (i != 0) {
1081 __ add(r3, r3, Operand(i)); 1085 __ add(r3, r3, Operand(i));
1082 } 1086 }
1083 __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2)); 1087 __ lsl(r6, r3, Operand(kPointerSizeLog2));
1088 __ ldr(r5, MemOperand(r4, r6));
1084 __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset)); 1089 __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
1085 __ sub(r5, r5, r6, SetCC); 1090 __ sub(r5, r5, r6);
1086 __ b(ge, &property_array_property); 1091 __ cmpge(r5, Operand(0));
1092 __ bt(&property_array_property);
1087 if (i != 0) { 1093 if (i != 0) {
1088 __ jmp(&load_in_object_property); 1094 __ jmp(&load_in_object_property);
1089 } 1095 }
1090 } 1096 }
1091 1097
1092 // Load in-object property. 1098 // Load in-object property.
1093 __ bind(&load_in_object_property); 1099 __ bind(&load_in_object_property);
1094 __ ldrb(r6, FieldMemOperand(r2, Map::kInstanceSizeOffset)); 1100 __ ldrb(r6, FieldMemOperand(r2, Map::kInstanceSizeOffset));
1095 __ add(r6, r6, r5); // Index from start of object. 1101 __ add(r6, r6, r5); // Index from start of object.
1096 __ sub(r1, r1, Operand(kHeapObjectTag)); // Remove the heap tag. 1102 __ sub(r1, r1, Operand(kHeapObjectTag)); // Remove the heap tag.
1097 __ ldr(r0, MemOperand(r1, r6, LSL, kPointerSizeLog2)); 1103 __ lsl(r0, r6, Operand(kPointerSizeLog2));
1104 __ ldr(r0, MemOperand(r1, r0));
1098 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1105 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
1099 1, r2, r3); 1106 1, r2, r3);
1100 __ Ret(); 1107 __ Ret();
1101 1108
1102 // Load property array property. 1109 // Load property array property.
1103 __ bind(&property_array_property); 1110 __ bind(&property_array_property);
1104 __ ldr(r1, FieldMemOperand(r1, JSObject::kPropertiesOffset)); 1111 __ ldr(r1, FieldMemOperand(r1, JSObject::kPropertiesOffset));
1105 __ add(r1, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); 1112 __ add(r1, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1106 __ ldr(r0, MemOperand(r1, r5, LSL, kPointerSizeLog2)); 1113 __ lsl(r0, r5, Operand(kPointerSizeLog2));
1114 __ ldr(r0, MemOperand(r1, r0));
1107 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1115 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
1108 1, r2, r3); 1116 1, r2, r3);
1109 __ Ret(); 1117 __ Ret();
1110 1118
1111 // Do a quick inline probe of the receiver's dictionary, if it 1119 // Do a quick inline probe of the receiver's dictionary, if it
1112 // exists. 1120 // exists.
1113 __ bind(&probe_dictionary); 1121 __ bind(&probe_dictionary);
1114 // r1: receiver 1122 // r1: receiver
1115 // r0: key 1123 // r0: key
1116 // r3: elements 1124 // r3: elements
(...skipping 47 matching lines...)
1164 1172
1165 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { 1173 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
1166 // ---------- S t a t e -------------- 1174 // ---------- S t a t e --------------
1167 // -- lr : return address 1175 // -- lr : return address
1168 // -- r0 : key 1176 // -- r0 : key
1169 // -- r1 : receiver 1177 // -- r1 : receiver
1170 // ----------------------------------- 1178 // -----------------------------------
1171 Label slow; 1179 Label slow;
1172 1180
1173 // Check that the receiver isn't a smi. 1181 // Check that the receiver isn't a smi.
1174 __ JumpIfSmi(r1, &slow); 1182 __ JumpIfSmi(r1, &slow, Label::kNear);
1175 1183
1176 // Check that the key is an array index, that is Uint32. 1184 // Check that the key is an array index, that is Uint32.
1177 __ tst(r0, Operand(kSmiTagMask | kSmiSignMask)); 1185 __ tst(r0, Operand(kSmiTagMask | kSmiSignMask));
1178 __ b(ne, &slow); 1186 __ bf_near(&slow);
1179 1187
1180 // Get the map of the receiver. 1188 // Get the map of the receiver.
1181 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); 1189 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
1182 1190
1183 // Check that it has indexed interceptor and access checks 1191 // Check that it has indexed interceptor and access checks
1184 // are not enabled for this object. 1192 // are not enabled for this object.
1185 __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset)); 1193 __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
1186 __ and_(r3, r3, Operand(kSlowCaseBitFieldMask)); 1194 __ land(r3, r3, Operand(kSlowCaseBitFieldMask));
1187 __ cmp(r3, Operand(1 << Map::kHasIndexedInterceptor)); 1195 __ cmpeq(r3, Operand(1 << Map::kHasIndexedInterceptor));
1188 __ b(ne, &slow); 1196 __ bf_near(&slow);
1189 1197
1190 // Everything is fine, call runtime. 1198 // Everything is fine, call runtime.
1191 __ Push(r1, r0); // Receiver, key. 1199 __ Push(r1, r0); // Receiver, key.
1192 1200
1193 // Perform tail call to the entry. 1201 // Perform tail call to the entry.
1194 __ TailCallExternalReference( 1202 __ TailCallExternalReference(
1195 ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor), 1203 ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
1196 masm->isolate()), 1204 masm->isolate()),
1197 2, 1205 2,
1198 1); 1206 1);
(...skipping 133 matching lines...)
1332 Label non_smi_value; 1340 Label non_smi_value;
1333 __ JumpIfNotSmi(value, &non_smi_value); 1341 __ JumpIfNotSmi(value, &non_smi_value);
1334 1342
1335 if (increment_length == kIncrementLength) { 1343 if (increment_length == kIncrementLength) {
1336 // Add 1 to receiver->length. 1344 // Add 1 to receiver->length.
1337 __ add(scratch_value, key, Operand(Smi::FromInt(1))); 1345 __ add(scratch_value, key, Operand(Smi::FromInt(1)));
1338 __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); 1346 __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
1339 } 1347 }
1340 // It's irrelevant whether array is smi-only or not when writing a smi. 1348 // It's irrelevant whether array is smi-only or not when writing a smi.
1341 __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); 1349 __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1342 __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); 1350 __ lsl(scratch_value, address, Operand(kPointerSizeLog2 - kSmiTagSize));
1351 __ add(address, address, scratch_value);
1343 __ str(value, MemOperand(address)); 1352 __ str(value, MemOperand(address));
1344 __ Ret(); 1353 __ Ret();
1345 1354
1346 __ bind(&non_smi_value); 1355 __ bind(&non_smi_value);
1347 // Escape to elements kind transition case. 1356 // Escape to elements kind transition case.
1348 __ CheckFastObjectElements(receiver_map, scratch_value, 1357 __ CheckFastObjectElements(receiver_map, scratch_value,
1349 &transition_smi_elements); 1358 &transition_smi_elements);
1350 1359
1351 // Fast elements array, store the value to the elements backing store. 1360 // Fast elements array, store the value to the elements backing store.
1352 __ bind(&finish_object_store); 1361 __ bind(&finish_object_store);
1353 if (increment_length == kIncrementLength) { 1362 if (increment_length == kIncrementLength) {
1354 // Add 1 to receiver->length. 1363 // Add 1 to receiver->length.
1355 __ add(scratch_value, key, Operand(Smi::FromInt(1))); 1364 __ add(scratch_value, key, Operand(Smi::FromInt(1)));
1356 __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); 1365 __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
1357 } 1366 }
1358 __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); 1367 __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1359 __ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); 1368 __ lsl(scratch_value, key, Operand(kPointerSizeLog2 - kSmiTagSize));
1369 __ add(address, address, scratch_value);
1360 __ str(value, MemOperand(address)); 1370 __ str(value, MemOperand(address));
1361 // Update write barrier for the elements array address. 1371 // Update write barrier for the elements array address.
1362 __ mov(scratch_value, value); // Preserve the value which is returned. 1372 __ mov(scratch_value, value); // Preserve the value which is returned.
1363 __ RecordWrite(elements, 1373 __ RecordWrite(elements,
1364 address, 1374 address,
1365 scratch_value, 1375 scratch_value,
1366 kLRHasNotBeenSaved, 1376 kLRHasNotBeenSaved,
1367 kDontSaveFPRegs, 1377 kDontSaveFPRegs,
1368 EMIT_REMEMBERED_SET, 1378 EMIT_REMEMBERED_SET,
1369 OMIT_SMI_CHECK); 1379 OMIT_SMI_CHECK);
(...skipping 84 matching lines...)
1454 // Register usage. 1464 // Register usage.
1455 Register value = r0; 1465 Register value = r0;
1456 Register key = r1; 1466 Register key = r1;
1457 Register receiver = r2; 1467 Register receiver = r2;
1458 Register receiver_map = r3; 1468 Register receiver_map = r3;
1459 Register elements_map = r6; 1469 Register elements_map = r6;
1460 Register elements = r7; // Elements array of the receiver. 1470 Register elements = r7; // Elements array of the receiver.
1461 // r4 and r5 are used as general scratch registers. 1471 // r4 and r5 are used as general scratch registers.
1462 1472
1463 // Check that the key is a smi. 1473 // Check that the key is a smi.
1464 __ JumpIfNotSmi(key, &slow); 1474 __ JumpIfNotSmi(key, &slow, Label::kNear);
1465 // Check that the object isn't a smi. 1475 // Check that the object isn't a smi.
1466 __ JumpIfSmi(receiver, &slow); 1476 __ JumpIfSmi(receiver, &slow, Label::kNear);
1467 // Get the map of the object. 1477 // Get the map of the object.
1468 __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); 1478 __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
1469 // Check that the receiver does not require access checks. We need 1479 // Check that the receiver does not require access checks. We need
1470 // to do this because this generic stub does not perform map checks. 1480 // to do this because this generic stub does not perform map checks.
1471 __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset)); 1481 __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
1472 __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded)); 1482 __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
1473 __ b(ne, &slow); 1483 __ bf_near(&slow);
1474 // Check if the object is a JS array or not. 1484 // Check if the object is a JS array or not.
1475 __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset)); 1485 __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
1476 __ cmp(r4, Operand(JS_ARRAY_TYPE)); 1486 __ cmp(r4, Operand(JS_ARRAY_TYPE));
1477 __ b(eq, &array); 1487 __ b(eq, &array);
1478 // Check that the object is some kind of JSObject. 1488 // Check that the object is some kind of JSObject.
1479 __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); 1489 __ cmpge(r4, Operand(FIRST_JS_OBJECT_TYPE));
1480 __ b(lt, &slow); 1490 __ bf_near(&slow);
1481 1491
1482 // Object case: Check key against length in the elements array. 1492 // Object case: Check key against length in the elements array.
1483 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); 1493 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
1484 // Check array bounds. Both the key and the length of FixedArray are smis. 1494 // Check array bounds. Both the key and the length of FixedArray are smis.
1485 __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); 1495 __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
1486 __ cmp(key, Operand(ip)); 1496 __ cmphs(key, ip);
1487 __ b(lo, &fast_object); 1497 __ bf(&fast_object);
1488 1498
1489 // Slow case, handle jump to runtime. 1499 // Slow case, handle jump to runtime.
1490 __ bind(&slow); 1500 __ bind(&slow);
1491 // Entry registers are intact. 1501 // Entry registers are intact.
1492 // r0: value. 1502 // r0: value.
1493 // r1: key. 1503 // r1: key.
1494 // r2: receiver. 1504 // r2: receiver.
1495 GenerateRuntimeSetProperty(masm, strict_mode); 1505 GenerateRuntimeSetProperty(masm, strict_mode);
1496 1506
1497 // Extra capacity case: Check if there is extra capacity to 1507 // Extra capacity case: Check if there is extra capacity to
1498 // perform the store and update the length. Used for adding one 1508 // perform the store and update the length. Used for adding one
1499 // element to the array by writing to array[array.length]. 1509 // element to the array by writing to array[array.length].
1500 __ bind(&extra); 1510 __ bind(&extra);
1501 // Condition code from comparing key and array length is still available. 1511 // Condition code from comparing key and array length is still available.
1502 __ b(ne, &slow); // Only support writing to array[array.length]. 1512 __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
1513 __ cmpeq(key, ip);
1514 __ bf(&slow); // Only support writing to array[array.length].
1503 // Check for room in the elements backing store. 1515 // Check for room in the elements backing store.
1504 // Both the key and the length of FixedArray are smis. 1516 // Both the key and the length of FixedArray are smis.
1505 __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); 1517 __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
1506 __ cmp(key, Operand(ip)); 1518 __ cmphs(key, ip);
1507 __ b(hs, &slow); 1519 __ bt(&slow);
1508 __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); 1520 __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
1509 __ cmp(elements_map, 1521 __ cmp(elements_map,
1510 Operand(masm->isolate()->factory()->fixed_array_map())); 1522 Operand(masm->isolate()->factory()->fixed_array_map()));
1511 __ b(ne, &check_if_double_array); 1523 __ b(ne, &check_if_double_array);
1512 __ jmp(&fast_object_grow); 1524 __ jmp(&fast_object_grow);
1513 1525
1514 __ bind(&check_if_double_array); 1526 __ bind(&check_if_double_array);
1515 __ cmp(elements_map, 1527 __ cmp(elements_map,
1516 Operand(masm->isolate()->factory()->fixed_double_array_map())); 1528 Operand(masm->isolate()->factory()->fixed_double_array_map()));
1517 __ b(ne, &slow); 1529 __ b(ne, &slow);
1518 __ jmp(&fast_double_grow); 1530 __ jmp(&fast_double_grow);
1519 1531
1520 // Array case: Get the length and the elements array from the JS 1532 // Array case: Get the length and the elements array from the JS
1521 // array. Check that the array is in fast mode (and writable); if it 1533 // array. Check that the array is in fast mode (and writable); if it
1522 // is the length is always a smi. 1534 // is the length is always a smi.
1523 __ bind(&array); 1535 __ bind(&array);
1524 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); 1536 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
1525 1537
1526 // Check the key against the length in the array. 1538 // Check the key against the length in the array.
1527 __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset)); 1539 __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
1528 __ cmp(key, Operand(ip)); 1540 __ cmphs(key, ip);
1529 __ b(hs, &extra); 1541 __ bt(&extra);
1542 // Fall through to fast case.
1530 1543
1531 KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, 1544 KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
1532 &slow, kCheckMap, kDontIncrementLength, 1545 &slow, kCheckMap, kDontIncrementLength,
1533 value, key, receiver, receiver_map, 1546 value, key, receiver, receiver_map,
1534 elements_map, elements); 1547 elements_map, elements);
1535 KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow, 1548 KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
1536 &slow, kDontCheckMap, kIncrementLength, 1549 &slow, kDontCheckMap, kIncrementLength,
1537 value, key, receiver, receiver_map, 1550 value, key, receiver, receiver_map,
1538 elements_map, elements); 1551 elements_map, elements);
1539 } 1552 }
(...skipping 53 matching lines...)
1593 Label miss; 1606 Label miss;
1594 1607
1595 Register receiver = r1; 1608 Register receiver = r1;
1596 Register value = r0; 1609 Register value = r0;
1597 Register scratch = r3; 1610 Register scratch = r3;
1598 1611
1599 // Check that the receiver isn't a smi. 1612 // Check that the receiver isn't a smi.
1600 __ JumpIfSmi(receiver, &miss); 1613 __ JumpIfSmi(receiver, &miss);
1601 1614
1602 // Check that the object is a JS array. 1615 // Check that the object is a JS array.
1603 __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE); 1616 __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE, eq);
1604 __ b(ne, &miss); 1617 __ bf(&miss);
1605 1618
1606 // Check that elements are FixedArray. 1619 // Check that elements are FixedArray.
1607 // We rely on StoreIC_ArrayLength below to deal with all types of 1620 // We rely on StoreIC_ArrayLength below to deal with all types of
1608 // fast elements (including COW). 1621 // fast elements (including COW).
1609 __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset)); 1622 __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
1610 __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE); 1623 __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE, eq);
1611 __ b(ne, &miss); 1624 __ b(ne, &miss);
1612 1625
1613 // Check that the array has fast properties, otherwise the length 1626 // Check that the array has fast properties, otherwise the length
1614 // property might have been redefined. 1627 // property might have been redefined.
1615 __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset)); 1628 __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
1616 __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset)); 1629 __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
1617 __ CompareRoot(scratch, Heap::kHashTableMapRootIndex); 1630 __ CompareRoot(scratch, Heap::kHashTableMapRootIndex);
1618 __ b(eq, &miss); 1631 __ b(eq, &miss);
1619 1632
1620 // Check that value is a smi. 1633 // Check that value is a smi.
(...skipping 106 matching lines...)
1727 1740
1728 // Activate inlined smi code. 1741 // Activate inlined smi code.
1729 if (previous_state == UNINITIALIZED) { 1742 if (previous_state == UNINITIALIZED) {
1730 PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK); 1743 PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
1731 } 1744 }
1732 } 1745 }
1733 1746
1734 1747
1735 void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { 1748 void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
1736 Address cmp_instruction_address = 1749 Address cmp_instruction_address =
1737 Assembler::return_address_from_call_start(address); 1750 address + Assembler::kCallTargetAddressOffset;
1738 1751
1739 // If the instruction following the call is not a cmp rx, #yyy, nothing 1752 // If the instruction following the call is not a cmp #ii, rx, nothing
1740 // was inlined. 1753 // was inlined.
1741 Instr instr = Assembler::instr_at(cmp_instruction_address); 1754 Instr instr = Assembler::instr_at(cmp_instruction_address);
1742 if (!Assembler::IsCmpImmediate(instr)) { 1755 if (!Assembler::IsCmpImmediate(instr)) {
1743 return; 1756 return;
1744 } 1757 }
1745 1758
1746 // The delta to the start of the map check instruction and the 1759 // The delta to the start of the map check instruction and the
1747 // condition code used at the patched jump. 1760 // condition code used at the patched jump.
1748 int delta = Assembler::GetCmpImmediateRawImmediate(instr); 1761 int delta = Assembler::GetCmpImmediateAsUnsigned(instr);
1749 delta += 1762 // TODO(stm): is this needed for ST40?
1750 Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask; 1763 // delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask
1751 // If the delta is 0 the instruction is cmp r0, #0 which also signals that 1764
1765 // If the delta is 0 the instruction is cmp #0, r0 which also signals that
1752 // nothing was inlined. 1766 // nothing was inlined.
1753 if (delta == 0) { 1767 if (delta == 0) {
1754 return; 1768 return;
1755 } 1769 }
1756 1770
1757 #ifdef DEBUG 1771 #ifdef DEBUG
1758 if (FLAG_trace_ic) { 1772 if (FLAG_trace_ic) {
1759 PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", 1773 PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
1760 address, cmp_instruction_address, delta); 1774 address, cmp_instruction_address, delta);
1761 } 1775 }
1762 #endif 1776 #endif
1763 1777
1764 Address patch_address = 1778 Address patch_address =
1765 cmp_instruction_address - delta * Instruction::kInstrSize; 1779 cmp_instruction_address - delta * Assembler::kInstrSize;
1766 Instr instr_at_patch = Assembler::instr_at(patch_address); 1780 Instr instr_at_patch = Assembler::instr_at(patch_address);
1781 #ifdef DEBUG
1782 Instr instr_before_patch =
1783 Assembler::instr_at(patch_address - Assembler::kInstrSize);
1784 #endif
1767 Instr branch_instr = 1785 Instr branch_instr =
1768 Assembler::instr_at(patch_address + Instruction::kInstrSize); 1786 Assembler::instr_at(patch_address + Assembler::kInstrSize);
1769 // This is patching a conditional "jump if not smi/jump if smi" site. 1787 ASSERT(Assembler::IsCmpRegister(instr_at_patch));
1770 // Enabling by changing from 1788 ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
1771 // cmp rx, rx 1789 Assembler::GetRm(instr_at_patch).code());
1772 // b eq/ne, <target> 1790 ASSERT(Assembler::IsMovImmediate(instr_before_patch));
1773 // to 1791 ASSERT_EQ(Assembler::GetRn(instr_before_patch).code(), sh4_ip.code());
1774 // tst rx, #kSmiTagMask 1792 ASSERT(Assembler::IsBranch(branch_instr));
1775 // b ne/eq, <target> 1793 if (Assembler::GetCondition(branch_instr) == f) {
1776 // and vice-versa to be disabled again. 1794 // This is patching a "jump if not smi" site to be active.
1777 CodePatcher patcher(patch_address, 2); 1795 // Changing
1778 Register reg = Assembler::GetRn(instr_at_patch); 1796 // mov #kSmiTagMask, sh4_ip
1779 if (check == ENABLE_INLINED_SMI_CHECK) { 1797 // cmp rx, rx
1780 ASSERT(Assembler::IsCmpRegister(instr_at_patch)); 1798 // bf <skip> // actually a bt <target>
1781 ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(), 1799 // ...
1782 Assembler::GetRm(instr_at_patch).code()); 1800 // bra <target>
1783 patcher.masm()->tst(reg, Operand(kSmiTagMask)); 1801 // skip:
1802 // to
1803 // mov #kSmiTagMask, sh4_ip
1804 // tst rx, sh4_ip
1805 // bt <skip> // actually implements a bf <target>
1806 // ...
1807 CodePatcher patcher(patch_address, 2);
1808 Register reg = Assembler::GetRn(instr_at_patch);
1809 patcher.masm()->tst(reg, sh4_ip);
1810 patcher.EmitCondition(t);
1784 } else { 1811 } else {
1785 ASSERT(check == DISABLE_INLINED_SMI_CHECK); 1812 ASSERT(Assembler::GetCondition(branch_instr) == t);
1786 ASSERT(Assembler::IsTstImmediate(instr_at_patch)); 1813 // This is patching a "jump if smi" site to be active.
1787 patcher.masm()->cmp(reg, reg); 1814 // Changing
1788 } 1815 // mov #kSmiTagMask, sh4_ip
1789 ASSERT(Assembler::IsBranch(branch_instr)); 1816 // cmp rx, rx
1790 if (Assembler::GetCondition(branch_instr) == eq) { 1817 // bt <skip> // actually a bf <target>
1791 patcher.EmitCondition(ne); 1818 // ...
1792 } else { 1819 // bra <target>
1793 ASSERT(Assembler::GetCondition(branch_instr) == ne); 1820 // skip:
1794 patcher.EmitCondition(eq); 1821 // to
1822 // mov #kSmiTagMask, sh4_ip
1823 // tst rx, sh4_ip
1824 // bf <target>
1825 // ...
1826 CodePatcher patcher(patch_address, 2);
1827 Register reg = Assembler::GetRn(instr_at_patch);
1828 patcher.masm()->tst(reg, sh4_ip);
1829 patcher.EmitCondition(f);
1795 } 1830 }
1796 } 1831 }
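Summarizing the patch logic above: the compiled site materializes an always-true compare (cmp rx, rx) followed by a bf or bt over a long jump; enabling the inlined smi check rewrites the compare to tst rx, sh4_ip (with sh4_ip holding kSmiTagMask, per the mov the assertions check) and flips the branch sense, because SH4's tst sets T when the masked value is zero, i.e. when rx is a smi. A minimal model of the test the patch installs:

    // After patching: T = ((rx & kSmiTagMask) == 0), true exactly when rx
    // is a smi (tag bit clear), so the flipped bt/bf implements the
    // intended "jump if (not) smi".
    const int kSmiTagMask = 1;
    bool TBitAtPatchedSite(int rx) { return (rx & kSmiTagMask) == 0; }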
1797 1832
1798 1833
1799 } } // namespace v8::internal 1834 } } // namespace v8::internal
1800 1835
1801 #endif // V8_TARGET_ARCH_ARM 1836 #endif // V8_TARGET_ARCH_SH4
