OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2011-2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
11 // with the distribution. | 11 // with the distribution. |
12 // * Neither the name of Google Inc. nor the names of its | 12 // * Neither the name of Google Inc. nor the names of its |
13 // contributors may be used to endorse or promote products derived | 13 // contributors may be used to endorse or promote products derived |
14 // from this software without specific prior written permission. | 14 // from this software without specific prior written permission. |
15 // | 15 // |
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | 27 |
28 #include "v8.h" | 28 #include "v8.h" |
29 | 29 |
30 #if defined(V8_TARGET_ARCH_ARM) | 30 #if defined(V8_TARGET_ARCH_SH4) |
31 | 31 |
32 #include "bootstrapper.h" | 32 #include "bootstrapper.h" |
33 #include "code-stubs.h" | 33 #include "code-stubs.h" |
34 #include "regexp-macro-assembler.h" | 34 #include "regexp-macro-assembler.h" |
35 | 35 |
36 namespace v8 { | 36 namespace v8 { |
37 namespace internal { | 37 namespace internal { |
38 | 38 |
39 | 39 |
40 #define __ ACCESS_MASM(masm) | 40 #define __ ACCESS_MASM(masm) |
41 | 41 |
42 static void EmitIdenticalObjectComparison(MacroAssembler* masm, | 42 static void EmitIdenticalObjectComparison(MacroAssembler* masm, |
43 Label* slow, | 43 Label* slow, |
44 Condition cond, | 44 Condition cond, |
45 bool never_nan_nan); | 45 bool never_nan_nan); |
46 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | 46 static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
47 Register lhs, | 47 Register lhs, |
48 Register rhs, | 48 Register rhs, |
49 Label* lhs_not_nan, | 49 Label* lhs_not_nan, |
50 Label* slow, | 50 Label* slow, |
51 bool strict); | 51 bool strict); |
52 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond); | 52 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond); |
53 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | 53 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
54 Register lhs, | 54 Register lhs, |
55 Register rhs); | 55 Register rhs); |
| 56 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, |
| 57 Register lhs, |
| 58 Register rhs, |
| 59 Label* both_loaded_as_doubles, |
| 60 Label* not_heap_numbers, |
| 61 Label* slow); |
| 62 static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm, |
| 63 Register lhs, |
| 64 Register rhs, |
| 65 Label* possible_strings, |
| 66 Label* not_both_strings); |
| 67 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond); |
56 | 68 |
57 | 69 |
| 70 // Copied from ARM |
| 71 #include "map-sh4.h" // Define register map |
58 // Check if the operand is a heap number. | 72 // Check if the operand is a heap number. |
59 static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand, | 73 static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand, |
60 Register scratch1, Register scratch2, | 74 Register scratch1, Register scratch2, |
61 Label* not_a_heap_number) { | 75 Label* not_a_heap_number) { |
62 __ ldr(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset)); | 76 __ ldr(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset)); |
63 __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex); | 77 __ LoadRoot(scratch2, Heap::kHeapNumberMapRootIndex); |
64 __ cmp(scratch1, scratch2); | 78 __ cmp(scratch1, scratch2); |
65 __ b(ne, not_a_heap_number); | 79 __ b(ne, not_a_heap_number); |
66 } | 80 } |
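Note: the four instructions above implement the map check V8 uses to classify heap objects: the first word of every heap object is a pointer to its map, and the heap keeps one canonical map for heap numbers. A minimal stand-alone sketch in C++ (type and field names here are illustrative, not V8's API):

    struct Map;                             // opaque: only identity matters
    struct HeapObject { const Map* map; };  // map pointer is the first field

    // Mirrors the ldr / LoadRoot / cmp / b(ne) sequence: an object is a
    // heap number exactly when its map is the canonical HeapNumber map.
    static bool IsHeapNumber(const HeapObject* object,
                             const Map* heap_number_map) {
      return object->map == heap_number_map;
    }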
67 | 81 |
68 | 82 |
69 void ToNumberStub::Generate(MacroAssembler* masm) { | 83 void ToNumberStub::Generate(MacroAssembler* masm) { |
70 // The ToNumber stub takes one argument in r0. | 84 // The ToNumber stub takes one argument in r0. |
| 85 // Entry argument: r0 |
| 86 // Exit in: r0 |
| 87 #ifdef DEBUG |
| 88 // Clobber other parameter registers on entry. |
| 89 __ Dead(r1, r2, r3); |
| 90 __ Dead(r4, r5, r6, r7); |
| 91 #endif |
71 Label check_heap_number, call_builtin; | 92 Label check_heap_number, call_builtin; |
72 __ JumpIfNotSmi(r0, &check_heap_number); | 93 __ JumpIfNotSmi(r0, &check_heap_number, Label::kNear); |
73 __ Ret(); | 94 __ Ret(); |
74 | 95 |
75 __ bind(&check_heap_number); | 96 __ bind(&check_heap_number); |
76 EmitCheckForHeapNumber(masm, r0, r1, ip, &call_builtin); | 97 EmitCheckForHeapNumber(masm, r0, r1, ip, &call_builtin); |
77 __ Ret(); | 98 __ Ret(); |
78 | 99 |
79 __ bind(&call_builtin); | 100 __ bind(&call_builtin); |
80 __ push(r0); | 101 __ push(r0); |
81 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION); | 102 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION); |
82 } | 103 } |
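Note: the stub is two fast paths plus a builtin fallback. Written out as ordinary C++ (a sketch reusing the HeapObject/IsHeapNumber model above, with V8's 32-bit tagging: Smis have the low bit clear, heap pointers carry tag bit 1; CallToNumberBuiltin is a hypothetical stand-in for Builtins::TO_NUMBER):

    #include <cstdint>

    extern const Map* canonical_heap_number_map;     // assumed available
    uintptr_t CallToNumberBuiltin(uintptr_t value);  // declaration only

    uintptr_t ToNumberSketch(uintptr_t value) {
      if ((value & 1) == 0) return value;  // Smi: already a number
      const HeapObject* object =
          reinterpret_cast<const HeapObject*>(value - 1);  // strip the tag
      if (IsHeapNumber(object, canonical_heap_number_map)) return value;
      return CallToNumberBuiltin(value);   // the push(r0)/InvokeBuiltin path
    }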
(...skipping 80 matching lines...)
163 Label loop; | 184 Label loop; |
164 __ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset)); | 185 __ ldr(r4, FieldMemOperand(r1, FixedArray::kLengthOffset)); |
165 __ bind(&loop); | 186 __ bind(&loop); |
166 // Do not double-check the first entry. | 187 // Do not double-check the first entry. |
167 | 188 |
168 __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength))); | 189 __ cmp(r4, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength))); |
169 __ b(eq, &install_unoptimized); | 190 __ b(eq, &install_unoptimized); |
170 __ sub(r4, r4, Operand( | 191 __ sub(r4, r4, Operand( |
171 Smi::FromInt(SharedFunctionInfo::kEntryLength))); // Skip an entry. | 192 Smi::FromInt(SharedFunctionInfo::kEntryLength))); // Skip an entry. |
172 __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 193 __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
173 __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize)); | 194 __ lsl(ip, r4, Operand(kPointerSizeLog2 - kSmiTagSize)); |
| 195 __ add(r5, r5, ip); |
174 __ ldr(r5, MemOperand(r5)); | 196 __ ldr(r5, MemOperand(r5)); |
175 __ cmp(r2, r5); | 197 __ cmp(r2, r5); |
176 __ b(ne, &loop); | 198 __ b(ne, &loop); |
177 // Hit: fetch the optimized code. | 199 // Hit: fetch the optimized code. |
178 __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 200 __ add(r5, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
179 __ add(r5, r5, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize)); | 201 __ lsl(r4, r4, Operand(kPointerSizeLog2 - kSmiTagSize)); |
| 202 __ add(r5, r5, r4); |
180 __ add(r5, r5, Operand(kPointerSize)); | 203 __ add(r5, r5, Operand(kPointerSize)); |
181 __ ldr(r4, MemOperand(r5)); | 204 __ ldr(r4, MemOperand(r5)); |
182 | 205 |
183 __ bind(&install_optimized); | 206 __ bind(&install_optimized); |
184 __ IncrementCounter(counters->fast_new_closure_install_optimized(), | 207 __ IncrementCounter(counters->fast_new_closure_install_optimized(), |
185 1, r6, r7); | 208 1, r6, r7); |
186 | 209 |
187 // TODO(fschneider): Idea: store proper code pointers in the map and either | 210 // TODO(fschneider): Idea: store proper code pointers in the map and either |
188 // unmangle them on marking or do nothing as the whole map is discarded on | 211 // unmangle them on marking or do nothing as the whole map is discarded on |
189 // major GC anyway. | 212 // major GC anyway. |
(...skipping 195 matching lines...)
385 // [sp]: constant elements. | 408 // [sp]: constant elements. |
386 // [sp + kPointerSize]: literal index. | 409 // [sp + kPointerSize]: literal index. |
387 // [sp + (2 * kPointerSize)]: literals array. | 410 // [sp + (2 * kPointerSize)]: literals array. |
388 | 411 |
389 // Load boilerplate object into r3 and check if we need to create a | 412 // Load boilerplate object into r3 and check if we need to create a |
390 // boilerplate. | 413 // boilerplate. |
391 Label slow_case; | 414 Label slow_case; |
392 __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); | 415 __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); |
393 __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); | 416 __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); |
394 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 417 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
395 __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); | 418 __ lsl(ip, r0, Operand(kPointerSizeLog2 - kSmiTagSize)); |
| 419 __ ldr(r3, MemOperand(r3, ip)); |
396 __ CompareRoot(r3, Heap::kUndefinedValueRootIndex); | 420 __ CompareRoot(r3, Heap::kUndefinedValueRootIndex); |
397 __ b(eq, &slow_case); | 421 __ b(eq, &slow_case); |
398 | 422 |
399 FastCloneShallowArrayStub::Mode mode = mode_; | 423 FastCloneShallowArrayStub::Mode mode = mode_; |
400 if (mode == CLONE_ANY_ELEMENTS) { | 424 if (mode == CLONE_ANY_ELEMENTS) { |
401 Label double_elements, check_fast_elements; | 425 Label double_elements, check_fast_elements; |
402 __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset)); | 426 __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset)); |
403 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); | 427 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); |
404 __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex); | 428 __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex); |
405 __ b(ne, &check_fast_elements); | 429 __ b(ne, &check_fast_elements); |
(...skipping 57 matching lines...)
463 // [sp + kPointerSize]: constant properties. | 487 // [sp + kPointerSize]: constant properties. |
464 // [sp + (2 * kPointerSize)]: literal index. | 488 // [sp + (2 * kPointerSize)]: literal index. |
465 // [sp + (3 * kPointerSize)]: literals array. | 489 // [sp + (3 * kPointerSize)]: literals array. |
466 | 490 |
467 // Load boilerplate object into r3 and check if we need to create a | 491 // Load boilerplate object into r3 and check if we need to create a |
468 // boilerplate. | 492 // boilerplate. |
469 Label slow_case; | 493 Label slow_case; |
470 __ ldr(r3, MemOperand(sp, 3 * kPointerSize)); | 494 __ ldr(r3, MemOperand(sp, 3 * kPointerSize)); |
471 __ ldr(r0, MemOperand(sp, 2 * kPointerSize)); | 495 __ ldr(r0, MemOperand(sp, 2 * kPointerSize)); |
472 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 496 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
473 __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); | 497 __ lsl(r1, r0, Operand(kPointerSizeLog2 - kSmiTagSize)); |
| 498 __ ldr(r3, MemOperand(r3, r1)); |
474 __ CompareRoot(r3, Heap::kUndefinedValueRootIndex); | 499 __ CompareRoot(r3, Heap::kUndefinedValueRootIndex); |
475 __ b(eq, &slow_case); | 500 __ b(eq, &slow_case); |
476 | 501 |
477 // Check that the boilerplate contains only fast properties and we can | 502 // Check that the boilerplate contains only fast properties and we can |
478 // statically determine the instance size. | 503 // statically determine the instance size. |
479 int size = JSObject::kHeaderSize + length_ * kPointerSize; | 504 int size = JSObject::kHeaderSize + length_ * kPointerSize; |
480 __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset)); | 505 __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset)); |
481 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceSizeOffset)); | 506 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceSizeOffset)); |
482 __ cmp(r0, Operand(size >> kPointerSizeLog2)); | 507 __ cmp(r0, Operand(size >> kPointerSizeLog2)); |
483 __ b(ne, &slow_case); | 508 __ b(ne, &slow_case); |
(...skipping 48 matching lines...)
532 (result2_.code() << 4) + | 557 (result2_.code() << 4) + |
533 (source_.code() << 8) + | 558 (source_.code() << 8) + |
534 (zeros_.code() << 12); | 559 (zeros_.code() << 12); |
535 } | 560 } |
536 | 561 |
537 void Generate(MacroAssembler* masm); | 562 void Generate(MacroAssembler* masm); |
538 }; | 563 }; |
539 | 564 |
540 | 565 |
541 void ConvertToDoubleStub::Generate(MacroAssembler* masm) { | 566 void ConvertToDoubleStub::Generate(MacroAssembler* masm) { |
| 567 ASSERT(!result1_.is(ip) && !result2_.is(ip) && !zeros_.is(ip)); |
542 Register exponent = result1_; | 568 Register exponent = result1_; |
543 Register mantissa = result2_; | 569 Register mantissa = result2_; |
544 | 570 |
545 Label not_special; | 571 Label not_special; |
546 // Convert from Smi to integer. | 572 // Convert from Smi to integer. |
547 __ mov(source_, Operand(source_, ASR, kSmiTagSize)); | 573 __ asr(source_, source_, Operand(kSmiTagSize)); |
548 // Move sign bit from source to destination. This works because the sign bit | 574 // Move sign bit from source to destination. This works because the sign bit |
549 // in the exponent word of the double has the same position and polarity as | 575 // in the exponent word of the double has the same position and polarity as |
550 // the 2's complement sign bit in a Smi. | 576 // the 2's complement sign bit in a Smi. |
551 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); | 577 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); |
552 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC); | 578 __ land(exponent, source_, Operand(HeapNumber::kSignMask)); |
| 579 __ tst(exponent, exponent); |
553 // Subtract from 0 if source was negative. | 580 // Subtract from 0 if source was negative. |
554 __ rsb(source_, source_, Operand(0, RelocInfo::NONE), LeaveCC, ne); | 581 __ rsb(source_, source_, Operand(0), ne); |
555 | 582 |
556 // We have -1, 0 or 1, which we treat specially. Register source_ contains | 583 // We have -1, 0 or 1, which we treat specially. Register source_ contains |
557 // absolute value: it is either equal to 1 (special case of -1 and 1), | 584 // absolute value: it is either equal to 1 (special case of -1 and 1), |
558 // greater than 1 (not a special case) or less than 1 (special case of 0). | 585 // greater than 1 (not a special case) or less than 1 (special case of 0). |
559 __ cmp(source_, Operand(1)); | 586 __ cmpgt(source_, Operand(1)); |
560 __ b(gt, ¬_special); | 587 __ bt_near(¬_special); |
561 | 588 |
562 // For 1 or -1 we need to or in the 0 exponent (biased to 1023). | 589 // For 1 or -1 we need to or in the 0 exponent (biased to 1023). |
563 const uint32_t exponent_word_for_1 = | 590 const uint32_t exponent_word_for_1 = |
564 HeapNumber::kExponentBias << HeapNumber::kExponentShift; | 591 HeapNumber::kExponentBias << HeapNumber::kExponentShift; |
565 __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq); | 592 __ cmpeq(source_, Operand(1)); |
| 593 __ orr(exponent, exponent, Operand(exponent_word_for_1), eq); |
566 // 1, 0 and -1 all have 0 for the second word. | 594 // 1, 0 and -1 all have 0 for the second word. |
567 __ mov(mantissa, Operand(0, RelocInfo::NONE)); | 595 __ mov(mantissa, Operand(0, RelocInfo::NONE)); |
568 __ Ret(); | 596 __ Ret(); |
569 | 597 |
570 __ bind(¬_special); | 598 __ bind(¬_special); |
571 // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5. | 599 // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5. |
572 // Gets the wrong answer for 0, but we already checked for that case above. | 600 // Gets the wrong answer for 0, but we already checked for that case above. |
573 __ CountLeadingZeros(zeros_, source_, mantissa); | 601 __ CountLeadingZeros(zeros_, source_, mantissa); |
574 // Compute exponent and or it into the exponent register. | 602 // Compute exponent and or it into the exponent register. |
575 // We use mantissa as a scratch register here. Use a fudge factor to | 603 // We use mantissa as a scratch register here. Use a fudge factor to |
576 // divide the constant 31 + HeapNumber::kExponentBias, 0x41e, into two parts | 604 // divide the constant 31 + HeapNumber::kExponentBias, 0x41e, into two parts |
577 // that fit in the ARM's constant field. | 605 // that fit in the ARM's constant field. |
578 int fudge = 0x400; | 606 int fudge = 0x400; |
579 __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge)); | 607 __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge)); |
580 __ add(mantissa, mantissa, Operand(fudge)); | 608 __ add(mantissa, mantissa, Operand(fudge)); |
581 __ orr(exponent, | 609 __ lsl(ip, mantissa, Operand(HeapNumber::kExponentShift)); |
582 exponent, | 610 __ orr(exponent, exponent, ip); |
583 Operand(mantissa, LSL, HeapNumber::kExponentShift)); | |
584 // Shift up the source chopping the top bit off. | 611 // Shift up the source chopping the top bit off. |
585 __ add(zeros_, zeros_, Operand(1)); | 612 __ add(zeros_, zeros_, Operand(1)); |
586 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0. | 613 // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0. |
587 __ mov(source_, Operand(source_, LSL, zeros_)); | 614 __ lsl(source_, source_, zeros_); |
588 // Compute lower part of fraction (last 12 bits). | 615 // Compute lower part of fraction (last 12 bits). |
589 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord)); | 616 __ lsl(mantissa, source_, Operand(HeapNumber::kMantissaBitsInTopWord)); |
590 // And the top (top 20 bits). | 617 // And the top (top 20 bits). |
591 __ orr(exponent, | 618 __ lsr(ip, source_, Operand(32 - HeapNumber::kMantissaBitsInTopWord)); |
592 exponent, | 619 __ orr(exponent, exponent, ip); |
593 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); | |
594 __ Ret(); | 620 __ Ret(); |
595 } | 621 } |
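The stub above builds an IEEE 754 double by hand from a 31-bit Smi payload: the sign bit carries over directly, the exponent comes from the position of the leading 1, and the remaining fraction bits split 20/12 between the two mantissa words. A stand-alone C++ model of the same bit manipulation, assuming the V8 constants used in this file (kExponentBias = 1023, kExponentShift = 20, kMantissaBitsInTopWord = 20) and GCC/Clang's __builtin_clz in place of CountLeadingZeros; the asserts check it against the hardware conversion:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Reassemble a double from its high word (sign/exponent/mantissa[51:32])
    // and low word (mantissa[31:0]), i.e. the exponent/mantissa pair above.
    static double FromWords(uint32_t hi, uint32_t lo) {
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double result;
      std::memcpy(&result, &bits, sizeof result);
      return result;
    }

    static double SmiToDouble(int32_t value) {
      uint32_t sign = static_cast<uint32_t>(value) & 0x80000000u;  // kSignMask
      uint32_t abs = value < 0 ? 0u - static_cast<uint32_t>(value)
                               : static_cast<uint32_t>(value);
      if (abs <= 1) {  // -1, 0 and 1: the special cases in the stub
        return FromWords(sign | (abs == 1 ? 1023u << 20 : 0u), 0);
      }
      int zeros = __builtin_clz(abs);                 // CountLeadingZeros
      uint32_t exponent = static_cast<uint32_t>(31 - zeros) + 1023u;
      uint32_t frac = abs << (zeros + 1);             // chop the implicit 1
      uint32_t hi = sign | (exponent << 20) | (frac >> 12);  // top 20 bits
      uint32_t lo = frac << 20;                       // remaining 12 bits
      return FromWords(hi, lo);
    }

    int main() {
      for (int32_t v : {-1073741824, -5, -1, 0, 1, 6, 1073741823}) {
        assert(SmiToDouble(v) == static_cast<double>(v));
      }
    }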
596 | 622 |
597 | 623 |
598 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, | 624 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, |
599 FloatingPointHelper::Destination destination, | 625 FloatingPointHelper::Destination destination, |
600 Register scratch1, | 626 Register scratch1, |
601 Register scratch2) { | 627 Register scratch2) { |
602 if (CpuFeatures::IsSupported(VFP2)) { | 628 if (CpuFeatures::IsSupported(FPU)) { |
603 CpuFeatures::Scope scope(VFP2); | 629 __ asr(scratch1, r0, Operand(kSmiTagSize)); |
604 __ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); | 630 __ dfloat(dr2, scratch1); |
605 __ vmov(d7.high(), scratch1); | 631 __ asr(scratch1, r1, Operand(kSmiTagSize)); |
606 __ vcvt_f64_s32(d7, d7.high()); | 632 __ dfloat(dr0, scratch1); |
607 __ mov(scratch1, Operand(r1, ASR, kSmiTagSize)); | |
608 __ vmov(d6.high(), scratch1); | |
609 __ vcvt_f64_s32(d6, d6.high()); | |
610 if (destination == kCoreRegisters) { | 633 if (destination == kCoreRegisters) { |
611 __ vmov(r2, r3, d7); | 634 __ movd(r2, r3, dr2); |
612 __ vmov(r0, r1, d6); | 635 __ movd(r0, r1, dr0); |
613 } | 636 } |
614 } else { | 637 } else { |
615 ASSERT(destination == kCoreRegisters); | 638 ASSERT(destination == kCoreRegisters); |
616 // Write Smi from r0 to r3 and r2 in double format. | 639 // Write Smi from r0 to r3 and r2 in double format. |
617 __ mov(scratch1, Operand(r0)); | 640 __ mov(scratch1, r0); |
618 ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2); | 641 ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2); |
619 __ push(lr); | 642 __ push(lr); |
620 __ Call(stub1.GetCode()); | 643 __ Call(stub1.GetCode()); |
621 // Write Smi from r1 to r1 and r0 in double format. | 644 // Write Smi from r1 to r1 and r0 in double format. |
622 __ mov(scratch1, Operand(r1)); | 645 __ mov(scratch1, r1); |
623 ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2); | 646 ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2); |
624 __ Call(stub2.GetCode()); | 647 __ Call(stub2.GetCode()); |
625 __ pop(lr); | 648 __ pop(lr); |
626 } | 649 } |
627 } | 650 } |
628 | 651 |
629 | 652 |
630 void FloatingPointHelper::LoadOperands( | 653 void FloatingPointHelper::LoadOperands( |
631 MacroAssembler* masm, | 654 MacroAssembler* masm, |
632 FloatingPointHelper::Destination destination, | 655 FloatingPointHelper::Destination destination, |
633 Register heap_number_map, | 656 Register heap_number_map, |
634 Register scratch1, | 657 Register scratch1, |
635 Register scratch2, | 658 Register scratch2, |
636 Label* slow) { | 659 Label* slow) { |
637 | 660 |
638 // Load right operand (r0) to d6 or r2/r3. | 661 // Load right operand (r0) to dr2 or r2/r3. |
| 662 ASSERT(!heap_number_map.is(r0) && !heap_number_map.is(r1) && |
| 663 !heap_number_map.is(r2) && !heap_number_map.is(r3)); |
639 LoadNumber(masm, destination, | 664 LoadNumber(masm, destination, |
640 r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow); | 665 r0, dr2, r2, r3, heap_number_map, scratch1, scratch2, slow); |
641 | 666 |
642 // Load left operand (r1) to d7 or r0/r1. | 667 // Load left operand (r1) to dr0 or r0/r1. |
643 LoadNumber(masm, destination, | 668 LoadNumber(masm, destination, |
644 r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow); | 669 r1, dr0, r0, r1, heap_number_map, scratch1, scratch2, slow); |
645 } | 670 } |
646 | 671 |
647 | 672 |
648 void FloatingPointHelper::LoadNumber(MacroAssembler* masm, | 673 void FloatingPointHelper::LoadNumber(MacroAssembler* masm, |
649 Destination destination, | 674 Destination destination, |
650 Register object, | 675 Register object, |
651 DwVfpRegister dst, | 676 DwVfpRegister dst, |
652 Register dst1, | 677 Register dst1, |
653 Register dst2, | 678 Register dst2, |
654 Register heap_number_map, | 679 Register heap_number_map, |
655 Register scratch1, | 680 Register scratch1, |
656 Register scratch2, | 681 Register scratch2, |
657 Label* not_number) { | 682 Label* not_number) { |
658 __ AssertRootValue(heap_number_map, | 683 if (FLAG_debug_code) { |
659 Heap::kHeapNumberMapRootIndex, | 684 __ AbortIfNotRootValue(heap_number_map, |
660 "HeapNumberMap register clobbered."); | 685 Heap::kHeapNumberMapRootIndex, |
| 686 "HeapNumberMap register clobbered."); |
| 687 } |
661 | 688 |
662 Label is_smi, done; | 689 Label is_smi, done; |
663 | 690 |
664 // Smi-check | 691 // Smi-check |
665 __ UntagAndJumpIfSmi(scratch1, object, &is_smi); | 692 __ UntagAndJumpIfSmi(scratch1, object, &is_smi); |
666 // Heap number check | 693 // Heap number check |
667 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); | 694 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); |
668 | 695 |
669 // Handle loading a double from a heap number. | 696 // Handle loading a double from a heap number. |
670 if (CpuFeatures::IsSupported(VFP2) && | 697 if (CpuFeatures::IsSupported(FPU) && |
671 destination == kVFPRegisters) { | 698 destination == kVFPRegisters) { |
672 CpuFeatures::Scope scope(VFP2); | |
673 // Load the double from tagged HeapNumber to double register. | 699 // Load the double from tagged HeapNumber to double register. |
674 __ sub(scratch1, object, Operand(kHeapObjectTag)); | 700 ASSERT(Operand(kHeapObjectTag - HeapNumber::kValueOffset).is_int8()); |
675 __ vldr(dst, scratch1, HeapNumber::kValueOffset); | 701 __ sub(scratch1, object, Operand(kHeapObjectTag - |
| 702 HeapNumber::kValueOffset)); |
| 703 __ dldr(dst, MemOperand(scratch1, 0), scratch1); |
676 } else { | 704 } else { |
677 ASSERT(destination == kCoreRegisters); | 705 ASSERT(destination == kCoreRegisters); |
678 // Load the double from heap number to dst1 and dst2 in double format. | 706 // Load the double from heap number to dst1 and dst2 in double format. |
679 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); | 707 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); |
680 } | 708 } |
681 __ jmp(&done); | 709 __ jmp_near(&done); |
682 | 710 |
683 // Handle loading a double from a smi. | 711 // Handle loading a double from a smi. |
684 __ bind(&is_smi); | 712 __ bind(&is_smi); |
685 if (CpuFeatures::IsSupported(VFP2)) { | 713 if (CpuFeatures::IsSupported(FPU)) { |
686 CpuFeatures::Scope scope(VFP2); | 714 // Convert smi to double using FPU instructions. |
687 // Convert smi to double using VFP instructions. | 715 __ SmiUntag(scratch1, object); |
688 __ vmov(dst.high(), scratch1); | 716 __ dfloat(dst, scratch1); |
689 __ vcvt_f64_s32(dst, dst.high()); | |
690 if (destination == kCoreRegisters) { | 717 if (destination == kCoreRegisters) { |
691 // Load the converted smi to dst1 and dst2 in double format. | 718 // Load the converted smi to dst1 and dst2 in double format. |
692 __ vmov(dst1, dst2, dst); | 719 __ movd(dst1, dst2, dst); |
693 } | 720 } |
694 } else { | 721 } else { |
695 ASSERT(destination == kCoreRegisters); | 722 ASSERT(destination == kCoreRegisters); |
696 // Write smi to dst1 and dst2 double format. | 723 // Write smi to dst1 and dst2 double format. |
697 __ mov(scratch1, Operand(object)); | 724 __ mov(scratch1, object); |
698 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); | 725 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); |
699 __ push(lr); | 726 __ push(lr); |
700 __ Call(stub.GetCode()); | 727 __ Call(stub.GetCode()); |
701 __ pop(lr); | 728 __ pop(lr); |
702 } | 729 } |
703 | 730 |
704 __ bind(&done); | 731 __ bind(&done); |
705 } | 732 } |
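Note: the kHeapObjectTag arithmetic above is what FieldMemOperand normally hides: tagged heap pointers carry tag bit 1, so a field at byte offset K of an object lives at address tagged_ptr - 1 + K. That is why the SH4 path pre-biases scratch1 with kHeapObjectTag - HeapNumber::kValueOffset before the dldr. As a one-function C++ sketch (assuming V8's 32-bit tagging scheme):

    #include <cstdint>

    // Untagged address of the field at byte offset `offset` inside the
    // heap object referenced by the tagged pointer `tagged`.
    static inline uintptr_t FieldAddress(uintptr_t tagged, int offset) {
      const uintptr_t kHeapObjectTag = 1;  // low bit marks heap pointers
      return tagged - kHeapObjectTag + offset;
    }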
706 | 733 |
707 | 734 |
708 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, | 735 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, |
709 Register object, | 736 Register object, |
710 Register dst, | 737 Register dst, |
711 Register heap_number_map, | 738 Register heap_number_map, |
712 Register scratch1, | 739 Register scratch1, |
713 Register scratch2, | 740 Register scratch2, |
714 Register scratch3, | 741 Register scratch3, |
715 DwVfpRegister double_scratch, | 742 DwVfpRegister double_scratch, |
716 Label* not_number) { | 743 Label* not_number) { |
717 __ AssertRootValue(heap_number_map, | 744 if (FLAG_debug_code) { |
718 Heap::kHeapNumberMapRootIndex, | 745 __ AbortIfNotRootValue(heap_number_map, |
719 "HeapNumberMap register clobbered."); | 746 Heap::kHeapNumberMapRootIndex, |
| 747 "HeapNumberMap register clobbered."); |
| 748 } |
720 Label done; | 749 Label done; |
721 Label not_in_int32_range; | 750 Label not_in_int32_range; |
722 | 751 |
723 __ UntagAndJumpIfSmi(dst, object, &done); | 752 __ UntagAndJumpIfSmi(dst, object, &done); |
724 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset)); | 753 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset)); |
725 __ cmp(scratch1, heap_number_map); | 754 __ cmp(scratch1, heap_number_map); |
726 __ b(ne, not_number); | 755 __ b(ne, not_number); |
727 __ ConvertToInt32(object, | 756 __ ConvertToInt32(object, |
728 dst, | 757 dst, |
729 scratch1, | 758 scratch1, |
(...skipping 21 matching lines...)
751 Register dst1, | 780 Register dst1, |
752 Register dst2, | 781 Register dst2, |
753 Register scratch2, | 782 Register scratch2, |
754 SwVfpRegister single_scratch) { | 783 SwVfpRegister single_scratch) { |
755 ASSERT(!int_scratch.is(scratch2)); | 784 ASSERT(!int_scratch.is(scratch2)); |
756 ASSERT(!int_scratch.is(dst1)); | 785 ASSERT(!int_scratch.is(dst1)); |
757 ASSERT(!int_scratch.is(dst2)); | 786 ASSERT(!int_scratch.is(dst2)); |
758 | 787 |
759 Label done; | 788 Label done; |
760 | 789 |
761 if (CpuFeatures::IsSupported(VFP2)) { | 790 if (CpuFeatures::IsSupported(FPU)) { |
762 CpuFeatures::Scope scope(VFP2); | 791 __ dfloat(double_dst, int_scratch); |
763 __ vmov(single_scratch, int_scratch); | |
764 __ vcvt_f64_s32(double_dst, single_scratch); | |
765 if (destination == kCoreRegisters) { | 792 if (destination == kCoreRegisters) { |
766 __ vmov(dst1, dst2, double_dst); | 793 __ movd(dst1, dst2, double_dst); |
767 } | 794 } |
768 } else { | 795 } else { |
769 Label fewer_than_20_useful_bits; | 796 Label fewer_than_20_useful_bits; |
770 // Expected output: | 797 // Expected output: |
771 // | dst2 | dst1 | | 798 // | dst2 | dst1 | |
772 // | s | exp | mantissa | | 799 // | s | exp | mantissa | |
773 | 800 |
774 // Check for zero. | 801 // Check for zero. |
775 __ cmp(int_scratch, Operand::Zero()); | 802 __ cmp(int_scratch, Operand(0)); |
776 __ mov(dst2, int_scratch); | 803 __ mov(dst2, int_scratch); |
777 __ mov(dst1, int_scratch); | 804 __ mov(dst1, int_scratch); |
778 __ b(eq, &done); | 805 __ b(eq, &done); |
779 | 806 |
780 // Preload the sign of the value. | 807 // Preload the sign of the value. |
781 __ and_(dst2, int_scratch, Operand(HeapNumber::kSignMask), SetCC); | 808 __ land(dst2, int_scratch, Operand(HeapNumber::kSignMask)); |
782 // Get the absolute value of the object (as an unsigned integer). | 809 // Get the absolute value of the object (as an unsigned integer). |
783 __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi); | 810 __ cmpge(dst2, Operand(0)); |
| 811 __ rsb(ip, int_scratch, Operand(0)); |
| 812 __ mov(int_scratch, ip, f); |
784 | 813 |
785 // Get mantissa[51:20]. | 814 // Get mantissa[51:20]. |
786 | 815 |
787 // Get the position of the first set bit. | 816 // Get the position of the first set bit. |
788 __ CountLeadingZeros(dst1, int_scratch, scratch2); | 817 __ CountLeadingZeros(dst1, int_scratch, scratch2); |
789 __ rsb(dst1, dst1, Operand(31)); | 818 __ rsb(dst1, dst1, Operand(31)); |
790 | 819 |
791 // Set the exponent. | 820 // Set the exponent. |
792 __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias)); | 821 __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias)); |
793 __ Bfi(dst2, scratch2, scratch2, | 822 __ Bfi(dst2, scratch2, scratch2, |
794 HeapNumber::kExponentShift, HeapNumber::kExponentBits); | 823 HeapNumber::kExponentShift, HeapNumber::kExponentBits); |
795 | 824 |
796 // Clear the first non-null bit. | 825 // Clear the first non-null bit. |
797 __ mov(scratch2, Operand(1)); | 826 __ mov(scratch2, Operand(1)); |
798 __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst1)); | 827 __ lsl(scratch2, scratch2, dst1); |
| 828 __ bic(int_scratch, int_scratch, scratch2); |
799 | 829 |
800 __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord)); | 830 // Present on ARM, but dead code. |
| 831 // __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord)); |
801 // Get the number of bits to set in the lower part of the mantissa. | 832 // Get the number of bits to set in the lower part of the mantissa. |
802 __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); | 833 __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord)); |
803 __ b(mi, &fewer_than_20_useful_bits); | 834 __ cmpge(scratch2, Operand(0)); |
| 835 __ b(f, &fewer_than_20_useful_bits, Label::kNear); |
804 // Set the higher 20 bits of the mantissa. | 836 // Set the higher 20 bits of the mantissa. |
805 __ orr(dst2, dst2, Operand(int_scratch, LSR, scratch2)); | 837 __ lsr(ip, int_scratch, scratch2); |
| 838 __ orr(dst2, dst2, ip); |
806 __ rsb(scratch2, scratch2, Operand(32)); | 839 __ rsb(scratch2, scratch2, Operand(32)); |
807 __ mov(dst1, Operand(int_scratch, LSL, scratch2)); | 840 __ lsl(dst1, int_scratch, scratch2); |
808 __ b(&done); | 841 __ b(&done); |
809 | 842 |
810 __ bind(&fewer_than_20_useful_bits); | 843 __ bind(&fewer_than_20_useful_bits); |
811 __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord)); | 844 __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord)); |
812 __ mov(scratch2, Operand(int_scratch, LSL, scratch2)); | 845 __ lsl(scratch2, int_scratch, scratch2); |
813 __ orr(dst2, dst2, scratch2); | 846 __ orr(dst2, dst2, scratch2); |
814 // Set dst1 to 0. | 847 // Set dst1 to 0. |
815 __ mov(dst1, Operand::Zero()); | 848 __ mov(dst1, Operand(0)); |
816 } | 849 } |
817 __ bind(&done); | 850 __ bind(&done); |
818 } | 851 } |
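Worked example for the core-register path above, with int_scratch = 6 (binary 110): CountLeadingZeros yields 29, so dst1 = 31 - 29 = 2 and the biased exponent is 2 + 1023 = 1025 (0x401), which Bfi deposits into bits [30:20] of dst2. Clearing the leading 1 leaves 2; since dst1 - kMantissaBitsInTopWord is negative, the fewer_than_20_useful_bits branch shifts by 20 - 2 = 18, giving dst2 = 0x40180000 and dst1 = 0, which is exactly the word pair of 6.0 (0x4018000000000000).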
819 | 852 |
820 | 853 |
821 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, | 854 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, |
822 Register object, | 855 Register object, |
823 Destination destination, | 856 Destination destination, |
824 DwVfpRegister double_dst, | 857 DwVfpRegister double_dst, |
825 DwVfpRegister double_scratch, | |
826 Register dst1, | 858 Register dst1, |
827 Register dst2, | 859 Register dst2, |
828 Register heap_number_map, | 860 Register heap_number_map, |
829 Register scratch1, | 861 Register scratch1, |
830 Register scratch2, | 862 Register scratch2, |
831 SwVfpRegister single_scratch, | 863 SwVfpRegister single_scratch, |
832 Label* not_int32) { | 864 Label* not_int32) { |
833 ASSERT(!scratch1.is(object) && !scratch2.is(object)); | 865 ASSERT(!scratch1.is(object) && !scratch2.is(object)); |
834 ASSERT(!scratch1.is(scratch2)); | 866 ASSERT(!scratch1.is(scratch2)); |
835 ASSERT(!heap_number_map.is(object) && | 867 ASSERT(!heap_number_map.is(object) && |
836 !heap_number_map.is(scratch1) && | 868 !heap_number_map.is(scratch1) && |
837 !heap_number_map.is(scratch2)); | 869 !heap_number_map.is(scratch2)); |
838 | 870 |
839 Label done, obj_is_not_smi; | 871 Label done, obj_is_not_smi; |
840 | 872 |
841 __ JumpIfNotSmi(object, &obj_is_not_smi); | 873 __ JumpIfNotSmi(object, &obj_is_not_smi); |
842 __ SmiUntag(scratch1, object); | 874 __ SmiUntag(scratch1, object); |
843 ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2, | 875 ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2, |
844 scratch2, single_scratch); | 876 scratch2, single_scratch); |
845 __ b(&done); | 877 __ b(&done); |
846 | 878 |
847 __ bind(&obj_is_not_smi); | 879 __ bind(&obj_is_not_smi); |
848 __ AssertRootValue(heap_number_map, | 880 if (FLAG_debug_code) { |
849 Heap::kHeapNumberMapRootIndex, | 881 __ AbortIfNotRootValue(heap_number_map, |
850 "HeapNumberMap register clobbered."); | 882 Heap::kHeapNumberMapRootIndex, |
| 883 "HeapNumberMap register clobbered."); |
| 884 } |
851 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); | 885 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); |
852 | 886 |
853 // Load the number. | 887 // Load the number. |
854 if (CpuFeatures::IsSupported(VFP2)) { | 888 if (CpuFeatures::IsSupported(FPU)) { |
855 CpuFeatures::Scope scope(VFP2); | |
856 // Load the double value. | 889 // Load the double value. |
857 __ sub(scratch1, object, Operand(kHeapObjectTag)); | 890 __ sub(scratch1, object, Operand(kHeapObjectTag)); |
858 __ vldr(double_dst, scratch1, HeapNumber::kValueOffset); | 891 __ dldr(double_dst, MemOperand(scratch1, HeapNumber::kValueOffset)); |
859 | 892 |
860 __ EmitVFPTruncate(kRoundToZero, | 893 __ EmitFPUTruncate(kRoundToZero, |
861 scratch1, | 894 scratch1, |
862 double_dst, | 895 double_dst, |
863 scratch2, | 896 scratch2, |
864 double_scratch, | |
865 kCheckForInexactConversion); | 897 kCheckForInexactConversion); |
866 | 898 |
867 // Jump to not_int32 if the operation did not succeed. | 899 // Jump to not_int32 if the operation did not succeed. |
868 __ b(ne, not_int32); | 900 __ b(ne, not_int32); |
869 | 901 |
870 if (destination == kCoreRegisters) { | 902 if (destination == kCoreRegisters) { |
871 __ vmov(dst1, dst2, double_dst); | 903 __ movd(dst1, dst2, double_dst); |
872 } | 904 } |
873 | 905 |
874 } else { | 906 } else { |
875 ASSERT(!scratch1.is(object) && !scratch2.is(object)); | 907 ASSERT(!scratch1.is(object) && !scratch2.is(object)); |
876 // Load the double value in the destination registers. | 908 // Load the double value in the destination registers. |
877 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); | 909 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); |
878 | 910 |
879 // Check for 0 and -0. | 911 // Check for 0 and -0. |
880 __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask)); | 912 __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask)); |
881 __ orr(scratch1, scratch1, Operand(dst2)); | 913 __ orr(scratch1, scratch1, dst2); |
882 __ cmp(scratch1, Operand::Zero()); | 914 __ cmp(scratch1, Operand(0)); |
883 __ b(eq, &done); | 915 __ b(eq, &done); |
884 | 916 |
885 // Check that the value can be exactly represented by a 32-bit integer. | 917 // Check that the value can be exactly represented by a 32-bit integer. |
886 // Jump to not_int32 if that's not the case. | 918 // Jump to not_int32 if that's not the case. |
887 DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32); | 919 DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32); |
888 | 920 |
889 // dst1 and dst2 were trashed. Reload the double value. | 921 // dst1 and dst2 were trashed. Reload the double value. |
890 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); | 922 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); |
891 } | 923 } |
892 | 924 |
893 __ bind(&done); | 925 __ bind(&done); |
894 } | 926 } |
895 | 927 |
896 | 928 |
897 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, | 929 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, |
898 Register object, | 930 Register object, |
899 Register dst, | 931 Register dst, |
900 Register heap_number_map, | 932 Register heap_number_map, |
901 Register scratch1, | 933 Register scratch1, |
902 Register scratch2, | 934 Register scratch2, |
903 Register scratch3, | 935 Register scratch3, |
904 DwVfpRegister double_scratch0, | 936 DwVfpRegister double_scratch, |
905 DwVfpRegister double_scratch1, | |
906 Label* not_int32) { | 937 Label* not_int32) { |
907 ASSERT(!dst.is(object)); | 938 ASSERT(!dst.is(object)); |
908 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); | 939 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); |
909 ASSERT(!scratch1.is(scratch2) && | 940 ASSERT(!scratch1.is(scratch2) && |
910 !scratch1.is(scratch3) && | 941 !scratch1.is(scratch3) && |
911 !scratch2.is(scratch3)); | 942 !scratch2.is(scratch3)); |
912 | 943 |
913 Label done; | 944 Label done; |
914 | 945 |
915 __ UntagAndJumpIfSmi(dst, object, &done); | 946 __ UntagAndJumpIfSmi(dst, object, &done); |
916 | 947 |
917 __ AssertRootValue(heap_number_map, | 948 if (FLAG_debug_code) { |
918 Heap::kHeapNumberMapRootIndex, | 949 __ AbortIfNotRootValue(heap_number_map, |
919 "HeapNumberMap register clobbered."); | 950 Heap::kHeapNumberMapRootIndex, |
| 951 "HeapNumberMap register clobbered."); |
| 952 } |
920 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); | 953 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); |
921 | 954 |
922 // Object is a heap number. | 955 // Object is a heap number. |
923 // Convert the floating point value to a 32-bit integer. | 956 // Convert the floating point value to a 32-bit integer. |
924 if (CpuFeatures::IsSupported(VFP2)) { | 957 if (CpuFeatures::IsSupported(FPU)) { |
925 CpuFeatures::Scope scope(VFP2); | |
926 | |
927 // Load the double value. | 958 // Load the double value. |
928 __ sub(scratch1, object, Operand(kHeapObjectTag)); | 959 __ sub(scratch1, object, Operand(kHeapObjectTag)); |
929 __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset); | 960 __ dldr(double_scratch, MemOperand(scratch1, HeapNumber::kValueOffset)); |
930 | 961 |
931 __ EmitVFPTruncate(kRoundToZero, | 962 __ EmitFPUTruncate(kRoundToZero, |
932 dst, | 963 scratch2, |
933 double_scratch0, | 964 double_scratch, |
934 scratch1, | 965 scratch1, |
935 double_scratch1, | |
936 kCheckForInexactConversion); | 966 kCheckForInexactConversion); |
937 | 967 |
938 // Jump to not_int32 if the operation did not succeed. | 968 // Jump to not_int32 if the operation did not succeed. |
939 __ b(ne, not_int32); | 969 __ b(ne, not_int32); |
| 970 // Get the result in the destination register. |
| 971 __ mov(dst, scratch2); |
| 972 |
940 } else { | 973 } else { |
941 // Load the double value in the destination registers. | 974 // Load the double value in the destination registers. |
942 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); | 975 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); |
943 __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset)); | 976 __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset)); |
944 | 977 |
945 // Check for 0 and -0. | 978 // Check for 0 and -0. |
946 __ bic(dst, scratch1, Operand(HeapNumber::kSignMask)); | 979 __ bic(dst, scratch1, Operand(HeapNumber::kSignMask)); |
947 __ orr(dst, scratch2, Operand(dst)); | 980 __ orr(dst, scratch2, dst); |
948 __ cmp(dst, Operand::Zero()); | 981 __ cmp(dst, Operand(0)); |
949 __ b(eq, &done); | 982 __ b(eq, &done); |
950 | 983 |
951 DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32); | 984 DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32); |
952 | 985 |
953 // Registers state after DoubleIs32BitInteger. | 986 // Registers state after DoubleIs32BitInteger. |
954 // dst: mantissa[51:20]. | 987 // dst: mantissa[51:20]. |
955 // scratch2: 1 | 988 // scratch2: 1 |
956 | 989 |
957 // Shift back the higher bits of the mantissa. | 990 // Shift back the higher bits of the mantissa. |
958 __ mov(dst, Operand(dst, LSR, scratch3)); | 991 __ lsr(dst, dst, scratch3); |
959 // Set the implicit first bit. | 992 // Set the implicit first bit. |
960 __ rsb(scratch3, scratch3, Operand(32)); | 993 __ rsb(scratch3, scratch3, Operand(32)); |
961 __ orr(dst, dst, Operand(scratch2, LSL, scratch3)); | 994 __ lsl(scratch2, scratch2, scratch3); |
| 995 __ orr(dst, dst, scratch2); |
962 // Set the sign. | 996 // Set the sign. |
963 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); | 997 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); |
964 __ tst(scratch1, Operand(HeapNumber::kSignMask)); | 998 __ tst(scratch1, Operand(HeapNumber::kSignMask)); |
965 __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi); | 999 __ rsb(ip, dst, Operand(0)); |
| 1000 __ mov(dst, ip, ne); // FIXME(stm): strange case !! |
966 } | 1001 } |
967 | 1002 |
968 __ bind(&done); | 1003 __ bind(&done); |
969 } | 1004 } |
970 | 1005 |
971 | 1006 |
972 void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, | 1007 void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, |
973 Register src1, | 1008 Register src1, |
974 Register src2, | 1009 Register src2, |
975 Register dst, | 1010 Register dst, |
976 Register scratch, | 1011 Register scratch, |
977 Label* not_int32) { | 1012 Label* not_int32) { |
978 // Get exponent alone in scratch. | 1013 // Get exponent alone in scratch. |
979 __ Ubfx(scratch, | 1014 __ Ubfx(scratch, |
980 src1, | 1015 src1, |
981 HeapNumber::kExponentShift, | 1016 HeapNumber::kExponentShift, |
982 HeapNumber::kExponentBits); | 1017 HeapNumber::kExponentBits); |
983 | 1018 |
984 // Subtract the bias from the exponent. | 1019 // Subtract the bias from the exponent. |
985 __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC); | 1020 __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias)); |
986 | 1021 |
987 // src1: higher (exponent) part of the double value. | 1022 // src1: higher (exponent) part of the double value. |
988 // src2: lower (mantissa) part of the double value. | 1023 // src2: lower (mantissa) part of the double value. |
989 // scratch: unbiased exponent. | 1024 // scratch: unbiased exponent. |
990 | 1025 |
991 // Fast cases. Check for obvious non 32-bit integer values. | 1026 // Fast cases. Check for obvious non 32-bit integer values. |
992 // Negative exponent cannot yield 32-bit integers. | 1027 // Negative exponent cannot yield 32-bit integers. |
993 __ b(mi, not_int32); | 1028 __ cmpge(scratch, Operand(0)); |
| 1029 __ b(f, not_int32); |
994 // Exponent greater than 31 cannot yield 32-bit integers. | 1030 // Exponent greater than 31 cannot yield 32-bit integers. |
995 // Also, a positive value with an exponent equal to 31 is outside of the | 1031 // Also, a positive value with an exponent equal to 31 is outside of the |
996 // signed 32-bit integer range. | 1032 // signed 32-bit integer range. |
997 // Another way to put it is that if (exponent - signbit) > 30 then the | 1033 // Another way to put it is that if (exponent - signbit) > 30 then the |
998 // number cannot be represented as an int32. | 1034 // number cannot be represented as an int32. |
999 Register tmp = dst; | 1035 Register tmp = dst; |
1000 __ sub(tmp, scratch, Operand(src1, LSR, 31)); | 1036 __ lsr(tmp, src1, Operand(31)); |
1001 __ cmp(tmp, Operand(30)); | 1037 __ sub(tmp, scratch, tmp); |
1002 __ b(gt, not_int32); | 1038 __ cmpgt(tmp, Operand(30)); |
| 1039 __ b(t, not_int32); |
1003 // - Bits [21:0] in the mantissa are not null. | 1040 // - Bits [21:0] in the mantissa are not null. |
1004 __ tst(src2, Operand(0x3fffff)); | 1041 __ tst(src2, Operand(0x3fffff)); |
1005 __ b(ne, not_int32); | 1042 __ b(ne, not_int32); |
1006 | 1043 |
1007 // Otherwise the exponent needs to be big enough to shift left all the | 1044 // Otherwise the exponent needs to be big enough to shift left all the |
1008 // non zero bits left. So we need the (30 - exponent) last bits of the | 1045 // non zero bits left. So we need the (30 - exponent) last bits of the |
1009 // 31 higher bits of the mantissa to be null. | 1046 // 31 higher bits of the mantissa to be null. |
1010 // Because bits [21:0] are null, we can check instead that the | 1047 // Because bits [21:0] are null, we can check instead that the |
1011 // (32 - exponent) last bits of the 32 higher bits of the mantissa are null. | 1048 // (32 - exponent) last bits of the 32 higher bits of the mantissa are null. |
1012 | 1049 |
1013 // Get the 32 higher bits of the mantissa in dst. | 1050 // Get the 32 higher bits of the mantissa in dst. |
1014 __ Ubfx(dst, | 1051 __ Ubfx(dst, |
1015 src2, | 1052 src2, |
1016 HeapNumber::kMantissaBitsInTopWord, | 1053 HeapNumber::kMantissaBitsInTopWord, |
1017 32 - HeapNumber::kMantissaBitsInTopWord); | 1054 32 - HeapNumber::kMantissaBitsInTopWord); |
| 1055 __ lsl(ip, src1, Operand(HeapNumber::kNonMantissaBitsInTopWord)); |
1018 __ orr(dst, | 1056 __ orr(dst, |
1019 dst, | 1057 dst, |
1020 Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord)); | 1058 ip); |
1021 | 1059 |
1022 // Create the mask and test the lower bits (of the higher bits). | 1060 // Create the mask and test the lower bits (of the higher bits). |
1023 __ rsb(scratch, scratch, Operand(32)); | 1061 __ rsb(scratch, scratch, Operand(32)); |
1024 __ mov(src2, Operand(1)); | 1062 __ mov(src2, Operand(1)); |
1025 __ mov(src1, Operand(src2, LSL, scratch)); | 1063 __ lsl(src1, src2, scratch); |
1026 __ sub(src1, src1, Operand(1)); | 1064 __ sub(src1, src1, Operand(1)); |
1027 __ tst(dst, src1); | 1065 __ tst(dst, src1); |
1028 __ b(ne, not_int32); | 1066 __ b(ne, not_int32); |
1029 } | 1067 } |
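The routine encodes three conditions: the unbiased exponent must be non-negative, exponent minus the sign bit must not exceed 30, and every fraction bit below the exponent's cutoff must be zero. A stand-alone C++ model of the same word-level test (same IEEE 754 layout; +/-0 is assumed to be handled by the caller, as the callers above do):

    #include <cstdint>

    // src1 = high word (sign, exponent, mantissa[51:32]),
    // src2 = low word (mantissa[31:0]), matching the register roles above.
    static bool DoubleWordsAre32BitInteger(uint32_t src1, uint32_t src2) {
      int exponent = static_cast<int>((src1 >> 20) & 0x7FF) - 1023;  // unbias
      if (exponent < 0) return false;                  // |d| < 1
      int sign = static_cast<int>(src1 >> 31);
      if (exponent - sign > 30) return false;          // outside int32 range
      if (src2 & 0x3FFFFFu) return false;              // bits [21:0] must be 0
      // Top 32 bits of the 52-bit mantissa, as assembled with Ubfx/lsl/orr.
      uint32_t top = (src1 << 12) | (src2 >> 20);
      // The (32 - exponent) low bits of `top` must be zero as well. The
      // assembly obtains an all-ones mask from a shift by 32; in C++ that
      // shift is undefined, so exponent == 0 is special-cased here.
      uint32_t mask = exponent == 0 ? 0xFFFFFFFFu
                                    : (1u << (32 - exponent)) - 1u;
      return (top & mask) == 0;
    }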
1030 | 1068 |
1031 | 1069 |
1032 void FloatingPointHelper::CallCCodeForDoubleOperation( | 1070 void FloatingPointHelper::CallCCodeForDoubleOperation( |
1033 MacroAssembler* masm, | 1071 MacroAssembler* masm, |
1034 Token::Value op, | 1072 Token::Value op, |
1035 Register heap_number_result, | 1073 Register heap_number_result, |
1036 Register scratch) { | 1074 Register scratch) { |
1037 // Using core registers: | 1075 // Using core registers: |
1038 // r0: Left value (least significant part of mantissa). | 1076 // r0: Left value (least significant part of mantissa). |
1039 // r1: Left value (sign, exponent, top of mantissa). | 1077 // r1: Left value (sign, exponent, top of mantissa). |
1040 // r2: Right value (least significant part of mantissa). | 1078 // r2: Right value (least significant part of mantissa). |
1041 // r3: Right value (sign, exponent, top of mantissa). | 1079 // r3: Right value (sign, exponent, top of mantissa). |
1042 | 1080 |
1043 // Assert that heap_number_result is callee-saved. | 1081 // Assert that heap_number_result is callee-saved. |
1044 // We currently always use r5 to pass it. | 1082 // We currently always use r5 to pass it. |
| 1083 // Note: as r5 is not callee-saved on SH4, we push/pop it below |
1045 ASSERT(heap_number_result.is(r5)); | 1084 ASSERT(heap_number_result.is(r5)); |
1046 | 1085 |
| 1086 // Calling C function (using double registers): move r0..r3 to fr4..fr7 |
| 1087 __ movd(dr4, r0, r1); |
| 1088 __ movd(dr6, r2, r3); |
| 1089 |
1047 // Push the current return address before the C call. Return will be | 1090 // Push the current return address before the C call. Return will be |
1048 // through pop(pc) below. | 1091 // through pop(pc) below. |
1049 __ push(lr); | 1092 __ push(lr); |
1050 __ PrepareCallCFunction(0, 2, scratch); | 1093 __ push(heap_number_result); // sh4 specific |
1051 if (masm->use_eabi_hardfloat()) { | 1094 /* Use r0 as scratch: PrepareCallCFunction() disallows use of r4-r7 on sh4. */ |
1052 CpuFeatures::Scope scope(VFP2); | 1095 __ PrepareCallCFunction(0, 2, r0); |
1053 __ vmov(d0, r0, r1); | |
1054 __ vmov(d1, r2, r3); | |
1055 } | |
1056 { | 1096 { |
1057 AllowExternalCallThatCantCauseGC scope(masm); | 1097 AllowExternalCallThatCantCauseGC scope(masm); |
1058 __ CallCFunction( | 1098 __ CallCFunction( |
1059 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); | 1099 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); |
1060 } | 1100 } |
| 1101 __ movd(r0, r1, dr0); |
1061 // Store answer in the overwritable heap number. Double returned in | 1102 // Store answer in the overwritable heap number. Double returned in |
1062 // registers r0 and r1 or in d0. | 1103 // registers r0 and r1 or in d0. |
1063 if (masm->use_eabi_hardfloat()) { | 1104 __ pop(heap_number_result); // sh4 specific |
1064 CpuFeatures::Scope scope(VFP2); | 1105 __ Strd(r0, r1, FieldMemOperand(heap_number_result, |
1065 __ vstr(d0, | 1106 HeapNumber::kValueOffset)); |
1066 FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); | |
1067 } else { | |
1068 __ Strd(r0, r1, FieldMemOperand(heap_number_result, | |
1069 HeapNumber::kValueOffset)); | |
1070 } | |
1071 // Place heap_number_result in r0 and return to the pushed return address. | 1107 // Place heap_number_result in r0 and return to the pushed return address. |
1072 __ mov(r0, Operand(heap_number_result)); | 1108 __ mov(r0, heap_number_result); |
1073 __ pop(pc); | 1109 __ pop(lr); |
| 1110 __ rts(); |
1074 } | 1111 } |
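Note: the movd shuffles around the call implement the ABI boundary: the stub keeps each operand as a core register pair (r1:r0 and r3:r2), while the SH4 hardfloat calling convention, as this port uses it, passes the first two double arguments in dr4 and dr6 and returns the result in dr0. In shape, the C helper reached through ExternalReference::double_fp_operation is just a binary function on doubles (a hypothetical stand-in for illustration; the real helper is selected by the Token::Value op):

    // Under the convention described above, `left` arrives in dr4 and
    // `right` in dr6; the return value travels back in dr0, which the stub
    // then copies to r1:r0 with movd before storing into the heap number.
    extern "C" double double_fp_operation_add(double left, double right) {
      return left + right;  // the Token::ADD case; SUB/MUL/DIV are analogous
    }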
1075 | 1112 |
1076 | 1113 |
1077 bool WriteInt32ToHeapNumberStub::IsPregenerated() { | 1114 bool WriteInt32ToHeapNumberStub::IsPregenerated() { |
1078 // These variants are compiled ahead of time. See next method. | 1115 // These variants are compiled ahead of time. See next method. |
1079 if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) { | 1116 if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) { |
1080 return true; | 1117 return true; |
1081 } | 1118 } |
1082 if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) { | 1119 if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) { |
1083 return true; | 1120 return true; |
1084 } | 1121 } |
1085 // Other register combinations are generated as and when they are needed, | 1122 // Other register combinations are generated as and when they are needed, |
1086 // so it is unsafe to call them from stubs (we can't generate a stub while | 1123 // so it is unsafe to call them from stubs (we can't generate a stub while |
1087 // we are generating a stub). | 1124 // we are generating a stub). |
1088 return false; | 1125 return false; |
1089 } | 1126 } |
1090 | 1127 |
1091 | 1128 |
1092 void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() { | 1129 void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() { |
1093 WriteInt32ToHeapNumberStub stub1(r1, r0, r2); | 1130 WriteInt32ToHeapNumberStub stub1(r1, r0, r2); |
1094 WriteInt32ToHeapNumberStub stub2(r2, r0, r3); | 1131 WriteInt32ToHeapNumberStub stub2(r2, r0, r3); |
1095 stub1.GetCode()->set_is_pregenerated(true); | 1132 stub1.GetCode()->set_is_pregenerated(true); |
1096 stub2.GetCode()->set_is_pregenerated(true); | 1133 stub2.GetCode()->set_is_pregenerated(true); |
1097 } | 1134 } |
1098 | 1135 |
1099 | 1136 |
1100 // See comment for class. | 1137 // See comment for class. |
1101 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { | 1138 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { |
| 1139 ASSERT(!scratch_.is(ip) && !the_int_.is(ip)); |
1102 Label max_negative_int; | 1140 Label max_negative_int; |
1103 // the_int_ has the answer which is a signed int32 but not a Smi. | 1141 // the_int_ has the answer which is a signed int32 but not a Smi. |
1104 // We test for the special value that has a different exponent. This test | 1142 // We test for the special value that has a different exponent. This test |
1105 // has the neat side effect of setting the flags according to the sign. | 1143 // has the neat side effect of setting the flags according to the sign. |
1106 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); | 1144 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); |
1107 __ cmp(the_int_, Operand(0x80000000u)); | 1145 __ cmp(the_int_, Operand(0x80000000u)); |
1108 __ b(eq, &max_negative_int); | 1146 __ b(eq, &max_negative_int, Label::kNear); |
1109 // Set up the correct exponent in scratch_. All non-Smi int32s have the same. | 1147 // Set up the correct exponent in scratch_. All non-Smi int32s have the same. |
1110 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). | 1148 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). |
1111 uint32_t non_smi_exponent = | 1149 uint32_t non_smi_exponent = |
1112 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; | 1150 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; |
1113 __ mov(scratch_, Operand(non_smi_exponent)); | 1151 __ mov(scratch_, Operand(non_smi_exponent)); |
| 1152 __ cmpge(the_int_, Operand(0)); |
| 1153 Label skip; |
| 1154 __ bt_near(&skip); |
1114 // Set the sign bit in scratch_ if the value was negative. | 1155 // Set the sign bit in scratch_ if the value was negative. |
1115 __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs); | 1156 __ lor(scratch_, scratch_, Operand(HeapNumber::kSignMask)); |
1116 // Subtract from 0 if the value was negative. | 1157 // Subtract from 0 if the value was negative. |
1117 __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE), LeaveCC, cs); | 1158 __ rsb(the_int_, the_int_, Operand(0)); |
| 1159 __ bind(&skip); |
1118 // We should be masking the implicit first digit of the mantissa away here, | 1160 // We should be masking the implicit first digit of the mantissa away here, |
1119 // but it just ends up combining harmlessly with the last digit of the | 1161 // but it just ends up combining harmlessly with the last digit of the |
1120 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get | 1162 // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get |
1121 // the most significant 1 to hit the last bit of the 12-bit sign and exponent. | 1163 // the most significant 1 to hit the last bit of the 12-bit sign and exponent. |
1122 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); | 1164 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0); |
1123 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; | 1165 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; |
1124 __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance)); | 1166 __ lsr(ip, the_int_, Operand(shift_distance)); |
| 1167 __ lor(scratch_, scratch_, ip); |
1125 __ str(scratch_, FieldMemOperand(the_heap_number_, | 1168 __ str(scratch_, FieldMemOperand(the_heap_number_, |
1126 HeapNumber::kExponentOffset)); | 1169 HeapNumber::kExponentOffset)); |
1127 __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance)); | 1170 __ lsl(scratch_, the_int_, Operand(32 - shift_distance)); |
1128 __ str(scratch_, FieldMemOperand(the_heap_number_, | 1171 __ str(scratch_, FieldMemOperand(the_heap_number_, |
1129 HeapNumber::kMantissaOffset)); | 1172 HeapNumber::kMantissaOffset)); |
1130 __ Ret(); | 1173 __ Ret(); |
1131 | 1174 |
1132 __ bind(&max_negative_int); | 1175 __ bind(&max_negative_int); |
1133 // The max negative int32 is stored as a positive number in the mantissa of | 1176 // The max negative int32 is stored as a positive number in the mantissa of |
1134 // a double because it uses a sign bit instead of using two's complement. | 1177 // a double because it uses a sign bit instead of using two's complement. |
1135 // The actual mantissa bits stored are all 0 because the implicit most | 1178 // The actual mantissa bits stored are all 0 because the implicit most |
1136 // significant 1 bit is not stored. | 1179 // significant 1 bit is not stored. |
1137 non_smi_exponent += 1 << HeapNumber::kExponentShift; | 1180 non_smi_exponent += 1 << HeapNumber::kExponentShift; |
(...skipping 18 matching lines...)
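For reference, the arithmetic this stub performs can be summarized in plain C++. This is an illustrative sketch, not V8 code: the helper name is invented, and the constants follow the HeapNumber layout referenced above (exponent field at bit 20 of the top word, 12 non-mantissa bits). INT_MIN is excluded because the stub routes it through max_negative_int.

    #include <cstdint>

    // Encode a signed int32 that does not fit in a Smi (2^30 <= |value| < 2^31)
    // as the two 32-bit halves of an IEEE-754 double, mirroring the stub.
    void EncodeNonSmiInt32(int32_t value, uint32_t* hi, uint32_t* lo) {
      const uint32_t kExponentBias = 1023;
      const int kExponentShift = 20;             // exponent starts at bit 20
      const int kNonMantissaBitsInTopWord = 12;  // 1 sign bit + 11 exponent bits
      uint32_t exponent = (kExponentBias + 30) << kExponentShift;
      uint32_t sign = 0;
      uint32_t magnitude = static_cast<uint32_t>(value);
      if (value < 0) {               // the cmpge/bt_near pair above
        sign = 0x80000000u;
        magnitude = 0u - magnitude;  // the rsb above
      }
      const int shift = kNonMantissaBitsInTopWord - 2;  // == 10
      // The implicit leading 1 (bit 30 of magnitude) lands on the lowest
      // exponent bit, which is already 1 in (1023 + 30), so OR-ing is harmless.
      *hi = sign | exponent | (magnitude >> shift);
      *lo = magnitude << (32 - shift);
    }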
1156 __ b(ne, ¬_identical); | 1199 __ b(ne, ¬_identical); |
1157 | 1200 |
1158 // The two objects are identical. If we know that one of them isn't NaN then | 1201 // The two objects are identical. If we know that one of them isn't NaN then |
1159 // we now know they test equal. | 1202 // we now know they test equal. |
1160 if (cond != eq || !never_nan_nan) { | 1203 if (cond != eq || !never_nan_nan) { |
1161 // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(), | 1204 // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(), |
1162 // so we do the second best thing - test it ourselves. | 1205 // so we do the second best thing - test it ourselves. |
1163 // They are both equal and they are not both Smis, so neither of them is a | 1206 // They are both equal and they are not both Smis, so neither of them is a |
1164 // Smi. If it's not a heap number, then return equal. | 1207 // Smi. If it's not a heap number, then return equal. |
1165 if (cond == lt || cond == gt) { | 1208 if (cond == lt || cond == gt) { |
1166 __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE); | 1209 __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE, ge); |
1167 __ b(ge, slow); | 1210 __ bt(slow); |
1168 } else { | 1211 } else { |
1169 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); | 1212 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE, eq); |
1170 __ b(eq, &heap_number); | 1213 __ b(eq, &heap_number, Label::kNear); |
1171 // Comparing JS objects with <=, >= is complicated. | 1214 // Comparing JS objects with <=, >= is complicated. |
1172 if (cond != eq) { | 1215 if (cond != eq) { |
1173 __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE)); | 1216 __ cmpge(r4, Operand(FIRST_SPEC_OBJECT_TYPE)); |
1174 __ b(ge, slow); | 1217 __ bt(slow); |
1175 // Normally here we fall through to return_equal, but undefined is | 1218 // Normally here we fall through to return_equal, but undefined is |
1176 // special: (undefined == undefined) == true, but | 1219 // special: (undefined == undefined) == true, but |
1177 // (undefined <= undefined) == false! See ECMAScript 11.8.5. | 1220 // (undefined <= undefined) == false! See ECMAScript 11.8.5. |
1178 if (cond == le || cond == ge) { | 1221 if (cond == le || cond == ge) { |
1179 __ cmp(r4, Operand(ODDBALL_TYPE)); | 1222 __ cmp(r4, Operand(ODDBALL_TYPE)); |
1180 __ b(ne, &return_equal); | 1223 __ b(ne, &return_equal, Label::kNear); |
1181 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); | 1224 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); |
1182 __ cmp(r0, r2); | 1225 __ cmp(r0, r2); |
1183 __ b(ne, &return_equal); | 1226 __ b(ne, &return_equal, Label::kNear); |
1184 if (cond == le) { | 1227 if (cond == le) { |
1185 // undefined <= undefined should fail. | 1228 // undefined <= undefined should fail. |
1186 __ mov(r0, Operand(GREATER)); | 1229 __ mov(r0, Operand(GREATER)); |
1187 } else { | 1230 } else { |
1188 // undefined >= undefined should fail. | 1231 // undefined >= undefined should fail. |
1189 __ mov(r0, Operand(LESS)); | 1232 __ mov(r0, Operand(LESS)); |
1190 } | 1233 } |
1191 __ Ret(); | 1234 __ Ret(); |
1192 } | 1235 } |
1193 } | 1236 } |
(...skipping 23 matching lines...)
1217 // and not all mantissa bits (0..51) clear. | 1260 // and not all mantissa bits (0..51) clear. |
1218 // Read top bits of double representation (second word of value). | 1261 // Read top bits of double representation (second word of value). |
1219 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | 1262 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); |
1220 // Test that exponent bits are all set. | 1263 // Test that exponent bits are all set. |
1221 __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); | 1264 __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); |
1222 // NaNs have all-one exponents so they sign extend to -1. | 1265 // NaNs have all-one exponents so they sign extend to -1. |
1223 __ cmp(r3, Operand(-1)); | 1266 __ cmp(r3, Operand(-1)); |
1224 __ b(ne, &return_equal); | 1267 __ b(ne, &return_equal); |
1225 | 1268 |
1226 // Shift out flag and all exponent bits, retaining only mantissa. | 1269 // Shift out flag and all exponent bits, retaining only mantissa. |
1227 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); | 1270 __ lsl(r2, r2, Operand(HeapNumber::kNonMantissaBitsInTopWord)); |
1228 // Or with all low-bits of mantissa. | 1271 // Or with all low-bits of mantissa. |
1229 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); | 1272 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); |
1230 __ orr(r0, r3, Operand(r2), SetCC); | 1273 __ orr(r0, r3, r2); |
| 1274 __ tst(r0, r0); |
1231 // For equal we already have the right value in r0: Return zero (equal) | 1275 // For equal we already have the right value in r0: Return zero (equal) |
1232 // if all bits in mantissa are zero (it's an Infinity) and non-zero if | 1276 // if all bits in mantissa are zero (it's an Infinity) and non-zero if |
1233 // not (it's a NaN). For <= and >= we need to load r0 with the failing | 1277 // not (it's a NaN). For <= and >= we need to load r0 with the failing |
1234 // value if it's a NaN. | 1278 // value if it's a NaN. |
1235 if (cond != eq) { | 1279 if (cond != eq) { |
1236 // All-zero means Infinity means equal. | 1280 // All-zero means Infinity means equal. |
1237 __ Ret(eq); | 1281 __ Ret(eq); |
1238 if (cond == le) { | 1282 if (cond == le) { |
1239 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. | 1283 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. |
1240 } else { | 1284 } else { |
(...skipping 13 matching lines...)
1254 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | 1298 static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
1255 Register lhs, | 1299 Register lhs, |
1256 Register rhs, | 1300 Register rhs, |
1257 Label* lhs_not_nan, | 1301 Label* lhs_not_nan, |
1258 Label* slow, | 1302 Label* slow, |
1259 bool strict) { | 1303 bool strict) { |
1260 ASSERT((lhs.is(r0) && rhs.is(r1)) || | 1304 ASSERT((lhs.is(r0) && rhs.is(r1)) || |
1261 (lhs.is(r1) && rhs.is(r0))); | 1305 (lhs.is(r1) && rhs.is(r0))); |
1262 | 1306 |
1263 Label rhs_is_smi; | 1307 Label rhs_is_smi; |
1264 __ JumpIfSmi(rhs, &rhs_is_smi); | 1308 __ JumpIfSmi(rhs, &rhs_is_smi, Label::kNear); |
1265 | 1309 |
1266 // Lhs is a Smi. Check whether the rhs is a heap number. | 1310 // Lhs is a Smi. Check whether the rhs is a heap number. |
1267 __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE); | 1311 __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE, eq); |
1268 if (strict) { | 1312 if (strict) { |
1269 // If rhs is not a number and lhs is a Smi then strict equality cannot | 1313 // If rhs is not a number and lhs is a Smi then strict equality cannot |
1270 // succeed. Return non-equal. | 1314 // succeed. Return non-equal. |
1271 // If rhs is r0 then there is already a non-zero value in it. | 1315 // If rhs is r0 then there is already a non-zero value in it. |
1272 if (!rhs.is(r0)) { | 1316 if (!rhs.is(r0)) { |
1273 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); | 1317 __ mov(r0, Operand(NOT_EQUAL), ne); |
1274 } | 1318 } |
1275 __ Ret(ne); | 1319 __ Ret(ne); |
1276 } else { | 1320 } else { |
1277 // Smi compared non-strictly with a non-Smi non-heap-number. Call | 1321 // Smi compared non-strictly with a non-Smi non-heap-number. Call |
1278 // the runtime. | 1322 // the runtime. |
1279 __ b(ne, slow); | 1323 __ b(ne, slow); |
1280 } | 1324 } |
1281 | 1325 |
1282 // Lhs is a smi, rhs is a number. | 1326 // Lhs is a smi, rhs is a number. |
1283 if (CpuFeatures::IsSupported(VFP2)) { | 1327 if (CpuFeatures::IsSupported(FPU)) { |
1284 // Convert lhs to a double in d7. | 1328 // Convert lhs to a double in dr2. |
1285 CpuFeatures::Scope scope(VFP2); | 1329 __ SmiToDoubleFPURegister(lhs, dr2, r7); |
1286 __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); | 1330 // Load the double from rhs, tagged HeapNumber r0, to dr0. |
1287 // Load the double from rhs, tagged HeapNumber r0, to d6. | 1331 __ sub(r7, rhs, Operand(kHeapObjectTag - HeapNumber::kValueOffset)); |
1288 __ sub(r7, rhs, Operand(kHeapObjectTag)); | 1332 __ dldr(dr0, MemOperand(r7, 0), r7); |
1289 __ vldr(d6, r7, HeapNumber::kValueOffset); | |
1290 } else { | 1333 } else { |
1291 __ push(lr); | 1334 __ push(lr); |
1292 // Convert lhs to a double in r2, r3. | 1335 // Convert lhs to a double in r2, r3. |
1293 __ mov(r7, Operand(lhs)); | 1336 __ mov(r7, lhs); |
1294 ConvertToDoubleStub stub1(r3, r2, r7, r6); | 1337 ConvertToDoubleStub stub1(r3, r2, r7, r6); |
1295 __ Call(stub1.GetCode()); | 1338 __ Call(stub1.GetCode()); |
1296 // Load rhs to a double in r0, r1. | 1339 // Load rhs to a double in r0, r1. |
1297 __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | 1340 __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
1298 __ pop(lr); | 1341 __ pop(lr); |
1299 } | 1342 } |
1300 | 1343 |
1301 // We now have both loaded as doubles but we can skip the lhs nan check | 1344 // We now have both loaded as doubles but we can skip the lhs nan check |
1302 // since it's a smi. | 1345 // since it's a smi. |
1303 __ jmp(lhs_not_nan); | 1346 __ jmp(lhs_not_nan); |
1304 | 1347 |
1305 __ bind(&rhs_is_smi); | 1348 __ bind(&rhs_is_smi); |
1306 // Rhs is a smi. Check whether the non-smi lhs is a heap number. | 1349 // Rhs is a smi. Check whether the non-smi lhs is a heap number. |
1307 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE); | 1350 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE, eq); |
1308 if (strict) { | 1351 if (strict) { |
1309 // If lhs is not a number and rhs is a smi then strict equality cannot | 1352 // If lhs is not a number and rhs is a smi then strict equality cannot |
1310 // succeed. Return non-equal. | 1353 // succeed. Return non-equal. |
1311 // If lhs is r0 then there is already a non-zero value in it. | 1354 // If lhs is r0 then there is already a non-zero value in it. |
1312 if (!lhs.is(r0)) { | 1355 if (!lhs.is(r0)) { |
1313 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); | 1356 __ mov(r0, Operand(NOT_EQUAL), ne); |
1314 } | 1357 } |
1315 __ Ret(ne); | 1358 __ Ret(ne); |
1316 } else { | 1359 } else { |
1317 // Smi compared non-strictly with a non-smi non-heap-number. Call | 1360 // Smi compared non-strictly with a non-smi non-heap-number. Call |
1318 // the runtime. | 1361 // the runtime. |
1319 __ b(ne, slow); | 1362 __ b(ne, slow); |
1320 } | 1363 } |
1321 | 1364 |
1322 // Rhs is a smi, lhs is a heap number. | 1365 // Rhs is a smi, lhs is a heap number. |
1323 if (CpuFeatures::IsSupported(VFP2)) { | 1366 if (CpuFeatures::IsSupported(FPU)) { |
1324 CpuFeatures::Scope scope(VFP2); | 1367 // Load the double from lhs, tagged HeapNumber r1, to dr2. |
1325 // Load the double from lhs, tagged HeapNumber r1, to d7. | 1368 __ sub(r7, lhs, Operand(kHeapObjectTag - HeapNumber::kValueOffset)); |
1326 __ sub(r7, lhs, Operand(kHeapObjectTag)); | 1369 __ dldr(dr2, MemOperand(r7, 0), r7); |
1327 __ vldr(d7, r7, HeapNumber::kValueOffset); | 1370 // Convert rhs to a double in dr0. |
1328 // Convert rhs to a double in d6. | 1370 // Convert rhs to a double in dr0. |
1329 __ SmiToDoubleVFPRegister(rhs, d6, r7, s13); | |
1330 } else { | 1372 } else { |
1331 __ push(lr); | 1373 __ push(lr); |
1332 // Load lhs to a double in r2, r3. | 1374 // Load lhs to a double in r2, r3. |
1333 __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | 1375 __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
1334 // Convert rhs to a double in r0, r1. | 1376 // Convert rhs to a double in r0, r1. |
1335 __ mov(r7, Operand(rhs)); | 1377 __ mov(r7, rhs); |
1336 ConvertToDoubleStub stub2(r1, r0, r7, r6); | 1378 ConvertToDoubleStub stub2(r1, r0, r7, r6); |
1337 __ Call(stub2.GetCode()); | 1379 __ Call(stub2.GetCode()); |
1338 __ pop(lr); | 1380 __ pop(lr); |
1339 } | 1381 } |
1340 // Fall through to both_loaded_as_doubles. | 1382 // Fall through to both_loaded_as_doubles. |
1341 } | 1383 } |
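The fast paths above lean on the 32-bit Smi representation: a Smi carries a 31-bit integer shifted left by one, with tag bit 0 clear, so untagging is an arithmetic shift. A minimal sketch (helper names invented; assumes kSmiTagSize == 1 and kSmiTag == 0):

    #include <cstdint>

    inline bool IsSmi(int32_t tagged) { return (tagged & 1) == 0; }
    inline int32_t UntagSmi(int32_t tagged) {
      return tagged >> 1;  // arithmetic shift drops the tag bit
    }
    // Roughly what SmiToDoubleFPURegister produces on the FPU path.
    inline double SmiToDouble(int32_t tagged) {
      return static_cast<double>(UntagSmi(tagged));
    }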
1342 | 1384 |
1343 | 1385 |
1344 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) { | 1386 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) { |
1345 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); | 1387 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); |
1346 Register rhs_exponent = exp_first ? r0 : r1; | 1388 Register rhs_exponent = exp_first ? r0 : r1; |
1347 Register lhs_exponent = exp_first ? r2 : r3; | 1389 Register lhs_exponent = exp_first ? r2 : r3; |
1348 Register rhs_mantissa = exp_first ? r1 : r0; | 1390 Register rhs_mantissa = exp_first ? r1 : r0; |
1349 Register lhs_mantissa = exp_first ? r3 : r2; | 1391 Register lhs_mantissa = exp_first ? r3 : r2; |
1350 Label one_is_nan, neither_is_nan; | 1392 Label one_is_nan, neither_is_nan; |
1351 | 1393 |
1352 __ Sbfx(r4, | 1394 __ Sbfx(r4, |
1353 lhs_exponent, | 1395 lhs_exponent, |
1354 HeapNumber::kExponentShift, | 1396 HeapNumber::kExponentShift, |
1355 HeapNumber::kExponentBits); | 1397 HeapNumber::kExponentBits); |
1356 // NaNs have all-one exponents so they sign extend to -1. | 1398 // NaNs have all-one exponents so they sign extend to -1. |
1357 __ cmp(r4, Operand(-1)); | 1399 __ cmp(r4, Operand(-1)); |
1358 __ b(ne, lhs_not_nan); | 1400 __ b(ne, lhs_not_nan); |
1359 __ mov(r4, | 1401 __ lsl(r4, lhs_exponent, Operand(HeapNumber::kNonMantissaBitsInTopWord)); |
1360 Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), | 1402 __ cmpeq(r4, Operand(0)); |
1361 SetCC); | 1403 __ b(ne, &one_is_nan, Label::kNear); |
1362 __ b(ne, &one_is_nan); | |
1363 __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE)); | 1404 __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE)); |
1364 __ b(ne, &one_is_nan); | 1405 __ b(ne, &one_is_nan, Label::kNear); |
1365 | 1406 |
1366 __ bind(lhs_not_nan); | 1407 __ bind(lhs_not_nan); |
1367 __ Sbfx(r4, | 1408 __ Sbfx(r4, |
1368 rhs_exponent, | 1409 rhs_exponent, |
1369 HeapNumber::kExponentShift, | 1410 HeapNumber::kExponentShift, |
1370 HeapNumber::kExponentBits); | 1411 HeapNumber::kExponentBits); |
1371 // NaNs have all-one exponents so they sign extend to -1. | 1412 // NaNs have all-one exponents so they sign extend to -1. |
1372 __ cmp(r4, Operand(-1)); | 1413 __ cmp(r4, Operand(-1)); |
1373 __ b(ne, &neither_is_nan); | 1414 __ b(ne, &neither_is_nan, Label::kNear); |
1374 __ mov(r4, | 1415 __ lsl(r4, rhs_exponent, Operand(HeapNumber::kNonMantissaBitsInTopWord)); |
1375 Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), | 1416 __ cmpeq(r4, Operand(0, RelocInfo::NONE)); |
1376 SetCC); | 1417 __ b(ne, &one_is_nan, Label::kNear); |
1377 __ b(ne, &one_is_nan); | 1418 __ cmp(rhs_mantissa, Operand(0)); |
1378 __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE)); | 1419 __ b(eq, &neither_is_nan, Label::kNear); |
1379 __ b(eq, &neither_is_nan); | |
1380 | 1420 |
1381 __ bind(&one_is_nan); | 1421 __ bind(&one_is_nan); |
1382 // NaN comparisons always fail. | 1422 // NaN comparisons always fail. |
1383 // Load whatever we need in r0 to make the comparison fail. | 1423 // Load whatever we need in r0 to make the comparison fail. |
1384 if (cond == lt || cond == le) { | 1424 if (cond == lt || cond == le) { |
1385 __ mov(r0, Operand(GREATER)); | 1425 __ mov(r0, Operand(GREATER)); |
1386 } else { | 1426 } else { |
1387 __ mov(r0, Operand(LESS)); | 1427 __ mov(r0, Operand(LESS)); |
1388 } | 1428 } |
1389 __ Ret(); | 1429 __ Ret(); |
1390 | 1430 |
1391 __ bind(&neither_is_nan); | 1431 __ bind(&neither_is_nan); |
1392 } | 1432 } |
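The exponent/mantissa tests above implement the standard bit-level NaN probe: a double is NaN iff all 11 exponent bits are set and the 52 mantissa bits are not all zero (all zero would be an Infinity). As a sketch (the masks are IEEE-754 facts; the helper itself is illustrative):

    #include <cstdint>

    bool IsNaNBits(uint32_t exponent_word, uint32_t mantissa_word) {
      const uint32_t kExponentMask = 0x7FF00000u;  // bits 20..30 of the top word
      const uint32_t kMantissaHighMask = 0x000FFFFFu;
      if ((exponent_word & kExponentMask) != kExponentMask) return false;
      return (exponent_word & kMantissaHighMask) != 0 || mantissa_word != 0;
    }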
1393 | 1433 |
1394 | 1434 |
1395 // See comment at call site. | 1435 // See comment at call site. |
1396 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, | 1436 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, |
1397 Condition cond) { | 1437 Condition cond) { |
1398 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); | 1438 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); |
1399 Register rhs_exponent = exp_first ? r0 : r1; | 1439 Register rhs_exponent = exp_first ? r0 : r1; |
1400 Register lhs_exponent = exp_first ? r2 : r3; | 1440 Register lhs_exponent = exp_first ? r2 : r3; |
1401 Register rhs_mantissa = exp_first ? r1 : r0; | 1441 Register rhs_mantissa = exp_first ? r1 : r0; |
1402 Register lhs_mantissa = exp_first ? r3 : r2; | 1442 Register lhs_mantissa = exp_first ? r3 : r2; |
1403 | 1443 |
1404 // r0, r1, r2, r3 have the two doubles. Neither is a NaN. | 1444 // r0, r1, r2, r3 have the two doubles. Neither is a NaN. |
1405 if (cond == eq) { | 1445 if (cond == eq) { |
1406 // Doubles are not equal unless they have the same bit pattern. | 1446 // Doubles are not equal unless they have the same bit pattern. |
1407 // Exception: 0 and -0. | 1447 // Exception: 0 and -0. |
1408 __ cmp(rhs_mantissa, Operand(lhs_mantissa)); | 1448 __ cmp(rhs_mantissa, lhs_mantissa); |
1409 __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne); | 1449 __ orr(r0, rhs_mantissa, lhs_mantissa, ne); |
1410 // Return non-zero if the numbers are unequal. | 1450 // Return non-zero if the numbers are unequal. |
1411 __ Ret(ne); | 1451 __ Ret(ne); |
1412 | 1452 |
1413 __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC); | 1453 __ sub(r0, rhs_exponent, lhs_exponent); |
| 1454 __ tst(r0, r0); |
1414 // If exponents are equal then return 0. | 1455 // If exponents are equal then return 0. |
1415 __ Ret(eq); | 1456 __ Ret(eq); |
1416 | 1457 |
1417 // Exponents are unequal. The only way we can return that the numbers | 1458 // Exponents are unequal. The only way we can return that the numbers |
1418 // are equal is if one is -0 and the other is 0. We already dealt | 1459 // are equal is if one is -0 and the other is 0. We already dealt |
1419 // with the case where both are -0 or both are 0. | 1460 // with the case where both are -0 or both are 0. |
1420 // We start by seeing if the mantissas (that are equal) or the bottom | 1461 // We start by seeing if the mantissas (that are equal) or the bottom |
1421 // 31 bits of the lhs exponent are non-zero. If so we return not | 1462 // 31 bits of the lhs exponent are non-zero. If so we return not |
1422 // equal. | 1463 // equal. |
1423 __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC); | 1464 __ lsl(r4, lhs_exponent, Operand(kSmiTagSize)); |
1424 __ mov(r0, Operand(r4), LeaveCC, ne); | 1465 __ orr(r4, lhs_mantissa, r4); |
| 1466 __ tst(r4, r4); |
| 1467 __ mov(r0, r4, ne); |
1425 __ Ret(ne); | 1468 __ Ret(ne); |
1426 // Now they are equal if and only if the rhs exponent is zero in its | 1469 // Now they are equal if and only if the rhs exponent is zero in its |
1427 // low 31 bits. | 1470 // low 31 bits. |
1428 __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize)); | 1471 __ lsl(r0, rhs_exponent, Operand(kSmiTagSize)); |
1429 __ Ret(); | 1472 __ Ret(); |
1430 } else { | 1473 } else { |
| 1474 __ Push(r4, r5, r6, r7); |
| 1475 // Calling C function: move r0..r3 to fr4..fr7 |
| 1476 __ movd(dr4, r0, r1); |
| 1477 __ movd(dr6, r2, r3); |
| 1478 |
1431 // Call a native function to do a comparison between two non-NaNs. | 1479 // Call a native function to do a comparison between two non-NaNs. |
1432 // Call C routine that may not cause GC or other trouble. | 1480 // Call C routine that may not cause GC or other trouble. |
1433 __ push(lr); | 1481 __ push(lr); |
1434 __ PrepareCallCFunction(0, 2, r5); | 1482 __ PrepareCallCFunction(0, 2, r0); |
1435 if (masm->use_eabi_hardfloat()) { | |
1436 CpuFeatures::Scope scope(VFP2); | |
1437 __ vmov(d0, r0, r1); | |
1438 __ vmov(d1, r2, r3); | |
1439 } | |
1440 | |
1441 AllowExternalCallThatCantCauseGC scope(masm); | |
1442 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), | 1483 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), |
1443 0, 2); | 1484 0, 2); |
1444 __ pop(pc); // Return. | 1485 __ pop(lr); |
| 1486 __ Pop(r4, r5, r6, r7); |
| 1487 __ Ret(); |
1445 } | 1488 } |
1446 } | 1489 } |
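For the eq case this routine is doing bitwise double equality with the one IEEE-754 wrinkle that +0 and -0 compare equal. A compact sketch of the same decision (illustrative helper; returns 0 for equal and non-zero otherwise, matching the r0 convention):

    #include <cstdint>

    int32_t CompareNonNanBitsForEq(uint32_t lhs_hi, uint32_t lhs_lo,
                                   uint32_t rhs_hi, uint32_t rhs_lo) {
      if (lhs_hi == rhs_hi && lhs_lo == rhs_lo) return 0;  // same bit pattern
      // Different patterns are still equal only if both operands are zeros of
      // some sign: every bit except the two sign bits must be clear.
      uint32_t non_sign = (lhs_hi << 1) | (rhs_hi << 1) | lhs_lo | rhs_lo;
      return non_sign == 0 ? 0 : 1;
    }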
1447 | 1490 |
1448 | 1491 |
1449 // See comment at call site. | 1492 // See comment at call site. |
1450 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | 1493 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
1451 Register lhs, | 1494 Register lhs, |
1452 Register rhs) { | 1495 Register rhs) { |
1453 ASSERT((lhs.is(r0) && rhs.is(r1)) || | 1496 ASSERT((lhs.is(r0) && rhs.is(r1)) || |
1454 (lhs.is(r1) && rhs.is(r0))); | 1497 (lhs.is(r1) && rhs.is(r0))); |
1455 | 1498 |
1456 // If either operand is a JS object or an oddball value, then they are | 1499 // If either operand is a JS object or an oddball value, then they are |
1457 // not equal since their pointers are different. | 1500 // not equal since their pointers are different. |
1458 // There is no test for undetectability in strict equality. | 1501 // There is no test for undetectability in strict equality. |
1459 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); | 1502 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); |
1460 Label first_non_object; | 1503 Label first_non_object; |
1461 // Get the type of the first operand into r2 and compare it with | 1504 // Get the type of the first operand into r2 and compare it with |
1462 // FIRST_SPEC_OBJECT_TYPE. | 1505 // FIRST_SPEC_OBJECT_TYPE. |
1463 __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE); | 1506 __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE, ge); |
1464 __ b(lt, &first_non_object); | 1507 __ bf_near(&first_non_object); |
1465 | 1508 |
1466 // Return non-zero (r0 is not zero) | 1509 // Return non-zero (r0 is not zero) |
1467 Label return_not_equal; | 1510 Label return_not_equal; |
1468 __ bind(&return_not_equal); | 1511 __ bind(&return_not_equal); |
1469 __ Ret(); | 1512 __ Ret(); |
1470 | 1513 |
1471 __ bind(&first_non_object); | 1514 __ bind(&first_non_object); |
1472 // Check for oddballs: true, false, null, undefined. | 1515 // Check for oddballs: true, false, null, undefined. |
1473 __ cmp(r2, Operand(ODDBALL_TYPE)); | 1516 __ cmp(r2, Operand(ODDBALL_TYPE)); |
1474 __ b(eq, &return_not_equal); | 1517 __ b(eq, &return_not_equal); |
1475 | 1518 |
1476 __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE); | 1519 __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE, ge); |
1477 __ b(ge, &return_not_equal); | 1520 __ bt(&return_not_equal); |
1478 | 1521 |
1479 // Check for oddballs: true, false, null, undefined. | 1522 // Check for oddballs: true, false, null, undefined. |
1480 __ cmp(r3, Operand(ODDBALL_TYPE)); | 1523 __ cmp(r3, Operand(ODDBALL_TYPE)); |
1481 __ b(eq, &return_not_equal); | 1524 __ b(eq, &return_not_equal); |
1482 | 1525 |
1483 // Now that we have the types we might as well check for symbol-symbol. | 1526 // Now that we have the types we might as well check for symbol-symbol. |
1484 // Ensure that no non-strings have the symbol bit set. | 1527 // Ensure that no non-strings have the symbol bit set. |
1485 STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask); | 1528 STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask); |
1486 STATIC_ASSERT(kSymbolTag != 0); | 1529 STATIC_ASSERT(kSymbolTag != 0); |
1487 __ and_(r2, r2, Operand(r3)); | 1530 __ land(r2, r2, r3); |
1488 __ tst(r2, Operand(kIsSymbolMask)); | 1531 __ tst(r2, Operand(kIsSymbolMask)); |
1489 __ b(ne, &return_not_equal); | 1532 __ b(ne, &return_not_equal); |
1490 } | 1533 } |
1491 | 1534 |
1492 | 1535 |
1493 // See comment at call site. | 1536 // See comment at call site. |
1494 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, | 1537 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, |
1495 Register lhs, | 1538 Register lhs, |
1496 Register rhs, | 1539 Register rhs, |
1497 Label* both_loaded_as_doubles, | 1540 Label* both_loaded_as_doubles, |
1498 Label* not_heap_numbers, | 1541 Label* not_heap_numbers, |
1499 Label* slow) { | 1542 Label* slow) { |
1500 ASSERT((lhs.is(r0) && rhs.is(r1)) || | 1543 ASSERT((lhs.is(r0) && rhs.is(r1)) || |
1501 (lhs.is(r1) && rhs.is(r0))); | 1544 (lhs.is(r1) && rhs.is(r0))); |
1502 | 1545 |
1503 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); | 1546 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE, eq); |
1504 __ b(ne, not_heap_numbers); | 1547 __ b(ne, not_heap_numbers); |
1505 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); | 1548 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); |
1506 __ cmp(r2, r3); | 1549 __ cmp(r2, r3); |
1507 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. | 1550 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. |
1508 | 1551 |
1509 // Both are heap numbers. Load them up then jump to the code we have | 1552 // Both are heap numbers. Load them up then jump to the code we have |
1510 // for that. | 1553 // for that. |
1511 if (CpuFeatures::IsSupported(VFP2)) { | 1554 if (CpuFeatures::IsSupported(FPU)) { |
1512 CpuFeatures::Scope scope(VFP2); | 1555 __ sub(r7, rhs, Operand(kHeapObjectTag - HeapNumber::kValueOffset)); |
1513 __ sub(r7, rhs, Operand(kHeapObjectTag)); | 1556 __ dldr(dr0, MemOperand(r7, 0), r7); |
1514 __ vldr(d6, r7, HeapNumber::kValueOffset); | 1557 __ sub(r7, lhs, Operand(kHeapObjectTag - HeapNumber::kValueOffset)); |
1515 __ sub(r7, lhs, Operand(kHeapObjectTag)); | 1558 __ dldr(dr2, MemOperand(r7, 0), r7); |
1516 __ vldr(d7, r7, HeapNumber::kValueOffset); | |
1517 } else { | 1559 } else { |
1518 __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | 1560 __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); |
1519 __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | 1561 __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); |
1520 } | 1562 } |
1521 __ jmp(both_loaded_as_doubles); | 1563 __ jmp(both_loaded_as_doubles); |
1522 } | 1564 } |
1523 | 1565 |
1524 | 1566 |
1525 // Fast negative check for symbol-to-symbol equality. | 1567 // Fast negative check for symbol-to-symbol equality. |
1526 static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm, | 1568 static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm, |
1527 Register lhs, | 1569 Register lhs, |
1528 Register rhs, | 1570 Register rhs, |
1529 Label* possible_strings, | 1571 Label* possible_strings, |
1530 Label* not_both_strings) { | 1572 Label* not_both_strings) { |
1531 ASSERT((lhs.is(r0) && rhs.is(r1)) || | 1573 ASSERT((lhs.is(r0) && rhs.is(r1)) || |
1532 (lhs.is(r1) && rhs.is(r0))); | 1574 (lhs.is(r1) && rhs.is(r0))); |
1533 | 1575 |
1534 // r2 is object type of rhs. | 1576 // r2 is object type of rhs. |
1535 // Ensure that no non-strings have the symbol bit set. | 1577 // Ensure that no non-strings have the symbol bit set. |
1536 Label object_test; | 1578 Label object_test; |
1537 STATIC_ASSERT(kSymbolTag != 0); | 1579 STATIC_ASSERT(kSymbolTag != 0); |
1538 __ tst(r2, Operand(kIsNotStringMask)); | 1580 __ tst(r2, Operand(kIsNotStringMask)); |
1539 __ b(ne, &object_test); | 1581 __ b(ne, &object_test, Label::kNear); |
1540 __ tst(r2, Operand(kIsSymbolMask)); | 1582 __ tst(r2, Operand(kIsSymbolMask)); |
1541 __ b(eq, possible_strings); | 1583 __ b(eq, possible_strings); |
1542 __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE); | 1584 __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE, ge); |
1543 __ b(ge, not_both_strings); | 1585 __ bt(not_both_strings); |
1544 __ tst(r3, Operand(kIsSymbolMask)); | 1586 __ tst(r3, Operand(kIsSymbolMask)); |
1545 __ b(eq, possible_strings); | 1587 __ b(eq, possible_strings); |
1546 | 1588 |
1547 // Both are symbols. We already checked they weren't the same pointer | 1589 // Both are symbols. We already checked they weren't the same pointer |
1548 // so they are not equal. | 1590 // so they are not equal. |
1549 __ mov(r0, Operand(NOT_EQUAL)); | 1591 __ mov(r0, Operand(NOT_EQUAL)); |
1550 __ Ret(); | 1592 __ Ret(); |
1551 | 1593 |
1552 __ bind(&object_test); | 1594 __ bind(&object_test); |
1553 __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE)); | 1595 __ cmpge(r2, Operand(FIRST_SPEC_OBJECT_TYPE)); |
1554 __ b(lt, not_both_strings); | 1596 __ bf(not_both_strings); |
1555 __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE); | 1597 __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE, ge); |
1556 __ b(lt, not_both_strings); | 1598 __ bf(not_both_strings); |
1557 // If both objects are undetectable, they are equal. Otherwise, they | 1599 // If both objects are undetectable, they are equal. Otherwise, they |
1558 // are not equal, since they are different objects and an object is not | 1600 // are not equal, since they are different objects and an object is not |
1559 // equal to undefined. | 1601 // equal to undefined. |
1560 __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset)); | 1602 __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset)); |
1561 __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset)); | 1603 __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset)); |
1562 __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset)); | 1604 __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset)); |
1563 __ and_(r0, r2, Operand(r3)); | 1605 __ land(r0, r2, r3); |
1564 __ and_(r0, r0, Operand(1 << Map::kIsUndetectable)); | 1606 __ land(r0, r0, Operand(1 << Map::kIsUndetectable)); |
1565 __ eor(r0, r0, Operand(1 << Map::kIsUndetectable)); | 1607 __ eor(r0, r0, Operand(1 << Map::kIsUndetectable)); |
1566 __ Ret(); | 1608 __ Ret(); |
1567 } | 1609 } |
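Two bit tricks above are worth spelling out. Only string types may carry the symbol bit (per the STATIC_ASSERTs), so AND-ing the two instance types keeps the bit exactly when both operands are symbols; and the final land/eor sequence computes a value that is zero (EQUAL) precisely when both maps are undetectable. Sketches (helper names invented):

    #include <cstdint>

    bool BothSymbols(uint32_t lhs_type, uint32_t rhs_type,
                     uint32_t is_symbol_mask) {
      return ((lhs_type & rhs_type) & is_symbol_mask) != 0;
    }

    // Zero iff both bit fields have the undetectable bit set.
    uint32_t UndetectableCompareResult(uint32_t lhs_bits, uint32_t rhs_bits,
                                       uint32_t undetectable_bit) {
      return ((lhs_bits & rhs_bits) & undetectable_bit) ^ undetectable_bit;
    }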
1568 | 1610 |
1569 | 1611 |
1570 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, | 1612 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, |
1571 Register object, | 1613 Register object, |
1572 Register result, | 1614 Register result, |
1573 Register scratch1, | 1615 Register scratch1, |
1574 Register scratch2, | 1616 Register scratch2, |
1575 Register scratch3, | 1617 Register scratch3, |
1576 bool object_is_smi, | 1618 bool object_is_smi, |
1577 Label* not_found) { | 1619 Label* not_found) { |
1578 // Register usage: result is used as a temporary. | 1620 // Register usage: result is used as a temporary. |
1579 Register number_string_cache = result; | 1621 Register number_string_cache = result; |
1580 Register mask = scratch3; | 1622 Register mask = scratch3; |
1581 | 1623 |
1582 // Load the number string cache. | 1624 // Load the number string cache. |
1583 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); | 1625 __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); |
1584 | 1626 |
1585 // Make the hash mask from the length of the number string cache. It | 1627 // Make the hash mask from the length of the number string cache. It |
1586 // contains two elements (number and string) for each cache entry. | 1628 // contains two elements (number and string) for each cache entry. |
1587 __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset)); | 1629 __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset)); |
1588 // Divide length by two (length is a smi). | 1630 // Divide length by two (length is a smi). |
1589 __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1)); | 1631 __ asr(mask, mask, Operand(kSmiTagSize + 1)); |
1590 __ sub(mask, mask, Operand(1)); // Make mask. | 1632 __ sub(mask, mask, Operand(1)); // Make mask. |
1591 | 1633 |
1592 // Calculate the entry in the number string cache. The hash value in the | 1634 // Calculate the entry in the number string cache. The hash value in the |
1593 // number string cache for smis is just the smi value, and the hash for | 1635 // number string cache for smis is just the smi value, and the hash for |
1594 // doubles is the xor of the upper and lower words. See | 1636 // doubles is the xor of the upper and lower words. See |
1595 // Heap::GetNumberStringCache. | 1637 // Heap::GetNumberStringCache. |
1596 Isolate* isolate = masm->isolate(); | 1638 Isolate* isolate = masm->isolate(); |
1597 Label is_smi; | 1639 Label is_smi; |
1598 Label load_result_from_cache; | 1640 Label load_result_from_cache; |
1599 if (!object_is_smi) { | 1641 if (!object_is_smi) { |
1600 __ JumpIfSmi(object, &is_smi); | 1642 __ JumpIfSmi(object, &is_smi, Label::kNear); |
1601 if (CpuFeatures::IsSupported(VFP2)) { | 1643 |
1602 CpuFeatures::Scope scope(VFP2); | 1644 if (CpuFeatures::IsSupported(FPU)) { |
1603 __ CheckMap(object, | 1645 __ CheckMap(object, |
1604 scratch1, | 1646 scratch1, |
1605 Heap::kHeapNumberMapRootIndex, | 1647 Heap::kHeapNumberMapRootIndex, |
1606 not_found, | 1648 not_found, |
1607 DONT_DO_SMI_CHECK); | 1649 DONT_DO_SMI_CHECK); |
1608 | 1650 |
1609 STATIC_ASSERT(8 == kDoubleSize); | 1651 STATIC_ASSERT(8 == kDoubleSize); |
1610 __ add(scratch1, | 1652 __ add(scratch1, |
1611 object, | 1653 object, |
1612 Operand(HeapNumber::kValueOffset - kHeapObjectTag)); | 1654 Operand(HeapNumber::kValueOffset - kHeapObjectTag)); |
1613 __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit()); | 1655 __ ldr(scratch2, MemOperand(scratch1, 4)); |
1614 __ eor(scratch1, scratch1, Operand(scratch2)); | 1656 __ ldr(scratch1, MemOperand(scratch1, 0)); |
1615 __ and_(scratch1, scratch1, Operand(mask)); | 1657 |
| 1658 __ eor(scratch1, scratch1, scratch2); |
| 1659 __ land(scratch1, scratch1, mask); |
1616 | 1660 |
1617 // Calculate address of entry in string cache: each entry consists | 1661 // Calculate address of entry in string cache: each entry consists |
1618 // of two pointer sized fields. | 1662 // of two pointer sized fields. |
1619 __ add(scratch1, | 1663 __ lsl(scratch1, scratch1, Operand(kPointerSizeLog2 + 1)); |
1620 number_string_cache, | 1664 __ add(scratch1, number_string_cache, scratch1); |
1621 Operand(scratch1, LSL, kPointerSizeLog2 + 1)); | |
1622 | 1665 |
1623 Register probe = mask; | 1666 Register probe = mask; |
1624 __ ldr(probe, | 1667 __ ldr(probe, |
1625 FieldMemOperand(scratch1, FixedArray::kHeaderSize)); | 1668 FieldMemOperand(scratch1, FixedArray::kHeaderSize)); |
1626 __ JumpIfSmi(probe, not_found); | 1669 __ JumpIfSmi(probe, not_found); |
1627 __ sub(scratch2, object, Operand(kHeapObjectTag)); | 1670 __ sub(scratch2, object, Operand(kHeapObjectTag - |
1628 __ vldr(d0, scratch2, HeapNumber::kValueOffset); | 1671 HeapNumber::kValueOffset)); |
| 1672 __ dldr(dr0, MemOperand(scratch2, 0), scratch2); |
1629 __ sub(probe, probe, Operand(kHeapObjectTag)); | 1673 __ sub(probe, probe, Operand(kHeapObjectTag)); |
1630 __ vldr(d1, probe, HeapNumber::kValueOffset); | 1674 __ dldr(dr2, MemOperand(probe, HeapNumber::kValueOffset)); |
1631 __ VFPCompareAndSetFlags(d0, d1); | 1675 __ dcmpeq(dr0, dr2); |
1632 __ b(ne, not_found); // The cache did not contain this value. | 1676 __ b(ne, not_found); // The cache did not contain this value. |
1633 __ b(&load_result_from_cache); | 1677 __ b(&load_result_from_cache); |
1634 } else { | 1678 } else { |
1635 __ b(not_found); | 1679 __ b(not_found); |
1636 } | 1680 } |
1637 } | 1681 } |
1638 | 1682 |
1639 __ bind(&is_smi); | 1683 __ bind(&is_smi); |
1640 Register scratch = scratch1; | 1684 Register scratch = scratch1; |
1641 __ and_(scratch, mask, Operand(object, ASR, 1)); | 1685 __ asr(scratch, object, Operand(1)); |
| 1686 __ land(scratch, mask, scratch); |
1642 // Calculate address of entry in string cache: each entry consists | 1687 // Calculate address of entry in string cache: each entry consists |
1643 // of two pointer sized fields. | 1688 // of two pointer sized fields. |
1644 __ add(scratch, | 1689 __ lsl(scratch, scratch, Operand(kPointerSizeLog2 + 1)); |
1645 number_string_cache, | 1690 __ add(scratch, number_string_cache, scratch); |
1646 Operand(scratch, LSL, kPointerSizeLog2 + 1)); | |
1647 | 1691 |
1648 // Check if the entry is the smi we are looking for. | 1692 // Check if the entry is the smi we are looking for. |
1649 Register probe = mask; | 1693 Register probe = mask; |
1650 __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); | 1694 __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); |
1651 __ cmp(object, probe); | 1695 __ cmp(object, probe); |
1652 __ b(ne, not_found); | 1696 __ b(ne, not_found); |
1653 | 1697 |
1654 // Get the result from the cache. | 1698 // Get the result from the cache. |
1655 __ bind(&load_result_from_cache); | 1699 __ bind(&load_result_from_cache); |
1656 __ ldr(result, | 1700 __ ldr(result, |
1657 FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); | 1701 FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); |
1658 __ IncrementCounter(isolate->counters()->number_to_string_native(), | 1702 __ IncrementCounter(isolate->counters()->number_to_string_native(), |
1659 1, | 1703 1, |
1660 scratch1, | 1704 scratch1, |
1661 scratch2); | 1705 scratch2); |
1662 } | 1706 } |
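The indexing scheme, as the comment citing Heap::GetNumberStringCache says: the hash of a Smi is its untagged value, and the hash of a heap number is the XOR of the two 32-bit halves of its double; either hash is masked by half the cache length minus one, and the entry holds the number at index*2 and the cached string at index*2 + 1. A sketch of the hashing (illustrative helpers):

    #include <cstdint>
    #include <cstring>

    uint32_t SmiCacheIndex(int32_t untagged_value, uint32_t mask) {
      return static_cast<uint32_t>(untagged_value) & mask;
    }

    uint32_t DoubleCacheIndex(double value, uint32_t mask) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      uint32_t lo = static_cast<uint32_t>(bits);
      uint32_t hi = static_cast<uint32_t>(bits >> 32);
      return (lo ^ hi) & mask;
    }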
1663 | 1707 |
1664 | 1708 |
1665 void NumberToStringStub::Generate(MacroAssembler* masm) { | 1709 void NumberToStringStub::Generate(MacroAssembler* masm) { |
| 1710 // Entry argument: on stack |
| 1711 // Exit in: r0 |
| 1712 |
1666 Label runtime; | 1713 Label runtime; |
1667 | 1714 |
1668 __ ldr(r1, MemOperand(sp, 0)); | 1715 __ ldr(r1, MemOperand(sp, 0)); |
1669 | 1716 |
1670 // Generate code to lookup number in the number string cache. | 1717 // Generate code to lookup number in the number string cache. |
1671 GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime); | 1718 GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime); |
1672 __ add(sp, sp, Operand(1 * kPointerSize)); | 1719 __ add(sp, sp, Operand(1 * kPointerSize)); |
1673 __ Ret(); | 1720 __ Ret(); |
1674 | 1721 |
1675 __ bind(&runtime); | 1722 __ bind(&runtime); |
1676 // Handle number to string in the runtime system if not found in the cache. | 1723 // Handle number to string in the runtime system if not found in the cache. |
1677 __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); | 1724 __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); |
1678 } | 1725 } |
1679 | 1726 |
1680 | 1727 |
1681 // On entry lhs_ and rhs_ are the values to be compared. | 1728 // On entry lhs_ and rhs_ are the values to be compared. |
1682 // On exit r0 is 0, positive or negative to indicate the result of | 1729 // On exit r0 is 0, positive or negative to indicate the result of |
1683 // the comparison. | 1730 // the comparison. |
1684 void CompareStub::Generate(MacroAssembler* masm) { | 1731 void CompareStub::Generate(MacroAssembler* masm) { |
1685 ASSERT((lhs_.is(r0) && rhs_.is(r1)) || | 1732 ASSERT((lhs_.is(r0) && rhs_.is(r1)) || |
1686 (lhs_.is(r1) && rhs_.is(r0))); | 1733 (lhs_.is(r1) && rhs_.is(r0))); |
1687 | 1734 |
1688 Label slow; // Call builtin. | 1735 Label slow; // Call builtin. |
1689 Label not_smis, both_loaded_as_doubles, lhs_not_nan; | 1736 Label not_smis, both_loaded_as_doubles, lhs_not_nan; |
1690 | 1737 |
1691 if (include_smi_compare_) { | 1738 if (include_smi_compare_) { |
1692 Label not_two_smis, smi_done; | 1739 Label not_two_smis, smi_done; |
1693 __ orr(r2, r1, r0); | 1740 __ orr(r2, r1, r0); |
1694 __ JumpIfNotSmi(r2, ¬_two_smis); | 1741 __ JumpIfNotSmi(r2, ¬_two_smis, Label::kNear); |
1695 __ mov(r1, Operand(r1, ASR, 1)); | 1742 __ asr(r1, r1, Operand(1)); |
1696 __ sub(r0, r1, Operand(r0, ASR, 1)); | 1743 __ asr(r0, r0, Operand(1)); |
| 1744 __ sub(r0, r1, r0); |
1697 __ Ret(); | 1745 __ Ret(); |
1698 __ bind(¬_two_smis); | 1746 __ bind(¬_two_smis); |
1699 } else if (FLAG_debug_code) { | 1747 } else if (FLAG_debug_code) { |
1700 __ orr(r2, r1, r0); | 1748 __ orr(r2, r1, r0); |
1701 __ tst(r2, Operand(kSmiTagMask)); | 1749 __ tst(r2, Operand(kSmiTagMask)); |
1702 __ Assert(ne, "CompareStub: unexpected smi operands."); | 1750 __ Assert(ne, "CompareStub: unexpected smi operands."); |
1703 } | 1751 } |
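The two-Smi fast path amounts to untag-and-subtract; the sign of the difference is the comparison result the callers expect in r0. Sketch (assumes the 31-bit Smi payload, so the subtraction cannot overflow):

    #include <cstdint>

    // Negative: lhs < rhs; zero: equal; positive: lhs > rhs.
    int32_t CompareTwoSmis(int32_t lhs_tagged, int32_t rhs_tagged) {
      return (lhs_tagged >> 1) - (rhs_tagged >> 1);  // kSmiTagSize == 1
    }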
1704 | 1752 |
1705 // NOTICE! This code is only reached after a smi-fast-case check, so | 1753 // NOTICE! This code is only reached after a smi-fast-case check, so |
1706 // it is certain that at least one operand isn't a smi. | 1754 // it is certain that at least one operand isn't a smi. |
1707 | 1755 |
1708 // Handle the case where the objects are identical. Either returns the answer | 1756 // Handle the case where the objects are identical. Either returns the answer |
1709 // or goes to slow. Only falls through if the objects were not identical. | 1757 // or goes to slow. Only falls through if the objects were not identical. |
1710 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); | 1758 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); |
1711 | 1759 |
1712 // If either is a Smi (we know that not both are), then they can only | 1760 // If either is a Smi (we know that not both are), then they can only |
1713 // be strictly equal if the other is a HeapNumber. | 1761 // be strictly equal if the other is a HeapNumber. |
1714 STATIC_ASSERT(kSmiTag == 0); | 1762 STATIC_ASSERT(kSmiTag == 0); |
1715 ASSERT_EQ(0, Smi::FromInt(0)); | 1763 ASSERT_EQ(0, Smi::FromInt(0)); |
1716 __ and_(r2, lhs_, Operand(rhs_)); | 1764 __ land(r2, lhs_, rhs_); |
1717 __ JumpIfNotSmi(r2, ¬_smis); | 1765 __ JumpIfNotSmi(r2, ¬_smis); |
1718 // One operand is a smi. EmitSmiNonsmiComparison generates code that can: | 1766 // One operand is a smi. EmitSmiNonsmiComparison generates code that can: |
1719 // 1) Return the answer. | 1767 // 1) Return the answer. |
1720 // 2) Go to slow. | 1768 // 2) Go to slow. |
1721 // 3) Fall through to both_loaded_as_doubles. | 1769 // 3) Fall through to both_loaded_as_doubles. |
1722 // 4) Jump to lhs_not_nan. | 1770 // 4) Jump to lhs_not_nan. |
1723 // In cases 3 and 4 we have found out we were dealing with a number-number | 1771 // In cases 3 and 4 we have found out we were dealing with a number-number |
1724 // comparison. If VFP3 is supported the double values of the numbers have | 1772 // comparison. If FPU is supported the double values of the numbers have |
1725 // been loaded into d7 and d6. Otherwise, the double values have been loaded | 1773 // been loaded into dr2 and dr0. Otherwise, the double values have been |
1726 // into r0, r1, r2, and r3. | 1774 // loaded into r0, r1, r2, and r3. |
1727 EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_); | 1775 EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_); |
1728 | 1776 |
1729 __ bind(&both_loaded_as_doubles); | 1777 __ bind(&both_loaded_as_doubles); |
1730 // The arguments have been converted to doubles and stored in d6 and d7, if | 1778 // The arguments have been converted to doubles and stored in dr0 and dr2, if |
1731 // VFP3 is supported, or in r0, r1, r2, and r3. | 1779 // FPU is supported, or in r0, r1, r2, and r3. |
1732 Isolate* isolate = masm->isolate(); | 1780 Isolate* isolate = masm->isolate(); |
1733 if (CpuFeatures::IsSupported(VFP2)) { | 1781 if (CpuFeatures::IsSupported(FPU)) { |
1734 __ bind(&lhs_not_nan); | 1782 __ bind(&lhs_not_nan); |
1735 CpuFeatures::Scope scope(VFP2); | 1783 |
1736 Label no_nan; | 1784 // Test for NaN |
1737 // ARMv7 VFP3 instructions to implement double precision comparison. | |
1738 __ VFPCompareAndSetFlags(d7, d6); | |
1739 Label nan; | 1785 Label nan; |
1740 __ b(vs, &nan); | 1786 __ dcmpeq(dr0, dr0); |
1741 __ mov(r0, Operand(EQUAL), LeaveCC, eq); | 1787 __ bf_near(&nan); |
1742 __ mov(r0, Operand(LESS), LeaveCC, lt); | 1788 __ dcmpeq(dr2, dr2); |
1743 __ mov(r0, Operand(GREATER), LeaveCC, gt); | 1789 __ bf_near(&nan); |
1744 __ Ret(); | 1790 |
| 1791 // Test for eq, lt and gt |
| 1792 Label equal, greater; |
| 1793 __ dcmpeq(dr2, dr0); |
| 1794 __ bt_near(&equal); |
| 1795 __ dcmpgt(dr2, dr0); |
| 1796 __ bt_near(&greater); |
| 1797 |
| 1798 __ mov(r0, Operand(LESS)); |
| 1799 __ rts(); |
| 1800 |
| 1801 __ bind(&equal); |
| 1802 __ mov(r0, Operand(EQUAL)); |
| 1803 __ rts(); |
| 1804 |
| 1805 __ bind(&greater); |
| 1806 __ mov(r0, Operand(GREATER)); |
| 1807 __ rts(); |
1745 | 1808 |
1746 __ bind(&nan); | 1809 __ bind(&nan); |
1747 // If one of the sides was a NaN then the v flag is set. Load r0 with | 1810 // One of the sides was a NaN. Load r0 with whatever it takes to make the |
1748 // whatever it takes to make the comparison fail, since comparisons with NaN | 1811 // comparison fail, since comparisons with NaN always fail. |
1749 // always fail. | |
1750 if (cc_ == lt || cc_ == le) { | 1812 if (cc_ == lt || cc_ == le) { |
1751 __ mov(r0, Operand(GREATER)); | 1813 __ mov(r0, Operand(GREATER)); |
1752 } else { | 1814 } else { |
1753 __ mov(r0, Operand(LESS)); | 1815 __ mov(r0, Operand(LESS)); |
1754 } | 1816 } |
1755 __ Ret(); | 1817 __ Ret(); |
1756 } else { | 1818 } else { |
1757 // Checks for NaN in the doubles we have loaded. Can return the answer or | 1819 // Checks for NaN in the doubles we have loaded. Can return the answer or |
1758 // fall through if neither is a NaN. Also binds lhs_not_nan. | 1820 // fall through if neither is a NaN. Also binds lhs_not_nan. |
1759 EmitNanCheck(masm, &lhs_not_nan, cc_); | 1821 EmitNanCheck(masm, &lhs_not_nan, cc_); |
(...skipping 114 matching lines...)
1874 __ JumpIfSmi(tos_, &patch); | 1936 __ JumpIfSmi(tos_, &patch); |
1875 } | 1937 } |
1876 | 1938 |
1877 if (types_.NeedsMap()) { | 1939 if (types_.NeedsMap()) { |
1878 __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset)); | 1940 __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset)); |
1879 | 1941 |
1880 if (types_.CanBeUndetectable()) { | 1942 if (types_.CanBeUndetectable()) { |
1881 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset)); | 1943 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset)); |
1882 __ tst(ip, Operand(1 << Map::kIsUndetectable)); | 1944 __ tst(ip, Operand(1 << Map::kIsUndetectable)); |
1883 // Undetectable -> false. | 1945 // Undetectable -> false. |
1884 __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne); | 1946 Label skip; |
1885 __ Ret(ne); | 1947 __ bt_near(&skip); |
| 1948 __ mov(tos_, Operand(0, RelocInfo::NONE)); |
| 1949 __ rts(); |
| 1950 __ bind(&skip); |
1886 } | 1951 } |
1887 } | 1952 } |
1888 | 1953 |
1889 if (types_.Contains(SPEC_OBJECT)) { | 1954 if (types_.Contains(SPEC_OBJECT)) { |
1890 // Spec object -> true. | 1955 // Spec object -> true. |
1891 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE); | 1956 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE, ge); |
1892 // tos_ contains the correct non-zero return value already. | 1957 // tos_ contains the correct non-zero return value already. |
1893 __ Ret(ge); | 1958 __ Ret(eq); |
1894 } | 1959 } |
1895 | 1960 |
1896 if (types_.Contains(STRING)) { | 1961 if (types_.Contains(STRING)) { |
1897 // String value -> false iff empty. | 1962 // String value -> false iff empty. |
1898 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); | 1963 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE, ge); |
1899 __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt); | 1964 Label skip; |
1900 __ Ret(lt); // the string length is OK as the return value | 1965 __ bt_near(&skip); |
| 1966 __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset)); |
| 1967 __ rts(); // the string length is OK as the return value |
| 1968 __ bind(&skip); |
1901 } | 1969 } |
1902 | 1970 |
1903 if (types_.Contains(HEAP_NUMBER)) { | 1971 if (types_.Contains(HEAP_NUMBER)) { |
1904 // Heap number -> false iff +0, -0, or NaN. | 1972 // Heap number -> false iff +0, -0, or NaN. |
1905 Label not_heap_number; | 1973 Label not_heap_number; |
1906 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); | 1974 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); |
1907 __ b(ne, ¬_heap_number); | 1975 __ bf(¬_heap_number); |
1908 | 1976 |
1909 if (CpuFeatures::IsSupported(VFP2)) { | 1977 if (CpuFeatures::IsSupported(FPU)) { |
1910 CpuFeatures::Scope scope(VFP2); | 1978 __ dldr(dr0, FieldMemOperand(tos_, HeapNumber::kValueOffset)); |
1911 | 1979 // "tos_" is a register, and contains a non zero value by default. |
1912 __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset)); | 1980 // Hence we only need to overwrite "tos_" with zero to return false for |
1913 __ VFPCompareAndSetFlags(d1, 0.0); | 1981 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. |
1914 // "tos_" is a register, and contains a non zero value by default. | 1982 __ dfloat(dr2, Operand(0)); |
1915 // Hence we only need to overwrite "tos_" with zero to return false for | 1983 __ dcmpeq(dr0, dr2); |
1916 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. | 1984 __ mov(tos_, Operand(0, RelocInfo::NONE), eq); // for FP_ZERO |
1917 __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO | 1985 __ dcmpeq(dr0, dr0); |
1918 __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN | 1986 // for FP_NAN (dr0 != dr0 iff isnan(dr0)) |
1919 } else { | 1987 __ mov(tos_, Operand(0, RelocInfo::NONE), ne); |
1920 Label done, not_nan, not_zero; | 1988 } else { |
1921 __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset)); | 1989 UNIMPLEMENTED(); |
1922 // -0 maps to false: | 1990 } |
1923 __ bic( | |
1924 temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE), SetCC); | |
1925 __ b(ne, ¬_zero); | |
1926 // If exponent word is zero then the answer depends on the mantissa word. | |
1927 __ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset)); | |
1928 __ jmp(&done); | |
1929 | |
1930 // Check for NaN. | |
1931 __ bind(¬_zero); | |
1932 // We already zeroed the sign bit, now shift out the mantissa so we only | |
1933 // have the exponent left. | |
1934 __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord)); | |
1935 unsigned int shifted_exponent_mask = | |
1936 HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord; | |
1937 __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE)); | |
1938 __ b(ne, ¬_nan); // If exponent is not 0x7ff then it can't be a NaN. | |
1939 | |
1940 // Reload exponent word. | |
1941 __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset)); | |
1942 __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE)); | |
1943 // If mantissa is not zero then we have a NaN, so return 0. | |
1944 __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne); | |
1945 __ b(ne, &done); | |
1946 | |
1947 // Load mantissa word. | |
1948 __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset)); | |
1949 __ cmp(temp, Operand(0, RelocInfo::NONE)); | |
1950 // If mantissa is not zero then we have a NaN, so return 0. | |
1951 __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne); | |
1952 __ b(ne, &done); | |
1953 | |
1954 __ bind(¬_nan); | |
1955 __ mov(tos_, Operand(1, RelocInfo::NONE)); | |
1956 __ bind(&done); | |
1957 } | |
1958 __ Ret(); | 1991 __ Ret(); |
1959 __ bind(¬_heap_number); | 1992 __ bind(¬_heap_number); |
1960 } | 1993 } |
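The FPU branch above returns false for exactly +0, -0, and NaN, using dcmpeq(dr0, dr0) as the usual "x is NaN iff x != x" probe. In plain C++ (illustrative helper):

    bool HeapNumberToBoolean(double value) {
      if (value == 0.0) return false;    // catches both +0 and -0
      if (value != value) return false;  // NaN is the only value != itself
      return true;
    }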
1961 | 1994 |
1962 __ bind(&patch); | 1995 __ bind(&patch); |
1963 GenerateTypeTransition(masm); | 1996 GenerateTypeTransition(masm); |
1964 } | 1997 } |
1965 | 1998 |
1966 | 1999 |
1967 void ToBooleanStub::CheckOddball(MacroAssembler* masm, | 2000 void ToBooleanStub::CheckOddball(MacroAssembler* masm, |
1968 Type type, | 2001 Type type, |
1969 Heap::RootListIndex value, | 2002 Heap::RootListIndex value, |
1970 bool result) { | 2003 bool result) { |
1971 if (types_.Contains(type)) { | 2004 if (types_.Contains(type)) { |
1972 // If we see an expected oddball, return its ToBoolean value tos_. | 2005 // If we see an expected oddball, return its ToBoolean value tos_. |
1973 __ LoadRoot(ip, value); | 2006 __ LoadRoot(ip, value); |
1974 __ cmp(tos_, ip); | 2007 __ cmp(tos_, ip); |
1975 // The value of a root is never NULL, so we can avoid loading a non-null | 2008 // The value of a root is never NULL, so we can avoid loading a non-null |
1976 // value into tos_ when we want to return 'true'. | 2009 // value into tos_ when we want to return 'true'. |
1977 if (!result) { | 2010 if (!result) { |
1978 __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); | 2011 __ mov(tos_, Operand(0, RelocInfo::NONE), eq); |
1979 } | 2012 } |
1980 __ Ret(eq); | 2013 __ Ret(eq); |
1981 } | 2014 } |
1982 } | 2015 } |
1983 | 2016 |
1984 | 2017 |
1985 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) { | 2018 void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) { |
1986 if (!tos_.is(r3)) { | 2019 if (!tos_.is(r3)) { |
1987 __ mov(r3, Operand(tos_)); | 2020 __ mov(r3, tos_); |
1988 } | 2021 } |
1989 __ mov(r2, Operand(Smi::FromInt(tos_.code()))); | 2022 __ mov(r2, Operand(Smi::FromInt(tos_.code()))); |
1990 __ mov(r1, Operand(Smi::FromInt(types_.ToByte()))); | 2023 __ mov(r1, Operand(Smi::FromInt(types_.ToByte()))); |
1991 __ Push(r3, r2, r1); | 2024 __ Push(r3, r2, r1); |
1992 // Patch the caller to an appropriate specialized stub and return the | 2025 // Patch the caller to an appropriate specialized stub and return the |
1993 // operation result to the caller of the stub. | 2026 // operation result to the caller of the stub. |
1994 __ TailCallExternalReference( | 2027 __ TailCallExternalReference( |
1995 ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()), | 2028 ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()), |
1996 3, | 2029 3, |
1997 1); | 2030 1); |
1998 } | 2031 } |
1999 | 2032 |
2000 | 2033 |
2001 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { | 2034 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { |
2002 // We don't allow a GC during a store buffer overflow so there is no need to | 2035 // We don't allow a GC during a store buffer overflow so there is no need to |
2003 // store the registers in any particular way, but we do have to store and | 2036 // store the registers in any particular way, but we do have to store and |
2004 // restore them. | 2037 // restore them. |
2005 __ stm(db_w, sp, kCallerSaved | lr.bit()); | 2038 __ pushm(kJSCallerSaved); |
| 2039 __ push(pr); |
2006 if (save_doubles_ == kSaveFPRegs) { | 2040 if (save_doubles_ == kSaveFPRegs) { |
2007 CpuFeatures::Scope scope(VFP2); | 2041 UNIMPLEMENTED(); |
2008 __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters)); | |
2009 for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { | |
2010 DwVfpRegister reg = DwVfpRegister::from_code(i); | |
2011 __ vstr(reg, MemOperand(sp, i * kDoubleSize)); | |
2012 } | |
2013 } | 2042 } |
2014 const int argument_count = 1; | 2043 const int argument_count = 1; |
2015 const int fp_argument_count = 0; | 2044 const int fp_argument_count = 0; |
2016 const Register scratch = r1; | 2045 const Register scratch = r1; |
2017 | 2046 |
2018 AllowExternalCallThatCantCauseGC scope(masm); | 2047 AllowExternalCallThatCantCauseGC scope(masm); |
2019 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); | 2048 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); |
2020 __ mov(r0, Operand(ExternalReference::isolate_address())); | 2049 __ mov(r0, Operand(ExternalReference::isolate_address())); |
2021 __ CallCFunction( | 2050 __ CallCFunction( |
2022 ExternalReference::store_buffer_overflow_function(masm->isolate()), | 2051 ExternalReference::store_buffer_overflow_function(masm->isolate()), |
2023 argument_count); | 2052 argument_count); |
2024 if (save_doubles_ == kSaveFPRegs) { | 2053 if (save_doubles_ == kSaveFPRegs) { |
2025 CpuFeatures::Scope scope(VFP2); | 2054 UNIMPLEMENTED(); |
2026 for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { | |
2027 DwVfpRegister reg = DwVfpRegister::from_code(i); | |
2028 __ vldr(reg, MemOperand(sp, i * kDoubleSize)); | |
2029 } | |
2030 __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters)); | |
2031 } | 2055 } |
2032 __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0). | 2056 __ pop(pr); |
| 2057 __ popm(kJSCallerSaved); |
| 2058 __ rts(); |
2033 } | 2059 } |
2034 | 2060 |
2035 | 2061 |
2036 void UnaryOpStub::PrintName(StringStream* stream) { | 2062 void UnaryOpStub::PrintName(StringStream* stream) { |
2037 const char* op_name = Token::Name(op_); | 2063 const char* op_name = Token::Name(op_); |
2038 const char* overwrite_name = NULL; // Make g++ happy. | 2064 const char* overwrite_name = NULL; // Make g++ happy. |
2039 switch (mode_) { | 2065 switch (mode_) { |
2040 case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break; | 2066 case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break; |
2041 case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break; | 2067 case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break; |
2042 } | 2068 } |
(...skipping 17 matching lines...) |
2060 GenerateHeapNumberStub(masm); | 2086 GenerateHeapNumberStub(masm); |
2061 break; | 2087 break; |
2062 case UnaryOpIC::GENERIC: | 2088 case UnaryOpIC::GENERIC: |
2063 GenerateGenericStub(masm); | 2089 GenerateGenericStub(masm); |
2064 break; | 2090 break; |
2065 } | 2091 } |
2066 } | 2092 } |
2067 | 2093 |
2068 | 2094 |
2069 void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | 2095 void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
2070 __ mov(r3, Operand(r0)); // the operand | 2096 __ mov(r3, r0); // the operand |
2071 __ mov(r2, Operand(Smi::FromInt(op_))); | 2097 __ mov(r2, Operand(Smi::FromInt(op_))); |
2072 __ mov(r1, Operand(Smi::FromInt(mode_))); | 2098 __ mov(r1, Operand(Smi::FromInt(mode_))); |
2073 __ mov(r0, Operand(Smi::FromInt(operand_type_))); | 2099 __ mov(r0, Operand(Smi::FromInt(operand_type_))); |
2074 __ Push(r3, r2, r1, r0); | 2100 __ Push(r3, r2, r1, r0); |
2075 | 2101 |
2076 __ TailCallExternalReference( | 2102 __ TailCallExternalReference( |
2077 ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1); | 2103 ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1); |
2078 } | 2104 } |
2079 | 2105 |
2080 | 2106 |
(...skipping 28 matching lines...) |
2109 GenerateTypeTransition(masm); | 2135 GenerateTypeTransition(masm); |
2110 } | 2136 } |
2111 | 2137 |
2112 | 2138 |
2113 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm, | 2139 void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm, |
2114 Label* non_smi, | 2140 Label* non_smi, |
2115 Label* slow) { | 2141 Label* slow) { |
2116 __ JumpIfNotSmi(r0, non_smi); | 2142 __ JumpIfNotSmi(r0, non_smi); |
2117 | 2143 |
2118 // The result of negating zero or the smallest negative smi is not a smi. | 2144 // The result of negating zero or the smallest negative smi is not a smi. |
2119 __ bic(ip, r0, Operand(0x80000000), SetCC); | 2145 __ bic(ip, r0, Operand(0x80000000)); |
| 2146 __ tst(ip, ip); |
2120 __ b(eq, slow); | 2147 __ b(eq, slow); |
2121 | 2148 |
2122 // Return '0 - value'. | 2149 // Return '0 - value'. |
2123 __ rsb(r0, r0, Operand(0, RelocInfo::NONE)); | 2150 __ rsb(r0, r0, Operand(0, RelocInfo::NONE)); |
2124 __ Ret(); | 2151 __ Ret(); |
2125 } | 2152 } |
2126 | 2153 |
2127 | 2154 |
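The bic/tst pair above folds two edge cases into one test: the only smis whose negation is not a smi are 0 (negating gives -0, a heap number) and -2^30 (there is no +2^30 smi), and their tagged encodings 0x00000000 and 0x80000000 are exactly the values that become zero once bit 31 is cleared. A runnable sketch, assuming the 32-bit tagging shown earlier:

#include <cassert>
#include <cstdint>

// True when negating the tagged smi yields another smi; mirrors
// 'bic ip, r0, #0x80000000' followed by a zero test.
bool NegationStaysSmi(uint32_t tagged_smi) {
  return (tagged_smi & ~0x80000000u) != 0;
}

int main() {
  assert(!NegationStaysSmi(0x00000000u));  // smi 0: -0 is not a smi
  assert(!NegationStaysSmi(0x80000000u));  // smi -2^30: 2^30 overflows
  assert(NegationStaysSmi(0xFFFFFFFEu));   // smi -1
  assert(NegationStaysSmi(0x00000002u));   // smi 1
  return 0;
}
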
2128 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm, | 2155 void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm, |
2129 Label* non_smi) { | 2156 Label* non_smi) { |
2130 __ JumpIfNotSmi(r0, non_smi); | 2157 __ JumpIfNotSmi(r0, non_smi); |
2131 | 2158 |
2132 // Flip bits and revert inverted smi-tag. | 2159 // Flip bits and revert inverted smi-tag. |
2133 __ mvn(r0, Operand(r0)); | 2160 __ mvn(r0, r0); |
2134 __ bic(r0, r0, Operand(kSmiTagMask)); | 2161 __ bic(r0, r0, Operand(kSmiTagMask)); |
2135 __ Ret(); | 2162 __ Ret(); |
2136 } | 2163 } |
2137 | 2164 |
2138 | 2165 |
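Why flipping all bits and then clearing the tag works: for a tagged smi v = x << 1, the complement ~v is (~x << 1) | 1, so the payload bits already hold ~x and only the now-set tag bit has to be cleared; no untag/retag round trip is needed. A sketch under the same tagging assumptions:

#include <cassert>
#include <cstdint>

// mvn + 'bic r0, r0, #kSmiTagMask' in one expression.
uint32_t SmiBitNot(uint32_t tagged_smi) {
  return ~tagged_smi & ~1u;
}

int main() {
  const int32_t xs[] = {0, 1, -1, 12345, -12345};
  for (int32_t x : xs) {
    uint32_t tagged = (uint32_t)x << 1;                // SmiTag(x)
    assert(SmiBitNot(tagged) == (uint32_t)(~x) << 1);  // SmiTag(~x)
  }
  return 0;
}
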
2139 // TODO(svenpanne): Use virtual functions instead of switch. | 2166 // TODO(svenpanne): Use virtual functions instead of switch. |
2140 void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { | 2167 void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
2141 switch (op_) { | 2168 switch (op_) { |
2142 case Token::SUB: | 2169 case Token::SUB: |
2143 GenerateHeapNumberStubSub(masm); | 2170 GenerateHeapNumberStubSub(masm); |
(...skipping 32 matching lines...) |
2176 Label* slow) { | 2203 Label* slow) { |
2177 EmitCheckForHeapNumber(masm, r0, r1, r6, slow); | 2204 EmitCheckForHeapNumber(masm, r0, r1, r6, slow); |
2178 // r0 is a heap number. Get a new heap number in r1. | 2205 // r0 is a heap number. Get a new heap number in r1. |
2179 if (mode_ == UNARY_OVERWRITE) { | 2206 if (mode_ == UNARY_OVERWRITE) { |
2180 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | 2207 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); |
2181 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. | 2208 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. |
2182 __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | 2209 __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); |
2183 } else { | 2210 } else { |
2184 Label slow_allocate_heapnumber, heapnumber_allocated; | 2211 Label slow_allocate_heapnumber, heapnumber_allocated; |
2185 __ AllocateHeapNumber(r1, r2, r3, r6, &slow_allocate_heapnumber); | 2212 __ AllocateHeapNumber(r1, r2, r3, r6, &slow_allocate_heapnumber); |
2186 __ jmp(&heapnumber_allocated); | 2213 __ jmp_near(&heapnumber_allocated); |
2187 | 2214 |
2188 __ bind(&slow_allocate_heapnumber); | 2215 __ bind(&slow_allocate_heapnumber); |
2189 { | 2216 { |
2190 FrameScope scope(masm, StackFrame::INTERNAL); | 2217 FrameScope scope(masm, StackFrame::INTERNAL); |
2191 __ push(r0); | 2218 __ push(r0); |
2192 __ CallRuntime(Runtime::kNumberAlloc, 0); | 2219 __ CallRuntime(Runtime::kNumberAlloc, 0); |
2193 __ mov(r1, Operand(r0)); | 2220 __ mov(r1, r0); |
2194 __ pop(r0); | 2221 __ pop(r0); |
2195 } | 2222 } |
2196 | 2223 |
2197 __ bind(&heapnumber_allocated); | 2224 __ bind(&heapnumber_allocated); |
2198 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); | 2225 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); |
2199 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | 2226 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); |
2200 __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); | 2227 __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); |
2201 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. | 2228 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. |
2202 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); | 2229 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); |
2203 __ mov(r0, Operand(r1)); | 2230 __ mov(r0, r1); |
2204 } | 2231 } |
2205 __ Ret(); | 2232 __ Ret(); |
2206 } | 2233 } |
2207 | 2234 |
2208 | 2235 |
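Negating a heap number is a single bit flip: the stub XORs HeapNumber::kSignMask into the word holding the sign and exponent, which negates any IEEE-754 value, including -0, without touching the mantissa word. A hedged standalone sketch of the same operation on a whole 64-bit double (V8 applies a 32-bit mask to the high word only; here the mask is widened to 64 bits):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

const uint64_t kSignMask = 0x8000000000000000ull;  // 64-bit analogue

double FlipSign(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // same bytes the stub rewrites
  bits ^= kSignMask;                     // eor with the sign mask
  std::memcpy(&d, &bits, sizeof(d));
  return d;
}

int main() {
  assert(FlipSign(1.5) == -1.5);
  assert(std::signbit(FlipSign(0.0)));  // produces -0, which 0 - x would not
  return 0;
}
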
2209 void UnaryOpStub::GenerateHeapNumberCodeBitNot( | 2236 void UnaryOpStub::GenerateHeapNumberCodeBitNot( |
2210 MacroAssembler* masm, Label* slow) { | 2237 MacroAssembler* masm, Label* slow) { |
2211 Label impossible; | 2238 Label impossible; |
2212 | 2239 |
2213 EmitCheckForHeapNumber(masm, r0, r1, r6, slow); | 2240 EmitCheckForHeapNumber(masm, r0, r1, r6, slow); |
2214 // Convert the heap number in r0 to an untagged integer in r1. | 2241 // Convert the heap number in r0 to an untagged integer in r1. |
2215 __ ConvertToInt32(r0, r1, r2, r3, d0, slow); | 2242 __ ConvertToInt32(r0, r1, r2, r3, dr0, slow); |
2216 | 2243 |
2217 // Do the bitwise operation and check if the result fits in a smi. | 2244 // Do the bitwise operation and check if the result fits in a smi. |
2218 Label try_float; | 2245 Label try_float; |
2219 __ mvn(r1, Operand(r1)); | 2246 __ mvn(r1, r1); |
2220 __ add(r2, r1, Operand(0x40000000), SetCC); | 2247 __ add(r2, r1, Operand(0x40000000)); |
2221 __ b(mi, &try_float); | 2248 __ cmpge(r2, Operand(0)); |
| 2249 __ bf(&try_float); |
2222 | 2250 |
2223 // Tag the result as a smi and we're done. | 2251 // Tag the result as a smi and we're done. |
2224 __ mov(r0, Operand(r1, LSL, kSmiTagSize)); | 2252 __ lsl(r0, r1, Operand(kSmiTagSize)); |
2225 __ Ret(); | 2253 __ Ret(); |
2226 | 2254 |
2227 // Try to store the result in a heap number. | 2255 // Try to store the result in a heap number. |
2228 __ bind(&try_float); | 2256 __ bind(&try_float); |
2229 if (mode_ == UNARY_NO_OVERWRITE) { | 2257 if (mode_ == UNARY_NO_OVERWRITE) { |
2230 Label slow_allocate_heapnumber, heapnumber_allocated; | 2258 Label slow_allocate_heapnumber, heapnumber_allocated; |
2231 // Allocate a new heap number without zapping r0, which we need if it fails. | 2259 // Allocate a new heap number without zapping r0, which we need if it fails. |
2232 __ AllocateHeapNumber(r2, r3, r4, r6, &slow_allocate_heapnumber); | 2260 __ AllocateHeapNumber(r2, r3, r4, r6, &slow_allocate_heapnumber); |
2233 __ jmp(&heapnumber_allocated); | 2261 __ jmp(&heapnumber_allocated); |
2234 | 2262 |
2235 __ bind(&slow_allocate_heapnumber); | 2263 __ bind(&slow_allocate_heapnumber); |
2236 { | 2264 { |
2237 FrameScope scope(masm, StackFrame::INTERNAL); | 2265 FrameScope scope(masm, StackFrame::INTERNAL); |
2238 __ push(r0); // Push the heap number, not the untagged int32. | 2266 __ push(r0); // Push the heap number, not the untagged int32. |
2239 __ CallRuntime(Runtime::kNumberAlloc, 0); | 2267 __ CallRuntime(Runtime::kNumberAlloc, 0); |
2240 __ mov(r2, r0); // Move the new heap number into r2. | 2268 __ mov(r2, r0); // Move the new heap number into r2. |
2241 // Get the heap number into r0, now that the new heap number is in r2. | 2269 // Get the heap number into r0, now that the new heap number is in r2. |
2242 __ pop(r0); | 2270 __ pop(r0); |
2243 } | 2271 } |
2244 | 2272 |
2245 // Convert the heap number in r0 to an untagged integer in r1. | 2273 // Convert the heap number in r0 to an untagged integer in r1. |
2246 // This can't go slow-case because it's the same number we already | 2274 // This can't go slow-case because it's the same number we already |
2247 // converted once. | 2275 // converted once. |
2248 __ ConvertToInt32(r0, r1, r3, r4, d0, &impossible); | 2276 __ ConvertToInt32(r0, r1, r3, r4, dr0, &impossible); |
2249 __ mvn(r1, Operand(r1)); | 2277 __ mvn(r1, r1); |
2250 | 2278 |
2251 __ bind(&heapnumber_allocated); | 2279 __ bind(&heapnumber_allocated); |
2252 __ mov(r0, r2); // Move newly allocated heap number to r0. | 2280 __ mov(r0, r2); // Move newly allocated heap number to r0. |
2253 } | 2281 } |
2254 | 2282 |
2255 if (CpuFeatures::IsSupported(VFP2)) { | 2283 if (CpuFeatures::IsSupported(FPU)) { |
2256 // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. | 2284 // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. |
2257 CpuFeatures::Scope scope(VFP2); | 2285 __ dfloat(dr0, r1); |
2258 __ vmov(s0, r1); | 2286 __ sub(r2, r0, Operand(kHeapObjectTag - HeapNumber::kValueOffset)); |
2259 __ vcvt_f64_s32(d0, s0); | 2287 __ dstr(dr0, MemOperand(r2, 0), r2); |
2260 __ sub(r2, r0, Operand(kHeapObjectTag)); | 2288 __ rts(); |
2261 __ vstr(d0, r2, HeapNumber::kValueOffset); | |
2262 __ Ret(); | |
2263 } else { | 2289 } else { |
2264 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not | 2290 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not |
2265 // have to set up a frame. | 2291 // have to set up a frame. |
2266 WriteInt32ToHeapNumberStub stub(r1, r0, r2); | 2292 WriteInt32ToHeapNumberStub stub(r1, r0, r2); |
2267 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | 2293 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); |
2268 } | 2294 } |
2269 | 2295 |
2270 __ bind(&impossible); | 2296 __ bind(&impossible); |
2271 if (FLAG_debug_code) { | 2297 if (FLAG_debug_code) { |
2272 __ stop("Incorrect assumption in bit-not stub"); | 2298 __ stop("Incorrect assumption in bit-not stub"); |
(...skipping 65 matching lines...) |
2338 __ TailCallExternalReference( | 2364 __ TailCallExternalReference( |
2339 ExternalReference(IC_Utility(IC::kBinaryOp_Patch), | 2365 ExternalReference(IC_Utility(IC::kBinaryOp_Patch), |
2340 masm->isolate()), | 2366 masm->isolate()), |
2341 5, | 2367 5, |
2342 1); | 2368 1); |
2343 } | 2369 } |
2344 | 2370 |
2345 | 2371 |
2346 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( | 2372 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( |
2347 MacroAssembler* masm) { | 2373 MacroAssembler* masm) { |
2348 UNIMPLEMENTED(); | 2374 __ UNIMPLEMENTED_BREAK(); |
2349 } | 2375 } |
2350 | 2376 |
2351 | 2377 |
2352 void BinaryOpStub::Generate(MacroAssembler* masm) { | 2378 void BinaryOpStub::Generate(MacroAssembler* masm) { |
2353 // Explicitly allow generation of nested stubs. It is safe here because | 2379 // Explicitly allow generation of nested stubs. It is safe here because |
2354 // generation code does not use any raw pointers. | 2380 // generation code does not use any raw pointers. |
2355 AllowStubCallsScope allow_stub_calls(masm, true); | 2381 AllowStubCallsScope allow_stub_calls(masm, true); |
2356 | 2382 |
2357 switch (operands_type_) { | 2383 switch (operands_type_) { |
2358 case BinaryOpIC::UNINITIALIZED: | 2384 case BinaryOpIC::UNINITIALIZED: |
(...skipping 44 matching lines...) |
2403 | 2429 |
2404 void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) { | 2430 void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) { |
2405 Register left = r1; | 2431 Register left = r1; |
2406 Register right = r0; | 2432 Register right = r0; |
2407 Register scratch1 = r7; | 2433 Register scratch1 = r7; |
2408 Register scratch2 = r9; | 2434 Register scratch2 = r9; |
2409 | 2435 |
2410 ASSERT(right.is(r0)); | 2436 ASSERT(right.is(r0)); |
2411 STATIC_ASSERT(kSmiTag == 0); | 2437 STATIC_ASSERT(kSmiTag == 0); |
2412 | 2438 |
2413 Label not_smi_result; | 2439 Label not_smi_result, skip_if_true, skip_if_false; |
2414 switch (op_) { | 2440 switch (op_) { |
2415 case Token::ADD: | 2441 case Token::ADD: |
2416 __ add(right, left, Operand(right), SetCC); // Add optimistically. | 2442 __ addv(right, left, right); // Add optimistically. |
2417 __ Ret(vc); | 2443 __ Ret(f); // Return if no overflow. |
2418 __ sub(right, right, Operand(left)); // Revert optimistic add. | 2444 __ sub(right, right, left); // Revert optimistic add. |
2419 break; | 2445 break; |
2420 case Token::SUB: | 2446 case Token::SUB: |
2421 __ sub(right, left, Operand(right), SetCC); // Subtract optimistically. | 2447 __ subv(right, left, right); // Subtract optimistically. |
2422 __ Ret(vc); | 2448 __ Ret(f); // Return if no overflow. |
2423 __ sub(right, left, Operand(right)); // Revert optimistic subtract. | 2449 __ sub(right, left, right); // Revert optimistic subtract. |
2424 break; | 2450 break; |
2425 case Token::MUL: | 2451 case Token::MUL: |
| 2452 // TODO(stm): implement optimized multiply with overflow check for SH4 |
2426 // Remove tag from one of the operands. This way the multiplication result | 2453 // Remove tag from one of the operands. This way the multiplication result |
2427 // will be a smi if it fits the smi range. | 2454 // will be a smi if it fits the smi range. |
2428 __ SmiUntag(ip, right); | 2455 __ SmiUntag(ip, right); |
2429 // Do multiplication | 2456 // Do multiplication |
2430 // scratch1 = lower 32 bits of ip * left. | 2457 // scratch1 = lower 32 bits of ip * left. |
2431 // scratch2 = higher 32 bits of ip * left. | 2458 // scratch2 = higher 32 bits of ip * left. |
2432 __ smull(scratch1, scratch2, left, ip); | 2459 __ dmuls(scratch1, scratch2, left, ip); |
2433 // Check for overflowing the smi range - no overflow if higher 33 bits of | 2460 // Check for overflowing the smi range - no overflow if higher 33 bits of |
2434 // the result are identical. | 2461 // the result are identical. |
2435 __ mov(ip, Operand(scratch1, ASR, 31)); | 2462 __ asr(ip, scratch1, Operand(31)); |
2436 __ cmp(ip, Operand(scratch2)); | 2463 __ cmp(ip, scratch2); |
2437 __ b(ne, ¬_smi_result); | 2464 __ b(ne, ¬_smi_result); |
2438 // Go slow on zero result to handle -0. | 2465 // Go slow on zero result to handle -0. |
2439 __ cmp(scratch1, Operand(0)); | 2466 __ tst(scratch1, scratch1); |
2440 __ mov(right, Operand(scratch1), LeaveCC, ne); | 2467 __ bt_near(&skip_if_true); |
2441 __ Ret(ne); | 2468 __ mov(right, scratch1); |
| 2469 __ rts(); |
| 2470 __ bind(&skip_if_true); |
2442 // We need -0 if we were multiplying a negative number with 0 to get 0. | 2471 // We need -0 if we were multiplying a negative number with 0 to get 0. |
2443 // We know one of them was zero. | 2472 // We know one of them was zero. |
2444 __ add(scratch2, right, Operand(left), SetCC); | 2473 __ add(scratch2, right, left); |
2445 __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl); | 2474 __ cmpge(scratch2, Operand(0)); |
2446 __ Ret(pl); // Return smi 0 if the non-zero one was positive. | 2475 __ bf_near(&skip_if_false); |
| 2476 __ mov(right, Operand(Smi::FromInt(0))); |
| 2477 __ rts(); // Return smi 0 if the non-zero one was positive. |
| 2478 __ bind(&skip_if_false); |
2447 // We fall through here if we multiplied a negative number with 0, because | 2479 // We fall through here if we multiplied a negative number with 0, because |
2448 // that would mean we should produce -0. | 2480 // that would mean we should produce -0. |
2449 break; | 2481 break; |
2450 case Token::DIV: | 2482 case Token::DIV: |
2451 // Check for power of two on the right hand side. | 2483 // Check for power of two on the right hand side. |
2452 __ JumpIfNotPowerOfTwoOrZero(right, scratch1, ¬_smi_result); | 2484 __ JumpIfNotPowerOfTwoOrZero(right, scratch1, ¬_smi_result); |
2453 // Check for positive and no remainder (scratch1 contains right - 1). | 2485 // Check for positive and no remainder (scratch1 contains right - 1). |
2454 __ orr(scratch2, scratch1, Operand(0x80000000u)); | 2486 __ orr(scratch2, scratch1, Operand(0x80000000u)); |
2455 __ tst(left, scratch2); | 2487 __ tst(left, scratch2); |
2456 __ b(ne, ¬_smi_result); | 2488 __ b(ne, ¬_smi_result); |
2457 | 2489 |
2458 // Perform division by shifting. | 2490 // Perform division by shifting. |
2459 __ CountLeadingZeros(scratch1, scratch1, scratch2); | 2491 __ CountLeadingZeros(scratch1, scratch1, scratch2); |
2460 __ rsb(scratch1, scratch1, Operand(31)); | 2492 __ rsb(scratch1, scratch1, Operand(31)); |
2461 __ mov(right, Operand(left, LSR, scratch1)); | 2493 __ lsr(right, left, scratch1); |
2462 __ Ret(); | 2494 __ Ret(); |
2463 break; | 2495 break; |
2464 case Token::MOD: | 2496 case Token::MOD: |
2465 // Check for two positive smis. | 2497 // Check for two positive smis. |
2466 __ orr(scratch1, left, Operand(right)); | 2498 __ orr(scratch1, left, right); |
2467 __ tst(scratch1, Operand(0x80000000u | kSmiTagMask)); | 2499 __ tst(scratch1, Operand(0x80000000u | kSmiTagMask)); |
2468 __ b(ne, ¬_smi_result); | 2500 __ b(ne, ¬_smi_result); |
2469 | 2501 |
2470 // Check for power of two on the right hand side. | 2502 // Check for power of two on the right hand side. |
2471 __ JumpIfNotPowerOfTwoOrZero(right, scratch1, ¬_smi_result); | 2503 __ JumpIfNotPowerOfTwoOrZero(right, scratch1, ¬_smi_result); |
2472 | 2504 |
2473 // Perform modulus by masking. | 2505 // Perform modulus by masking. |
2474 __ and_(right, left, Operand(scratch1)); | 2506 __ land(right, left, scratch1); |
2475 __ Ret(); | 2507 __ Ret(); |
2476 break; | 2508 break; |
2477 case Token::BIT_OR: | 2509 case Token::BIT_OR: |
2478 __ orr(right, left, Operand(right)); | 2510 __ orr(right, left, right); |
2479 __ Ret(); | 2511 __ Ret(); |
2480 break; | 2512 break; |
2481 case Token::BIT_AND: | 2513 case Token::BIT_AND: |
2482 __ and_(right, left, Operand(right)); | 2514 __ land(right, left, right); |
2483 __ Ret(); | 2515 __ Ret(); |
2484 break; | 2516 break; |
2485 case Token::BIT_XOR: | 2517 case Token::BIT_XOR: |
2486 __ eor(right, left, Operand(right)); | 2518 __ eor(right, left, right); |
2487 __ Ret(); | 2519 __ Ret(); |
2488 break; | 2520 break; |
2489 case Token::SAR: | 2521 case Token::SAR: |
2490 // Remove tags from right operand. | 2522 // Remove tags from right operand. |
2491 __ GetLeastBitsFromSmi(scratch1, right, 5); | 2523 __ GetLeastBitsFromSmi(scratch1, right, 5); |
2492 __ mov(right, Operand(left, ASR, scratch1)); | 2524 __ asr(right, left, scratch1); |
2493 // Smi tag result. | 2525 // Smi tag result. |
2494 __ bic(right, right, Operand(kSmiTagMask)); | 2526 __ bic(right, right, Operand(kSmiTagMask)); |
2495 __ Ret(); | 2527 __ Ret(); |
2496 break; | 2528 break; |
2497 case Token::SHR: | 2529 case Token::SHR: |
2498 // Remove tags from operands. We can't do this on a 31 bit number | 2530 // Remove tags from operands. We can't do this on a 31 bit number |
2499 // because then the 0s get shifted into bit 30 instead of bit 31. | 2531 // because then the 0s get shifted into bit 30 instead of bit 31. |
2500 __ SmiUntag(scratch1, left); | 2532 __ SmiUntag(scratch1, left); |
2501 __ GetLeastBitsFromSmi(scratch2, right, 5); | 2533 __ GetLeastBitsFromSmi(scratch2, right, 5); |
2502 __ mov(scratch1, Operand(scratch1, LSR, scratch2)); | 2534 __ lsr(scratch1, scratch1, scratch2); |
2503 // Unsigned shift is not allowed to produce a negative number, so | 2535 // Unsigned shift is not allowed to produce a negative number, so |
2504 // check the sign bit and the sign bit after Smi tagging. | 2536 // check the sign bit and the sign bit after Smi tagging. |
2505 __ tst(scratch1, Operand(0xc0000000)); | 2537 __ tst(scratch1, Operand(0xc0000000)); |
2506 __ b(ne, ¬_smi_result); | 2538 __ b(ne, ¬_smi_result); |
2507 // Smi tag result. | 2539 // Smi tag result. |
2508 __ SmiTag(right, scratch1); | 2540 __ SmiTag(right, scratch1); |
2509 __ Ret(); | 2541 __ Ret(); |
2510 break; | 2542 break; |
2511 case Token::SHL: | 2543 case Token::SHL: |
2512 // Remove tags from operands. | 2544 // Remove tags from operands. |
2513 __ SmiUntag(scratch1, left); | 2545 __ SmiUntag(scratch1, left); |
2514 __ GetLeastBitsFromSmi(scratch2, right, 5); | 2546 __ GetLeastBitsFromSmi(scratch2, right, 5); |
2515 __ mov(scratch1, Operand(scratch1, LSL, scratch2)); | 2547 __ lsl(scratch1, scratch1, scratch2); |
2516 // Check that the signed result fits in a Smi. | 2548 // Check that the signed result fits in a Smi. |
2517 __ add(scratch2, scratch1, Operand(0x40000000), SetCC); | 2549 __ add(scratch2, scratch1, Operand(0x40000000)); |
2518 __ b(mi, ¬_smi_result); | 2550 __ cmpge(scratch2, Operand(0)); |
| 2551 __ bf(¬_smi_result); |
2519 __ SmiTag(right, scratch1); | 2552 __ SmiTag(right, scratch1); |
2520 __ Ret(); | 2553 __ Ret(); |
2521 break; | 2554 break; |
2522 default: | 2555 default: |
2523 UNREACHABLE(); | 2556 UNREACHABLE(); |
2524 } | 2557 } |
2525 __ bind(¬_smi_result); | 2558 __ bind(¬_smi_result); |
2526 } | 2559 } |
2527 | 2560 |
2528 | 2561 |
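Three of the checks in GenerateSmiSmiOperation above reduce to plain integer identities: addv/subv set T exactly when the 64-bit result differs from its 32-bit truncation; the dmuls product fits in 32 bits iff its high word equals the sign extension of its low word (the "higher 33 bits identical" test); and a value n is a valid smi payload iff n + 0x40000000 is non-negative, i.e. -2^30 <= n < 2^30. A runnable sketch of all three, assuming two's-complement int32 (the helper names are ours, not V8's):

#include <cassert>
#include <cstdint>

bool AddOverflows(int32_t a, int32_t b) {  // what addv's T bit reports
  int64_t wide = (int64_t)a + b;
  return wide != (int32_t)wide;
}

bool MulFitsInt32(int32_t a, int32_t b) {  // the dmuls high/low test
  int64_t wide = (int64_t)a * b;
  int32_t lo = (int32_t)wide;
  int32_t hi = (int32_t)(wide >> 32);
  return hi == (lo >> 31);                 // asr #31 + cmp in the stub
}

bool FitsSmiPayload(int32_t n) {           // add 0x40000000 + cmpge
  return (int32_t)((uint32_t)n + 0x40000000u) >= 0;
}

int main() {
  assert(!AddOverflows(1, 2) && AddOverflows(INT32_MAX, 1));
  assert(MulFitsInt32(-3, 7) && !MulFitsInt32(1 << 20, 1 << 20));
  assert(FitsSmiPayload((1 << 30) - 1) && !FitsSmiPayload(1 << 30));
  assert(FitsSmiPayload(-(1 << 30)) && !FitsSmiPayload(-(1 << 30) - 1));
  return 0;
}
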
2529 void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, | 2562 void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, |
2530 bool smi_operands, | 2563 bool smi_operands, |
2531 Label* not_numbers, | 2564 Label* not_numbers, |
2532 Label* gc_required) { | 2565 Label* gc_required) { |
2533 Register left = r1; | 2566 Register left = r1; |
2534 Register right = r0; | 2567 Register right = r0; |
2535 Register scratch1 = r7; | 2568 Register scratch1 = r7; |
2536 Register scratch2 = r9; | 2569 Register scratch2 = r9; |
2537 Register scratch3 = r4; | 2570 Register scratch3 = r4; |
2538 | 2571 |
2539 ASSERT(smi_operands || (not_numbers != NULL)); | 2572 ASSERT(smi_operands || (not_numbers != NULL)); |
2540 if (smi_operands) { | 2573 if (smi_operands && FLAG_debug_code) { |
2541 __ AssertSmi(left); | 2574 __ AbortIfNotSmi(left); |
2542 __ AssertSmi(right); | 2575 __ AbortIfNotSmi(right); |
2543 } | 2576 } |
2544 | 2577 |
2545 Register heap_number_map = r6; | 2578 Register heap_number_map = r6; |
2546 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 2579 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
2547 | 2580 |
2548 switch (op_) { | 2581 switch (op_) { |
2549 case Token::ADD: | 2582 case Token::ADD: |
2550 case Token::SUB: | 2583 case Token::SUB: |
2551 case Token::MUL: | 2584 case Token::MUL: |
2552 case Token::DIV: | 2585 case Token::DIV: |
2553 case Token::MOD: { | 2586 case Token::MOD: { |
2554 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 | 2587 // Load left and right operands into dr0 and dr2 or r0/r1 and r2/r3 |
2555 // depending on whether VFP3 is available or not. | 2588 // depending on whether the FPU is available or not. |
2556 FloatingPointHelper::Destination destination = | 2589 FloatingPointHelper::Destination destination = |
2557 CpuFeatures::IsSupported(VFP2) && | 2590 CpuFeatures::IsSupported(FPU) && |
2558 op_ != Token::MOD ? | 2591 op_ != Token::MOD ? |
2559 FloatingPointHelper::kVFPRegisters : | 2592 FloatingPointHelper::kVFPRegisters : |
2560 FloatingPointHelper::kCoreRegisters; | 2593 FloatingPointHelper::kCoreRegisters; |
2561 | 2594 |
2562 // Allocate new heap number for result. | 2595 // Allocate new heap number for result. |
2563 Register result = r5; | 2596 Register result = r5; |
2564 GenerateHeapResultAllocation( | 2597 GenerateHeapResultAllocation( |
2565 masm, result, heap_number_map, scratch1, scratch2, gc_required); | 2598 masm, result, heap_number_map, scratch1, scratch2, gc_required); |
2566 | 2599 |
2567 // Load the operands. | 2600 // Load the operands. |
2568 if (smi_operands) { | 2601 if (smi_operands) { |
2569 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); | 2602 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); |
2570 } else { | 2603 } else { |
2571 FloatingPointHelper::LoadOperands(masm, | 2604 FloatingPointHelper::LoadOperands(masm, |
2572 destination, | 2605 destination, |
2573 heap_number_map, | 2606 heap_number_map, |
2574 scratch1, | 2607 scratch1, |
2575 scratch2, | 2608 scratch2, |
2576 not_numbers); | 2609 not_numbers); |
2577 } | 2610 } |
2578 | 2611 |
2579 // Calculate the result. | 2612 // Calculate the result. |
2580 if (destination == FloatingPointHelper::kVFPRegisters) { | 2613 if (destination == FloatingPointHelper::kVFPRegisters) { |
2581 // Using VFP registers: | 2614 // Using FPU registers: |
2582 // d6: Left value | 2615 // dr0: Left value |
2583 // d7: Right value | 2616 // dr2: Right value |
2584 CpuFeatures::Scope scope(VFP2); | |
2585 switch (op_) { | 2617 switch (op_) { |
2586 case Token::ADD: | 2618 case Token::ADD: |
2587 __ vadd(d5, d6, d7); | 2619 __ fadd(dr0, dr2); |
2588 break; | 2620 break; |
2589 case Token::SUB: | 2621 case Token::SUB: |
2590 __ vsub(d5, d6, d7); | 2622 __ fsub(dr0, dr2); |
2591 break; | 2623 break; |
2592 case Token::MUL: | 2624 case Token::MUL: |
2593 __ vmul(d5, d6, d7); | 2625 __ fmul(dr0, dr2); |
2594 break; | 2626 break; |
2595 case Token::DIV: | 2627 case Token::DIV: |
2596 __ vdiv(d5, d6, d7); | 2628 __ fdiv(dr0, dr2); |
2597 break; | 2629 break; |
2598 default: | 2630 default: |
2599 UNREACHABLE(); | 2631 UNREACHABLE(); |
2600 } | 2632 } |
2601 | 2633 |
2602 __ sub(r0, result, Operand(kHeapObjectTag)); | 2634 __ sub(r0, result, Operand(kHeapObjectTag - HeapNumber::kValueOffset)); |
2603 __ vstr(d5, r0, HeapNumber::kValueOffset); | 2635 __ dstr(dr0, MemOperand(r0, 0)); |
2604 __ add(r0, r0, Operand(kHeapObjectTag)); | 2636 __ add(r0, r0, Operand(kHeapObjectTag - HeapNumber::kValueOffset)); |
2605 __ Ret(); | 2637 __ Ret(); |
2606 } else { | 2638 } else { |
2607 // Call the C function to handle the double operation. | 2639 // Call the C function to handle the double operation. |
2608 FloatingPointHelper::CallCCodeForDoubleOperation(masm, | 2640 FloatingPointHelper::CallCCodeForDoubleOperation(masm, |
2609 op_, | 2641 op_, |
2610 result, | 2642 result, |
2611 scratch1); | 2643 scratch1); |
2612 if (FLAG_debug_code) { | 2644 if (FLAG_debug_code) { |
2613 __ stop("Unreachable code."); | 2645 __ stop("Unreachable code."); |
2614 } | 2646 } |
(...skipping 11 matching lines...) |
2626 __ SmiUntag(r2, right); | 2658 __ SmiUntag(r2, right); |
2627 } else { | 2659 } else { |
2628 // Convert operands to 32-bit integers. Right in r2 and left in r3. | 2660 // Convert operands to 32-bit integers. Right in r2 and left in r3. |
2629 FloatingPointHelper::ConvertNumberToInt32(masm, | 2661 FloatingPointHelper::ConvertNumberToInt32(masm, |
2630 left, | 2662 left, |
2631 r3, | 2663 r3, |
2632 heap_number_map, | 2664 heap_number_map, |
2633 scratch1, | 2665 scratch1, |
2634 scratch2, | 2666 scratch2, |
2635 scratch3, | 2667 scratch3, |
2636 d0, | 2668 dr0, |
2637 not_numbers); | 2669 not_numbers); |
2638 FloatingPointHelper::ConvertNumberToInt32(masm, | 2670 FloatingPointHelper::ConvertNumberToInt32(masm, |
2639 right, | 2671 right, |
2640 r2, | 2672 r2, |
2641 heap_number_map, | 2673 heap_number_map, |
2642 scratch1, | 2674 scratch1, |
2643 scratch2, | 2675 scratch2, |
2644 scratch3, | 2676 scratch3, |
2645 d0, | 2677 dr0, |
2646 not_numbers); | 2678 not_numbers); |
2647 } | 2679 } |
2648 | 2680 |
2649 Label result_not_a_smi; | 2681 Label result_not_a_smi; |
2650 switch (op_) { | 2682 switch (op_) { |
2651 case Token::BIT_OR: | 2683 case Token::BIT_OR: |
2652 __ orr(r2, r3, Operand(r2)); | 2684 __ orr(r2, r3, r2); |
2653 break; | 2685 break; |
2654 case Token::BIT_XOR: | 2686 case Token::BIT_XOR: |
2655 __ eor(r2, r3, Operand(r2)); | 2687 __ eor(r2, r3, r2); |
2656 break; | 2688 break; |
2657 case Token::BIT_AND: | 2689 case Token::BIT_AND: |
2658 __ and_(r2, r3, Operand(r2)); | 2690 __ land(r2, r3, r2); |
2659 break; | 2691 break; |
2660 case Token::SAR: | 2692 case Token::SAR: |
2661 // Use only the 5 least significant bits of the shift count. | 2693 // Use only the 5 least significant bits of the shift count. |
2662 __ GetLeastBitsFromInt32(r2, r2, 5); | 2694 __ GetLeastBitsFromInt32(r2, r2, 5); |
2663 __ mov(r2, Operand(r3, ASR, r2)); | 2695 __ asr(r2, r3, r2); |
2664 break; | 2696 break; |
2665 case Token::SHR: | 2697 case Token::SHR: |
2666 // Use only the 5 least significant bits of the shift count. | 2698 // Use only the 5 least significant bits of the shift count. |
2667 __ GetLeastBitsFromInt32(r2, r2, 5); | 2699 __ GetLeastBitsFromInt32(r2, r2, 5); |
2668 __ mov(r2, Operand(r3, LSR, r2), SetCC); | 2700 __ lsr(r2, r3, r2); |
| 2701 __ cmpge(r2, Operand(0)); // Check non-negative (see comment below). |
2669 // SHR is special because it is required to produce a positive answer. | 2702 // SHR is special because it is required to produce a positive answer. |
2670 // The code below for writing into heap numbers isn't capable of | 2703 // The code below for writing into heap numbers isn't capable of |
2671 // writing the register as an unsigned int so we go to slow case if we | 2704 // writing the register as an unsigned int so we go to slow case if we |
2672 // hit this case. | 2705 // hit this case. |
2673 if (CpuFeatures::IsSupported(VFP2)) { | 2706 if (CpuFeatures::IsSupported(FPU)) { |
2674 __ b(mi, &result_not_a_smi); | 2707 __ bf(&result_not_a_smi); |
2675 } else { | 2708 } else { |
2676 __ b(mi, not_numbers); | 2709 __ bf(not_numbers); |
2677 } | 2710 } |
2678 break; | 2711 break; |
2679 case Token::SHL: | 2712 case Token::SHL: |
2680 // Use only the 5 least significant bits of the shift count. | 2713 // Use only the 5 least significant bits of the shift count. |
2681 __ GetLeastBitsFromInt32(r2, r2, 5); | 2714 __ GetLeastBitsFromInt32(r2, r2, 5); |
2682 __ mov(r2, Operand(r3, LSL, r2)); | 2715 __ lsl(r2, r3, r2); |
2683 break; | 2716 break; |
2684 default: | 2717 default: |
2685 UNREACHABLE(); | 2718 UNREACHABLE(); |
2686 } | 2719 } |
2687 | 2720 |
2688 // Check that the *signed* result fits in a smi. | 2721 // Check that the *signed* result fits in a smi. |
2689 __ add(r3, r2, Operand(0x40000000), SetCC); | 2722 __ add(r3, r2, Operand(0x40000000)); |
2690 __ b(mi, &result_not_a_smi); | 2723 __ cmpge(r3, Operand(0)); |
| 2724 __ bf(&result_not_a_smi); |
2691 __ SmiTag(r0, r2); | 2725 __ SmiTag(r0, r2); |
2692 __ Ret(); | 2726 __ Ret(); |
2693 | 2727 |
2694 // Allocate new heap number for result. | 2728 // Allocate new heap number for result. |
2695 __ bind(&result_not_a_smi); | 2729 __ bind(&result_not_a_smi); |
2696 Register result = r5; | 2730 Register result = r5; |
2697 if (smi_operands) { | 2731 if (smi_operands) { |
2698 __ AllocateHeapNumber( | 2732 __ AllocateHeapNumber( |
2699 result, scratch1, scratch2, heap_number_map, gc_required); | 2733 result, scratch1, scratch2, heap_number_map, gc_required); |
2700 } else { | 2734 } else { |
2701 GenerateHeapResultAllocation( | 2735 GenerateHeapResultAllocation( |
2702 masm, result, heap_number_map, scratch1, scratch2, gc_required); | 2736 masm, result, heap_number_map, scratch1, scratch2, gc_required); |
2703 } | 2737 } |
2704 | 2738 |
2705 // r2: Answer as signed int32. | 2739 // r2: Answer as signed int32. |
2706 // r5: Heap number to write answer into. | 2740 // r5: Heap number to write answer into. |
2707 | 2741 |
2708 // Nothing can go wrong now, so move the heap number to r0, which is the | 2742 // Nothing can go wrong now, so move the heap number to r0, which is the |
2709 // result. | 2743 // result. |
2710 __ mov(r0, Operand(r5)); | 2744 __ mov(r0, r5); // TODO(stm): revisit this; it could be done better. |
2711 | 2745 |
2712 if (CpuFeatures::IsSupported(VFP2)) { | 2746 if (CpuFeatures::IsSupported(FPU)) { |
2713 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As | |
2714 // mentioned above SHR needs to always produce a positive result. | |
2715 CpuFeatures::Scope scope(VFP2); | |
2716 __ vmov(s0, r2); | |
2717 if (op_ == Token::SHR) { | 2747 if (op_ == Token::SHR) { |
2718 __ vcvt_f64_u32(d0, s0); | 2748 __ dufloat(dr0, r2, dr2, sh4_rtmp); |
2719 } else { | 2749 } else { |
2720 __ vcvt_f64_s32(d0, s0); | 2750 __ dfloat(dr0, r2); |
2721 } | 2751 } |
2722 __ sub(r3, r0, Operand(kHeapObjectTag)); | 2752 __ sub(r3, r0, Operand(kHeapObjectTag - HeapNumber::kValueOffset)); |
2723 __ vstr(d0, r3, HeapNumber::kValueOffset); | 2753 __ dstr(dr0, MemOperand(r3, 0), r3); |
2724 __ Ret(); | 2754 __ Ret(); |
2725 } else { | 2755 } else { |
2726 // Tail call that writes the int32 in r2 to the heap number in r0, using | 2756 // Tail call that writes the int32 in r2 to the heap number in r0, using |
2727 // r3 as scratch. r0 is preserved and returned. | 2757 // r3 as scratch. r0 is preserved and returned. |
2728 WriteInt32ToHeapNumberStub stub(r2, r0, r3); | 2758 WriteInt32ToHeapNumberStub stub(r2, r0, r3); |
2729 __ TailCallStub(&stub); | 2759 __ TailCallStub(&stub); |
2730 } | 2760 } |
2731 break; | 2761 break; |
2732 } | 2762 } |
2733 default: | 2763 default: |
(...skipping 11 matching lines...) |
2745 Label* use_runtime, | 2775 Label* use_runtime, |
2746 Label* gc_required, | 2776 Label* gc_required, |
2747 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { | 2777 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { |
2748 Label not_smis; | 2778 Label not_smis; |
2749 | 2779 |
2750 Register left = r1; | 2780 Register left = r1; |
2751 Register right = r0; | 2781 Register right = r0; |
2752 Register scratch1 = r7; | 2782 Register scratch1 = r7; |
2753 | 2783 |
2754 // Perform combined smi check on both operands. | 2784 // Perform combined smi check on both operands. |
2755 __ orr(scratch1, left, Operand(right)); | 2785 __ orr(scratch1, left, right); |
2756 STATIC_ASSERT(kSmiTag == 0); | 2786 STATIC_ASSERT(kSmiTag == 0); |
2757 __ JumpIfNotSmi(scratch1, ¬_smis); | 2787 __ JumpIfNotSmi(scratch1, ¬_smis); |
2758 | 2788 |
2759 // If the smi-smi operation results in a smi, a return is generated. | 2789 // If the smi-smi operation results in a smi, a return is generated. |
2760 GenerateSmiSmiOperation(masm); | 2790 GenerateSmiSmiOperation(masm); |
2761 | 2791 |
2762 // If heap number results are possible generate the result in an allocated | 2792 // If heap number results are possible generate the result in an allocated |
2763 // heap number. | 2793 // heap number. |
2764 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { | 2794 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { |
2765 GenerateFPOperation(masm, true, use_runtime, gc_required); | 2795 GenerateFPOperation(masm, true, use_runtime, gc_required); |
(...skipping 43 matching lines...) |
2809 ASSERT(op_ == Token::ADD); | 2839 ASSERT(op_ == Token::ADD); |
2810 // If both arguments are strings, call the string add stub. | 2840 // If both arguments are strings, call the string add stub. |
2811 // Otherwise, do a transition. | 2841 // Otherwise, do a transition. |
2812 | 2842 |
2813 // Registers containing left and right operands respectively. | 2843 // Registers containing left and right operands respectively. |
2814 Register left = r1; | 2844 Register left = r1; |
2815 Register right = r0; | 2845 Register right = r0; |
2816 | 2846 |
2817 // Test if left operand is a string. | 2847 // Test if left operand is a string. |
2818 __ JumpIfSmi(left, &call_runtime); | 2848 __ JumpIfSmi(left, &call_runtime); |
2819 __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE); | 2849 __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE, ge); |
2820 __ b(ge, &call_runtime); | 2850 __ bt(&call_runtime); |
2821 | 2851 |
2822 // Test if right operand is a string. | 2852 // Test if right operand is a string. |
2823 __ JumpIfSmi(right, &call_runtime); | 2853 __ JumpIfSmi(right, &call_runtime); |
2824 __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE); | 2854 __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE, ge); |
2825 __ b(ge, &call_runtime); | 2855 __ bt(&call_runtime); |
2826 | 2856 |
2827 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); | 2857 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); |
2828 GenerateRegisterArgsPush(masm); | 2858 GenerateRegisterArgsPush(masm); |
2829 __ TailCallStub(&string_add_stub); | 2859 __ TailCallStub(&string_add_stub); |
2830 | 2860 |
2831 __ bind(&call_runtime); | 2861 __ bind(&call_runtime); |
2832 GenerateTypeTransition(masm); | 2862 GenerateTypeTransition(masm); |
2833 } | 2863 } |
2834 | 2864 |
2835 | 2865 |
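The string tests above lean on V8's instance-type ordering: every string type is numbered below FIRST_NONSTRING_TYPE, so CompareObjectType with the ge condition answers "not a string" in a single compare. A reduced sketch of that ordering (the enum values here are illustrative, not the real instance-type map):

#include <cassert>

enum InstanceType {       // reduced, illustrative ordering
  SEQ_ASCII_STRING_TYPE,
  CONS_STRING_TYPE,
  FIRST_NONSTRING_TYPE,   // every type from here on is not a string
  HEAP_NUMBER_TYPE = FIRST_NONSTRING_TYPE,
  JS_OBJECT_TYPE
};

bool IsString(InstanceType t) { return t < FIRST_NONSTRING_TYPE; }

int main() {
  assert(IsString(CONS_STRING_TYPE));
  assert(!IsString(JS_OBJECT_TYPE));
  return 0;
}
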
2836 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { | 2866 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
2837 ASSERT(operands_type_ == BinaryOpIC::INT32); | 2867 ASSERT(operands_type_ == BinaryOpIC::INT32); |
2838 | 2868 |
2839 Register left = r1; | 2869 Register left = r1; |
2840 Register right = r0; | 2870 Register right = r0; |
2841 Register scratch1 = r7; | 2871 Register scratch1 = r7; |
2842 Register scratch2 = r9; | 2872 Register scratch2 = r9; |
2843 DwVfpRegister double_scratch = d0; | 2873 DwVfpRegister double_scratch = dr0; |
2844 | 2874 |
2845 Register heap_number_result = no_reg; | 2875 Register heap_number_result = no_reg; |
2846 Register heap_number_map = r6; | 2876 Register heap_number_map = r6; |
2847 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 2877 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
2848 | 2878 |
2849 Label call_runtime; | 2879 Label call_runtime; |
2850 // Labels for type transition, used for wrong input or output types. | 2880 // Labels for type transition, used for wrong input or output types. |
2851 // Both labels are currently bound to the same position. We use two | 2881 // Both labels are currently bound to the same position. We use two |
2852 // different labels to distinguish the cause of the type transition. | 2882 // different labels to distinguish the cause of the type transition. |
2853 Label transition; | 2883 Label transition; |
2854 | 2884 |
2855 // Smi-smi fast case. | 2885 // Smi-smi fast case. |
2856 Label skip; | 2886 Label skip; |
2857 __ orr(scratch1, left, right); | 2887 __ orr(scratch1, left, right); |
2858 __ JumpIfNotSmi(scratch1, &skip); | 2888 __ JumpIfNotSmi(scratch1, &skip); |
2859 GenerateSmiSmiOperation(masm); | 2889 GenerateSmiSmiOperation(masm); |
2860 // Fall through if the result is not a smi. | 2890 // Fall through if the result is not a smi. |
2861 __ bind(&skip); | 2891 __ bind(&skip); |
2862 | 2892 |
2863 switch (op_) { | 2893 switch (op_) { |
2864 case Token::ADD: | 2894 case Token::ADD: |
2865 case Token::SUB: | 2895 case Token::SUB: |
2866 case Token::MUL: | 2896 case Token::MUL: |
2867 case Token::DIV: | 2897 case Token::DIV: |
2868 case Token::MOD: { | 2898 case Token::MOD: { |
2869 // Load both operands and check that they are 32-bit integer. | 2899 // Load both operands and check that they are 32-bit integer. |
2870 // Jump to type transition if they are not. The registers r0 and r1 (right | 2900 // Jump to type transition if they are not. The registers r0 and r1 (right |
2871 // and left) are preserved for the runtime call. | 2901 // and left) are preserved for the runtime call. |
2872 FloatingPointHelper::Destination destination = | 2902 FloatingPointHelper::Destination destination = |
2873 (CpuFeatures::IsSupported(VFP2) && op_ != Token::MOD) | 2903 (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD) |
2874 ? FloatingPointHelper::kVFPRegisters | 2904 ? FloatingPointHelper::kVFPRegisters |
2875 : FloatingPointHelper::kCoreRegisters; | 2905 : FloatingPointHelper::kCoreRegisters; |
2876 | 2906 |
2877 FloatingPointHelper::LoadNumberAsInt32Double(masm, | 2907 FloatingPointHelper::LoadNumberAsInt32Double(masm, |
2878 right, | 2908 right, |
2879 destination, | 2909 destination, |
2880 d7, | 2910 dr2, |
2881 d8, | |
2882 r2, | 2911 r2, |
2883 r3, | 2912 r3, |
2884 heap_number_map, | 2913 heap_number_map, |
2885 scratch1, | 2914 scratch1, |
2886 scratch2, | 2915 scratch2, |
2887 s0, | 2916 fr4, |
2888 &transition); | 2917 &transition); |
2889 FloatingPointHelper::LoadNumberAsInt32Double(masm, | 2918 FloatingPointHelper::LoadNumberAsInt32Double(masm, |
2890 left, | 2919 left, |
2891 destination, | 2920 destination, |
2892 d6, | 2921 dr0, |
2893 d8, | |
2894 r4, | 2922 r4, |
2895 r5, | 2923 r5, |
2896 heap_number_map, | 2924 heap_number_map, |
2897 scratch1, | 2925 scratch1, |
2898 scratch2, | 2926 scratch2, |
2899 s0, | 2927 fr4, |
2900 &transition); | 2928 &transition); |
2901 | 2929 |
2902 if (destination == FloatingPointHelper::kVFPRegisters) { | 2930 if (destination == FloatingPointHelper::kVFPRegisters) { |
2903 CpuFeatures::Scope scope(VFP2); | |
2904 Label return_heap_number; | 2931 Label return_heap_number; |
2905 switch (op_) { | 2932 switch (op_) { |
2906 case Token::ADD: | 2933 case Token::ADD: |
2907 __ vadd(d5, d6, d7); | 2934 __ fadd(dr0, dr2); |
2908 break; | 2935 break; |
2909 case Token::SUB: | 2936 case Token::SUB: |
2910 __ vsub(d5, d6, d7); | 2937 __ fsub(dr0, dr2); |
2911 break; | 2938 break; |
2912 case Token::MUL: | 2939 case Token::MUL: |
2913 __ vmul(d5, d6, d7); | 2940 __ fmul(dr0, dr2); |
2914 break; | 2941 break; |
2915 case Token::DIV: | 2942 case Token::DIV: |
2916 __ vdiv(d5, d6, d7); | 2943 __ fdiv(dr0, dr2); |
2917 break; | 2944 break; |
2918 default: | 2945 default: |
2919 UNREACHABLE(); | 2946 UNREACHABLE(); |
2920 } | 2947 } |
2921 | 2948 |
2922 if (op_ != Token::DIV) { | 2949 if (op_ != Token::DIV) { |
2923 // These operations produce an integer result. | 2950 // These operations produce an integer result. |
2924 // Try to return a smi if we can. | 2951 // Try to return a smi if we can. |
2925 // Otherwise return a heap number if allowed, or jump to type | 2952 // Otherwise return a heap number if allowed, or jump to type |
2926 // transition. | 2953 // transition. |
2927 | 2954 |
2928 __ EmitVFPTruncate(kRoundToZero, | 2955 __ EmitFPUTruncate(kRoundToZero, |
2929 scratch1, | |
2930 d5, | |
2931 scratch2, | 2956 scratch2, |
2932 d8); | 2957 dr0, |
| 2958 scratch1); |
2933 | 2959 |
2934 if (result_type_ <= BinaryOpIC::INT32) { | 2960 if (result_type_ <= BinaryOpIC::INT32) { |
2935 // If the ne condition is set, result does | 2961 // If the ne condition is set, result does |
2936 // not fit in a 32-bit integer. | 2962 // not fit in a 32-bit integer. |
2937 __ b(ne, &transition); | 2963 __ b(ne, &transition); |
2938 } | 2964 } |
2939 | 2965 |
2940 // Check if the result fits in a smi. | 2966 // Check if the result fits in a smi. |
2941 __ add(scratch2, scratch1, Operand(0x40000000), SetCC); | 2967 __ add(scratch2, scratch1, Operand(0x40000000)); |
| 2968 __ cmpge(scratch2, Operand(0)); |
2942 // If not try to return a heap number. | 2969 // If not try to return a heap number. |
2943 __ b(mi, &return_heap_number); | 2970 __ bt(&return_heap_number); |
2944 // Check for minus zero. Return heap number for minus zero. | 2971 // Check for minus zero. Return heap number for minus zero. |
2945 Label not_zero; | 2972 Label not_zero; |
2946 __ cmp(scratch1, Operand::Zero()); | 2973 __ cmp(scratch1, Operand(0)); |
2947 __ b(ne, ¬_zero); | 2974 __ b(ne, ¬_zero); |
2948 __ vmov(scratch2, d5.high()); | 2975 __ isingle(scratch2, dr0.high()); |
2949 __ tst(scratch2, Operand(HeapNumber::kSignMask)); | 2976 __ tst(scratch2, Operand(HeapNumber::kSignMask)); |
2950 __ b(ne, &return_heap_number); | 2977 __ b(ne, &return_heap_number); |
2951 __ bind(¬_zero); | 2978 __ bind(¬_zero); |
2952 | 2979 |
2953 // Tag the result and return. | 2980 // Tag the result and return. |
2954 __ SmiTag(r0, scratch1); | 2981 __ SmiTag(r0, scratch1); |
2955 __ Ret(); | 2982 __ Ret(); |
2956 } else { | 2983 } else { |
2957 // DIV just falls through to allocating a heap number. | 2984 // DIV just falls through to allocating a heap number. |
2958 } | 2985 } |
2959 | 2986 |
2960 __ bind(&return_heap_number); | 2987 __ bind(&return_heap_number); |
2961 // Return a heap number, or fall through to type transition or runtime | 2988 // Return a heap number, or fall through to type transition or runtime |
2962 // call if we can't. | 2989 // call if we can't. |
2963 if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER | 2990 if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER |
2964 : BinaryOpIC::INT32)) { | 2991 : BinaryOpIC::INT32)) { |
2965 // We are using vfp registers so r5 is available. | 2992 // We are using FPU registers so r5 is available. |
2966 heap_number_result = r5; | 2993 heap_number_result = r5; |
2967 GenerateHeapResultAllocation(masm, | 2994 GenerateHeapResultAllocation(masm, |
2968 heap_number_result, | 2995 heap_number_result, |
2969 heap_number_map, | 2996 heap_number_map, |
2970 scratch1, | 2997 scratch1, |
2971 scratch2, | 2998 scratch2, |
2972 &call_runtime); | 2999 &call_runtime); |
2973 __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); | 3000 __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); |
2974 __ vstr(d5, r0, HeapNumber::kValueOffset); | 3001 __ dstr(dr0, MemOperand(r0, HeapNumber::kValueOffset)); |
2975 __ mov(r0, heap_number_result); | 3002 __ mov(r0, heap_number_result); |
2976 __ Ret(); | 3003 __ Ret(); |
2977 } | 3004 } |
2978 | 3005 |
2979 // A DIV operation expecting an integer result falls through | 3006 // A DIV operation expecting an integer result falls through |
2980 // to type transition. | 3007 // to type transition. |
2981 | 3008 |
2982 } else { | 3009 } else { |
2983 // We preserved r0 and r1 to be able to call runtime. | 3010 // We preserved r0 and r1 to be able to call runtime. |
2984 // Save the left value on the stack. | 3011 // Save the left value on the stack. |
(...skipping 39 matching lines...) |
3024 // Convert operands to 32-bit integers. Right in r2 and left in r3. The | 3051 // Convert operands to 32-bit integers. Right in r2 and left in r3. The |
3025 // registers r0 and r1 (right and left) are preserved for the runtime | 3052 // registers r0 and r1 (right and left) are preserved for the runtime |
3026 // call. | 3053 // call. |
3027 FloatingPointHelper::LoadNumberAsInt32(masm, | 3054 FloatingPointHelper::LoadNumberAsInt32(masm, |
3028 left, | 3055 left, |
3029 r3, | 3056 r3, |
3030 heap_number_map, | 3057 heap_number_map, |
3031 scratch1, | 3058 scratch1, |
3032 scratch2, | 3059 scratch2, |
3033 scratch3, | 3060 scratch3, |
3034 d0, | 3061 dr0, |
3035 d1, | |
3036 &transition); | 3062 &transition); |
3037 FloatingPointHelper::LoadNumberAsInt32(masm, | 3063 FloatingPointHelper::LoadNumberAsInt32(masm, |
3038 right, | 3064 right, |
3039 r2, | 3065 r2, |
3040 heap_number_map, | 3066 heap_number_map, |
3041 scratch1, | 3067 scratch1, |
3042 scratch2, | 3068 scratch2, |
3043 scratch3, | 3069 scratch3, |
3044 d0, | 3070 dr0, |
3045 d1, | |
3046 &transition); | 3071 &transition); |
3047 | 3072 |
3048 // The ECMA-262 standard specifies that, for shift operations, only the | 3073 // The ECMA-262 standard specifies that, for shift operations, only the |
3049 // 5 least significant bits of the shift value should be used. | 3074 // 5 least significant bits of the shift value should be used. |
3050 switch (op_) { | 3075 switch (op_) { |
3051 case Token::BIT_OR: | 3076 case Token::BIT_OR: |
3052 __ orr(r2, r3, Operand(r2)); | 3077 __ orr(r2, r3, r2); |
3053 break; | 3078 break; |
3054 case Token::BIT_XOR: | 3079 case Token::BIT_XOR: |
3055 __ eor(r2, r3, Operand(r2)); | 3080 __ eor(r2, r3, r2); |
3056 break; | 3081 break; |
3057 case Token::BIT_AND: | 3082 case Token::BIT_AND: |
3058 __ and_(r2, r3, Operand(r2)); | 3083 __ land(r2, r3, r2); |
3059 break; | 3084 break; |
3060 case Token::SAR: | 3085 case Token::SAR: |
3061 __ and_(r2, r2, Operand(0x1f)); | 3086 __ land(r2, r2, Operand(0x1f)); |
3062 __ mov(r2, Operand(r3, ASR, r2)); | 3087 __ asr(r2, r3, r2); |
3063 break; | 3088 break; |
3064 case Token::SHR: | 3089 case Token::SHR: |
3065 __ and_(r2, r2, Operand(0x1f)); | 3090 __ land(r2, r2, Operand(0x1f)); |
3066 __ mov(r2, Operand(r3, LSR, r2), SetCC); | 3091 __ lsr(r2, r3, r2); |
3067 // SHR is special because it is required to produce a positive answer. | 3092 // SHR is special because it is required to produce a positive answer. |
3068 // We only get a negative result if the shift value (r2) is 0. | 3093 // We only get a negative result if the shift value (r2) is 0. |
3069 // This result cannot be represented as a signed 32-bit integer, try | 3094 // This result cannot be represented as a signed 32-bit integer, try |
3070 // to return a heap number if we can. | 3095 // to return a heap number if we can. |
3071 // The non vfp2 code does not support this special case, so jump to | 3096 // The non-FPU code does not support this special case, so jump to |
3072 // runtime if we don't support it. | 3097 // runtime if we don't support it. |
3073 if (CpuFeatures::IsSupported(VFP2)) { | 3098 __ cmpge(r2, Operand(0)); |
3074 __ b(mi, (result_type_ <= BinaryOpIC::INT32) | 3099 if (CpuFeatures::IsSupported(FPU)) { |
| 3100 __ b(f, (result_type_ <= BinaryOpIC::INT32) |
3075 ? &transition | 3101 ? &transition |
3076 : &return_heap_number); | 3102 : &return_heap_number); |
3077 } else { | 3103 } else { |
3078 __ b(mi, (result_type_ <= BinaryOpIC::INT32) | 3104 __ b(f, (result_type_ <= BinaryOpIC::INT32) |
3079 ? &transition | 3105 ? &transition |
3080 : &call_runtime); | 3106 : &call_runtime); |
3081 } | 3107 } |
3082 break; | 3108 break; |
3083 case Token::SHL: | 3109 case Token::SHL: |
3084 __ and_(r2, r2, Operand(0x1f)); | 3110 __ land(r2, r2, Operand(0x1f)); |
3085 __ mov(r2, Operand(r3, LSL, r2)); | 3111 __ lsl(r2, r3, r2); |
3086 break; | 3112 break; |
3087 default: | 3113 default: |
3088 UNREACHABLE(); | 3114 UNREACHABLE(); |
3089 } | 3115 } |
3090 | 3116 |
3091 // Check if the result fits in a smi. | 3117 // Check if the result fits in a smi. |
3092 __ add(scratch1, r2, Operand(0x40000000), SetCC); | 3118 __ add(scratch1, r2, Operand(0x40000000)); |
3093 // If not try to return a heap number. (We know the result is an int32.) | 3119 // If not try to return a heap number. (We know the result is an int32.) |
3094 __ b(mi, &return_heap_number); | 3120 __ cmpge(scratch1, Operand(0)); |
| 3121 __ b(f, &return_heap_number); |
3095 // Tag the result and return. | 3122 // Tag the result and return. |
3096 __ SmiTag(r0, r2); | 3123 __ SmiTag(r0, r2); |
3097 __ Ret(); | 3124 __ Ret(); |
3098 | 3125 |
3099 __ bind(&return_heap_number); | 3126 __ bind(&return_heap_number); |
3100 heap_number_result = r5; | 3127 heap_number_result = r5; |
3101 GenerateHeapResultAllocation(masm, | 3128 GenerateHeapResultAllocation(masm, |
3102 heap_number_result, | 3129 heap_number_result, |
3103 heap_number_map, | 3130 heap_number_map, |
3104 scratch1, | 3131 scratch1, |
3105 scratch2, | 3132 scratch2, |
3106 &call_runtime); | 3133 &call_runtime); |
3107 | 3134 |
3108 if (CpuFeatures::IsSupported(VFP2)) { | 3135 if (CpuFeatures::IsSupported(FPU)) { |
3109 CpuFeatures::Scope scope(VFP2); | 3136 if (op_ != Token::SHR) { |
3110 if (op_ != Token::SHR) { | |
3111 // Convert the result to a floating point value. | 3137 // Convert the result to a floating point value. |
3112 __ vmov(double_scratch.low(), r2); | 3138 __ dfloat(double_scratch, r2); |
3113 __ vcvt_f64_s32(double_scratch, double_scratch.low()); | |
3114 } else { | 3139 } else { |
3115 // The result must be interpreted as an unsigned 32-bit integer. | 3140 __ dufloat(double_scratch, r2, dr2, sh4_rtmp); |
3116 __ vmov(double_scratch.low(), r2); | |
3117 __ vcvt_f64_u32(double_scratch, double_scratch.low()); | |
3118 } | 3141 } |
3119 | 3142 |
3120 // Store the result. | 3143 // Store the result. |
3121 __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); | 3144 __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); |
3122 __ vstr(double_scratch, r0, HeapNumber::kValueOffset); | 3145 __ dstr(double_scratch, MemOperand(r0, HeapNumber::kValueOffset)); |
3123 __ mov(r0, heap_number_result); | 3146 __ mov(r0, heap_number_result); |
3124 __ Ret(); | 3147 __ Ret(); |
3125 } else { | 3148 } else { |
3126 // Tail call that writes the int32 in r2 to the heap number in r0, using | 3149 // Tail call that writes the int32 in r2 to the heap number in r0, using |
3127 // r3 as scratch. r0 is preserved and returned. | 3150 // r3 as scratch. r0 is preserved and returned. |
3128 __ mov(r0, r5); | 3151 __ mov(r0, r5); |
3129 WriteInt32ToHeapNumberStub stub(r2, r0, r3); | 3152 WriteInt32ToHeapNumberStub stub(r2, r0, r3); |
3130 __ TailCallStub(&stub); | 3153 __ TailCallStub(&stub); |
3131 } | 3154 } |
3132 | 3155 |
(...skipping 29 matching lines...) |
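On the EmitFPUTruncate(kRoundToZero, ...) call used earlier in GenerateInt32Stub: what it establishes is that the double result is an exact int32, i.e. truncating toward zero and converting back reproduces the value. Note that -0.0 passes such a test, which is exactly why the stub follows it with an explicit minus-zero check on the sign word. A portable sketch of the property being tested (not of the FPU instruction sequence):

#include <cassert>
#include <cmath>
#include <cstdint>

bool TruncatesToInt32Exactly(double d, int32_t* out) {
  double t = std::trunc(d);                               // round toward zero
  if (!(t >= INT32_MIN && t <= INT32_MAX)) return false;  // also rejects NaN
  *out = (int32_t)t;
  return (double)*out == d;  // the 'ne' case in the stub when this fails
}

int main() {
  int32_t v;
  assert(TruncatesToInt32Exactly(-7.0, &v) && v == -7);
  assert(!TruncatesToInt32Exactly(2.5, &v));    // inexact
  assert(!TruncatesToInt32Exactly(4.0e9, &v));  // out of int32 range
  assert(TruncatesToInt32Exactly(-0.0, &v));    // -0 slips through; see above
  return 0;
}
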
3162 | 3185 |
3163 // Convert oddball arguments to numbers. | 3186 // Convert oddball arguments to numbers. |
3164 Label check, done; | 3187 Label check, done; |
3165 __ CompareRoot(r1, Heap::kUndefinedValueRootIndex); | 3188 __ CompareRoot(r1, Heap::kUndefinedValueRootIndex); |
3166 __ b(ne, &check); | 3189 __ b(ne, &check); |
3167 if (Token::IsBitOp(op_)) { | 3190 if (Token::IsBitOp(op_)) { |
3168 __ mov(r1, Operand(Smi::FromInt(0))); | 3191 __ mov(r1, Operand(Smi::FromInt(0))); |
3169 } else { | 3192 } else { |
3170 __ LoadRoot(r1, Heap::kNanValueRootIndex); | 3193 __ LoadRoot(r1, Heap::kNanValueRootIndex); |
3171 } | 3194 } |
3172 __ jmp(&done); | 3195 __ jmp_near(&done); |
3173 __ bind(&check); | 3196 __ bind(&check); |
3174 __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); | 3197 __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); |
3175 __ b(ne, &done); | 3198 __ b(ne, &done, Label::kNear); |
3176 if (Token::IsBitOp(op_)) { | 3199 if (Token::IsBitOp(op_)) { |
3177 __ mov(r0, Operand(Smi::FromInt(0))); | 3200 __ mov(r0, Operand(Smi::FromInt(0))); |
3178 } else { | 3201 } else { |
3179 __ LoadRoot(r0, Heap::kNanValueRootIndex); | 3202 __ LoadRoot(r0, Heap::kNanValueRootIndex); |
3180 } | 3203 } |
3181 __ bind(&done); | 3204 __ bind(&done); |
3182 | 3205 |
3183 GenerateHeapNumberStub(masm); | 3206 GenerateHeapNumberStub(masm); |
3184 } | 3207 } |
3185 | 3208 |
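The oddball block above encodes the JS coercion rule: undefined behaves as 0 under bitwise operators and as NaN under arithmetic. A hedged restatement in plain C++ (the Op enum is a stand-in for V8's Token):

    #include <cmath>
    #include <cstdio>

    enum class Op { ADD, BIT_OR, SHR };
    bool IsBitOp(Op op) { return op == Op::BIT_OR || op == Op::SHR; }

    // What the stub loads in place of an undefined operand.
    double CoerceUndefined(Op op) { return IsBitOp(op) ? 0.0 : std::nan(""); }

    int main() {
      std::printf("undefined | 1 sees %g\n", CoerceUndefined(Op::BIT_OR));  // 0
      std::printf("undefined + 1 sees %g\n", CoerceUndefined(Op::ADD));     // nan
      return 0;
    }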
(...skipping 26 matching lines...)
3212 | 3235 |
3213 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { | 3236 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { |
3214 ASSERT(op_ == Token::ADD); | 3237 ASSERT(op_ == Token::ADD); |
3215 Label left_not_string, call_runtime; | 3238 Label left_not_string, call_runtime; |
3216 | 3239 |
3217 Register left = r1; | 3240 Register left = r1; |
3218 Register right = r0; | 3241 Register right = r0; |
3219 | 3242 |
3220 // Check if left argument is a string. | 3243 // Check if left argument is a string. |
3221 __ JumpIfSmi(left, &left_not_string); | 3244 __ JumpIfSmi(left, &left_not_string); |
3222 __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE); | 3245 __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE, ge); |
3223 __ b(ge, &left_not_string); | 3246 __ bt(&left_not_string); |
3224 | 3247 |
3225 StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB); | 3248 StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB); |
3226 GenerateRegisterArgsPush(masm); | 3249 GenerateRegisterArgsPush(masm); |
3227 __ TailCallStub(&string_add_left_stub); | 3250 __ TailCallStub(&string_add_left_stub); |
3228 | 3251 |
3229 // Left operand is not a string, test right. | 3252 // Left operand is not a string, test right. |
3230 __ bind(&left_not_string); | 3253 __ bind(&left_not_string); |
3231 __ JumpIfSmi(right, &call_runtime); | 3254 __ JumpIfSmi(right, &call_runtime); |
3232 __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE); | 3255 __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE, ge); |
3233 __ b(ge, &call_runtime); | 3256 __ bt(&call_runtime); |
3234 | 3257 |
3235 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB); | 3258 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB); |
3236 GenerateRegisterArgsPush(masm); | 3259 GenerateRegisterArgsPush(masm); |
3237 __ TailCallStub(&string_add_right_stub); | 3260 __ TailCallStub(&string_add_right_stub); |
3238 | 3261 |
3239 // At least one argument is not a string. | 3262 // At least one argument is not a string. |
3240 __ bind(&call_runtime); | 3263 __ bind(&call_runtime); |
3241 } | 3264 } |
3242 | 3265 |
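GenerateAddStrings tries the left operand as a string first, then the right, and otherwise falls through to the runtime; each tail-called StringAddStub only re-checks the side it was not told about. A compact sketch of that dispatch, with placeholder types assumed:

    #include <string>

    struct Value {
      bool is_smi;
      bool is_string;
      std::string str;  // valid when is_string
    };

    // Which specialized stub would be tail-called, if any.
    enum class Path { kLeftIsString, kRightIsString, kRuntime };

    Path DispatchAdd(const Value& left, const Value& right) {
      if (!left.is_smi && left.is_string) return Path::kLeftIsString;
      if (!right.is_smi && right.is_string) return Path::kRightIsString;
      return Path::kRuntime;  // at least one argument is not a string
    }

    int main() {
      Value s{false, true, "a"}, n{true, false, ""};
      return DispatchAdd(s, n) == Path::kLeftIsString ? 0 : 1;
    }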
3243 | 3266 |
(...skipping 54 matching lines...)
3298 Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0; | 3321 Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0; |
3299 // If the overwritable operand is already an object, we skip the | 3322 // If the overwritable operand is already an object, we skip the |
3300 // allocation of a heap number. | 3323 // allocation of a heap number. |
3301 __ JumpIfNotSmi(overwritable_operand, &skip_allocation); | 3324 __ JumpIfNotSmi(overwritable_operand, &skip_allocation); |
3302 // Allocate a heap number for the result. | 3325 // Allocate a heap number for the result. |
3303 __ AllocateHeapNumber( | 3326 __ AllocateHeapNumber( |
3304 result, scratch1, scratch2, heap_number_map, gc_required); | 3327 result, scratch1, scratch2, heap_number_map, gc_required); |
3305 __ b(&allocated); | 3328 __ b(&allocated); |
3306 __ bind(&skip_allocation); | 3329 __ bind(&skip_allocation); |
3307 // Use object holding the overwritable operand for result. | 3330 // Use object holding the overwritable operand for result. |
3308 __ mov(result, Operand(overwritable_operand)); | 3331 __ mov(result, overwritable_operand); |
3309 __ bind(&allocated); | 3332 __ bind(&allocated); |
3310 } else { | 3333 } else { |
3311 ASSERT(mode_ == NO_OVERWRITE); | 3334 ASSERT(mode_ == NO_OVERWRITE); |
3312 __ AllocateHeapNumber( | 3335 __ AllocateHeapNumber( |
3313 result, scratch1, scratch2, heap_number_map, gc_required); | 3336 result, scratch1, scratch2, heap_number_map, gc_required); |
3314 } | 3337 } |
3315 } | 3338 } |
3316 | 3339 |
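The helper above only allocates when the designated overwritable operand is a smi; a heap-number operand is reused in place. The decision, restated as a small sketch (mode names follow the stub):

    // Which operand may be overwritten is fixed by the stub's mode.
    enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

    // True when a fresh HeapNumber must be allocated for the result.
    bool NeedsAllocation(OverwriteMode mode, bool overwritable_operand_is_smi) {
      if (mode == NO_OVERWRITE) return true;  // nothing may be clobbered
      return overwritable_operand_is_smi;     // a smi cannot hold a double in place
    }

    int main() { return NeedsAllocation(NO_OVERWRITE, false) ? 0 : 1; }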
3317 | 3340 |
3318 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { | 3341 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { |
3319 __ Push(r1, r0); | 3342 __ Push(r1, r0); |
3320 } | 3343 } |
3321 | 3344 |
3322 | 3345 |
3323 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { | 3346 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
3324 // Untagged case: double input in d2, double result goes | 3347 // Untagged case: double input in dr2, double result goes |
3325 // into d2. | 3348 // into dr2. |
3326 // Tagged case: tagged input on top of stack and in r0, | 3349 // Tagged case: tagged input on top of stack and in r0, |
3327 // tagged result (heap number) goes into r0. | 3350 // tagged result (heap number) goes into r0. |
3328 | 3351 |
3329 Label input_not_smi; | 3352 Label input_not_smi; |
3330 Label loaded; | 3353 Label loaded; |
3331 Label calculate; | 3354 Label calculate; |
3332 Label invalid_cache; | 3355 Label invalid_cache; |
3333 const Register scratch0 = r9; | 3356 const Register scratch0 = r9; |
3334 const Register scratch1 = r7; | 3357 const Register scratch1 = r7; |
3335 const Register cache_entry = r0; | 3358 const Register cache_entry = r0; |
3336 const bool tagged = (argument_type_ == TAGGED); | 3359 const bool tagged = (argument_type_ == TAGGED); |
3337 | 3360 |
3338 if (CpuFeatures::IsSupported(VFP2)) { | 3361 if (CpuFeatures::IsSupported(FPU)) { |
3339 CpuFeatures::Scope scope(VFP2); | |
3340 if (tagged) { | 3362 if (tagged) { |
3341 // Argument is a number and is on stack and in r0. | 3363 // Argument is a number and is on stack and in r0. |
3342 // Load argument and check if it is a smi. | 3364 // Load argument and check if it is a smi. |
3343 __ JumpIfNotSmi(r0, &input_not_smi); | 3365 __ JumpIfNotSmi(r0, &input_not_smi); |
3344 | 3366 |
3345 // Input is a smi. Convert to double and load the low and high words | 3367 // Input is a smi. Convert to double and load the low and high words |
3346 // of the double into r2, r3. | 3368 // of the double into r2, r3. |
3347 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); | 3369 __ asr(scratch0, r0, Operand(kSmiTagSize)); |
| 3370 __ dfloat(dr0, scratch0); |
| 3371 __ movd(r2, r3, dr0); |
3348 __ b(&loaded); | 3372 __ b(&loaded); |
3349 | 3373 |
3350 __ bind(&input_not_smi); | 3374 __ bind(&input_not_smi); |
3351 // Check if input is a HeapNumber. | 3375 // Check if input is a HeapNumber. |
3352 __ CheckMap(r0, | 3376 __ CheckMap(r0, |
3353 r1, | 3377 r1, |
3354 Heap::kHeapNumberMapRootIndex, | 3378 Heap::kHeapNumberMapRootIndex, |
3355 &calculate, | 3379 &calculate, |
3356 DONT_DO_SMI_CHECK); | 3380 DONT_DO_SMI_CHECK); |
3357 // Input is a HeapNumber. Load it to a double register and store the | 3381 // Input is a HeapNumber. Load it to a double register and store the |
3358 // low and high words into r2, r3. | 3382 // low and high words into r2, r3. |
3359 __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); | 3383 __ dldr(dr0, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
3360 __ vmov(r2, r3, d0); | 3384 __ movd(r2, r3, dr0); |
3361 } else { | 3385 } else { |
3362 // Input is untagged double in d2. Output goes to d2. | 3386 UNIMPLEMENTED(); |
3363 __ vmov(r2, r3, d2); | |
3364 } | 3387 } |
3365 __ bind(&loaded); | 3388 __ bind(&loaded); |
3366 // r2 = low 32 bits of double value | 3389 // r2 = low 32 bits of double value |
3367 // r3 = high 32 bits of double value | 3390 // r3 = high 32 bits of double value |
3368 // Compute hash (the shifts are arithmetic): | 3391 // Compute hash (the shifts are arithmetic): |
3369 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); | 3392 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); |
3370 __ eor(r1, r2, Operand(r3)); | 3393 __ eor(r1, r2, r3); |
3371 __ eor(r1, r1, Operand(r1, ASR, 16)); | 3394 __ asr(scratch0, r1, Operand(16)); |
3372 __ eor(r1, r1, Operand(r1, ASR, 8)); | 3395 __ eor(r1, r1, scratch0); |
| 3396 __ asr(scratch0, r1, Operand(8)); |
| 3397 __ eor(r1, r1, scratch0); |
3373 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); | 3398 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); |
3374 __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1)); | 3399 // TODO(STM): check that land matches ARM's And semantics here. |
| 3400 __ land(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1)); |
3375 | 3401 |
3376 // r2 = low 32 bits of double value. | 3402 // r2 = low 32 bits of double value. |
3377 // r3 = high 32 bits of double value. | 3403 // r3 = high 32 bits of double value. |
3378 // r1 = TranscendentalCache::hash(double value). | 3404 // r1 = TranscendentalCache::hash(double value). |
3379 Isolate* isolate = masm->isolate(); | 3405 Isolate* isolate = masm->isolate(); |
3380 ExternalReference cache_array = | 3406 ExternalReference cache_array = |
3381 ExternalReference::transcendental_cache_array_address(isolate); | 3407 ExternalReference::transcendental_cache_array_address(isolate); |
3382 __ mov(cache_entry, Operand(cache_array)); | 3408 __ mov(cache_entry, Operand(cache_array)); |
3383 // cache_entry points to cache array. | 3409 // cache_entry points to cache array. |
3384 int cache_array_index | 3410 int cache_array_index |
(...skipping 13 matching lines...)
3398 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); | 3424 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); |
3399 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); | 3425 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); |
3400 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer. | 3426 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer. |
3401 CHECK_EQ(0, elem_in0 - elem_start); | 3427 CHECK_EQ(0, elem_in0 - elem_start); |
3402 CHECK_EQ(kIntSize, elem_in1 - elem_start); | 3428 CHECK_EQ(kIntSize, elem_in1 - elem_start); |
3403 CHECK_EQ(2 * kIntSize, elem_out - elem_start); | 3429 CHECK_EQ(2 * kIntSize, elem_out - elem_start); |
3404 } | 3430 } |
3405 #endif | 3431 #endif |
3406 | 3432 |
3407 // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. | 3433 // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. |
3408 __ add(r1, r1, Operand(r1, LSL, 1)); | 3434 __ lsl(scratch0, r1, Operand(1)); |
3409 __ add(cache_entry, cache_entry, Operand(r1, LSL, 2)); | 3435 __ add(r1, r1, scratch0); |
| 3436 __ lsl(scratch0, r1, Operand(2)); |
| 3437 __ add(cache_entry, cache_entry, scratch0); |
3410 // Check if cache matches: Double value is stored in uint32_t[2] array. | 3438 // Check if cache matches: Double value is stored in uint32_t[2] array. |
3411 __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); | 3439 __ ldr(r4, MemOperand(cache_entry, 0)); |
| 3440 __ ldr(r5, MemOperand(cache_entry, 4)); |
| 3441 __ ldr(r6, MemOperand(cache_entry, 8)); |
3412 __ cmp(r2, r4); | 3442 __ cmp(r2, r4); |
3413 __ cmp(r3, r5, eq); | 3443 __ b(ne, &calculate); |
| 3444 __ cmp(r3, r5); |
3414 __ b(ne, &calculate); | 3445 __ b(ne, &calculate); |
3415 // Cache hit. Load result, cleanup and return. | 3446 // Cache hit. Load result, cleanup and return. |
3416 Counters* counters = masm->isolate()->counters(); | 3447 Counters* counters = masm->isolate()->counters(); |
3417 __ IncrementCounter( | 3448 __ IncrementCounter( |
3418 counters->transcendental_cache_hit(), 1, scratch0, scratch1); | 3449 counters->transcendental_cache_hit(), 1, scratch0, scratch1); |
3419 if (tagged) { | 3450 if (tagged) { |
3420 // Pop input value from stack and load result into r0. | 3451 // Pop input value from stack and load result into r0. |
3421 __ pop(); | 3452 __ pop(); |
3422 __ mov(r0, Operand(r6)); | 3453 __ mov(r0, r6); |
3423 } else { | 3454 } else { |
3424 // Load result into d2. | 3455 // Load result into dr2. |
3425 __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); | 3456 __ dldr(dr2, FieldMemOperand(r6, HeapNumber::kValueOffset)); |
3426 } | 3457 } |
3427 __ Ret(); | 3458 __ Ret(); |
3428 } // if (CpuFeatures::IsSupported(VFP2)) | 3459 } // if (CpuFeatures::IsSupported(FPU)) |
3429 | 3460 |
3430 __ bind(&calculate); | 3461 __ bind(&calculate); |
3431 Counters* counters = masm->isolate()->counters(); | 3462 Counters* counters = masm->isolate()->counters(); |
3432 __ IncrementCounter( | 3463 __ IncrementCounter( |
3433 counters->transcendental_cache_miss(), 1, scratch0, scratch1); | 3464 counters->transcendental_cache_miss(), 1, scratch0, scratch1); |
3434 if (tagged) { | 3465 if (tagged) { |
3435 __ bind(&invalid_cache); | 3466 __ bind(&invalid_cache); |
3436 ExternalReference runtime_function = | 3467 ExternalReference runtime_function = |
3437 ExternalReference(RuntimeFunction(), masm->isolate()); | 3468 ExternalReference(RuntimeFunction(), masm->isolate()); |
3438 __ TailCallExternalReference(runtime_function, 1, 1); | 3469 __ TailCallExternalReference(runtime_function, 1, 1); |
3439 } else { | 3470 } else { |
3440 ASSERT(CpuFeatures::IsSupported(VFP2)); | 3471 UNREACHABLE(); |
3441 CpuFeatures::Scope scope(VFP2); | |
3442 | |
3443 Label no_update; | |
3444 Label skip_cache; | |
3445 | |
3446 // Call C function to calculate the result and update the cache. | |
3447 // r0: precalculated cache entry address. | |
3448 // r2 and r3: parts of the double value. | |
3449 // Store r0, r2 and r3 on stack for later before calling C function. | |
3450 __ Push(r3, r2, cache_entry); | |
3451 GenerateCallCFunction(masm, scratch0); | |
3452 __ GetCFunctionDoubleResult(d2); | |
3453 | |
3454 // Try to update the cache. If we cannot allocate a | |
3455 // heap number, we return the result without updating. | |
3456 __ Pop(r3, r2, cache_entry); | |
3457 __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex); | |
3458 __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update); | |
3459 __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); | |
3460 __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit()); | |
3461 __ Ret(); | |
3462 | |
3463 __ bind(&invalid_cache); | |
3464 // The cache is invalid. Call runtime which will recreate the | |
3465 // cache. | |
3466 __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex); | |
3467 __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache); | |
3468 __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); | |
3469 { | |
3470 FrameScope scope(masm, StackFrame::INTERNAL); | |
3471 __ push(r0); | |
3472 __ CallRuntime(RuntimeFunction(), 1); | |
3473 } | |
3474 __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset)); | |
3475 __ Ret(); | |
3476 | |
3477 __ bind(&skip_cache); | |
3478 // Call C function to calculate the result and answer directly | |
3479 // without updating the cache. | |
3480 GenerateCallCFunction(masm, scratch0); | |
3481 __ GetCFunctionDoubleResult(d2); | |
3482 __ bind(&no_update); | |
3483 | |
3484 // We return the value in d2 without adding it to the cache, but | |
3485 // we cause a scavenging GC so that future allocations will succeed. | |
3486 { | |
3487 FrameScope scope(masm, StackFrame::INTERNAL); | |
3488 | |
3489 // Allocate an aligned object larger than a HeapNumber. | |
3490 ASSERT(4 * kPointerSize >= HeapNumber::kSize); | |
3491 __ mov(scratch0, Operand(4 * kPointerSize)); | |
3492 __ push(scratch0); | |
3493 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); | |
3494 } | |
3495 __ Ret(); | |
3496 } | 3472 } |
3497 } | 3473 } |
3498 | 3474 |
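The cache lookup above hashes the double's two 32-bit halves and indexes an array of 12-byte entries; on SH4 the ARM shifted operands become explicit lsl/add pairs. A runnable sketch of the same arithmetic, with the kCacheSize value assumed for illustration and little-endian word order assumed (as this port runs):

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    const int kCacheSize = 512;  // assumed; must be a power of two

    int Hash(double d) {
      uint32_t w[2];                 // w[0] = low word, w[1] = high (little-endian)
      std::memcpy(w, &d, sizeof(w));
      int32_t h = static_cast<int32_t>(w[0] ^ w[1]);
      h ^= h >> 16;                  // arithmetic shifts, as in the stub
      h ^= h >> 8;
      return h & (kCacheSize - 1);
    }

    int main() {
      int h = Hash(1.5);
      // Entry layout is {uint32 in[2]; void* output;} == 12 bytes, so the stub
      // computes &cache[h * 12] as h += h << 1 (3h), then offset = h << 2 (12h).
      int byte_offset = (h + (h << 1)) << 2;
      std::printf("h=%d offset=%d\n", h, byte_offset);
      return 0;
    }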
3499 | 3475 |
3500 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, | 3476 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, |
3501 Register scratch) { | 3477 Register scratch) { |
3502 ASSERT(CpuFeatures::IsEnabled(VFP2)); | |
3503 Isolate* isolate = masm->isolate(); | 3478 Isolate* isolate = masm->isolate(); |
3504 | 3479 |
3505 __ push(lr); | 3480 __ push(lr); |
3506 __ PrepareCallCFunction(0, 1, scratch); | 3481 __ PrepareCallCFunction(0, 1, scratch); |
3507 if (masm->use_eabi_hardfloat()) { | 3482 __ movd(dr4, r0, r1); |
3508 __ vmov(d0, d2); | |
3509 } else { | |
3510 __ vmov(r0, r1, d2); | |
3511 } | |
3512 AllowExternalCallThatCantCauseGC scope(masm); | |
3513 switch (type_) { | 3483 switch (type_) { |
3514 case TranscendentalCache::SIN: | 3484 case TranscendentalCache::SIN: |
3515 __ CallCFunction(ExternalReference::math_sin_double_function(isolate), | 3485 __ CallCFunction(ExternalReference::math_sin_double_function(isolate), |
3516 0, 1); | 3486 0, 1); |
3517 break; | 3487 break; |
3518 case TranscendentalCache::COS: | 3488 case TranscendentalCache::COS: |
3519 __ CallCFunction(ExternalReference::math_cos_double_function(isolate), | 3489 __ CallCFunction(ExternalReference::math_cos_double_function(isolate), |
3520 0, 1); | 3490 0, 1); |
3521 break; | 3491 break; |
3522 case TranscendentalCache::TAN: | 3492 case TranscendentalCache::TAN: |
(...skipping 30 matching lines...)
3553 __ TailCallRuntime(Runtime::kStackGuard, 0, 1); | 3523 __ TailCallRuntime(Runtime::kStackGuard, 0, 1); |
3554 } | 3524 } |
3555 | 3525 |
3556 | 3526 |
3557 void InterruptStub::Generate(MacroAssembler* masm) { | 3527 void InterruptStub::Generate(MacroAssembler* masm) { |
3558 __ TailCallRuntime(Runtime::kInterrupt, 0, 1); | 3528 __ TailCallRuntime(Runtime::kInterrupt, 0, 1); |
3559 } | 3529 } |
3560 | 3530 |
3561 | 3531 |
3562 void MathPowStub::Generate(MacroAssembler* masm) { | 3532 void MathPowStub::Generate(MacroAssembler* masm) { |
3563 CpuFeatures::Scope vfp2_scope(VFP2); | 3533 // TODO(STM): not merged! |
3564 const Register base = r1; | 3534 Label call_runtime; |
3565 const Register exponent = r2; | 3535 if (CpuFeatures::IsSupported(FPU)) { |
3566 const Register heapnumbermap = r5; | 3536 Label base_not_smi; |
3567 const Register heapnumber = r0; | 3537 Label exponent_not_smi; |
3568 const DoubleRegister double_base = d1; | 3538 Label convert_exponent; |
3569 const DoubleRegister double_exponent = d2; | |
3570 const DoubleRegister double_result = d3; | |
3571 const DoubleRegister double_scratch = d0; | |
3572 const SwVfpRegister single_scratch = s0; | |
3573 const Register scratch = r9; | |
3574 const Register scratch2 = r7; | |
3575 | 3539 |
3576 Label call_runtime, done, int_exponent; | 3540 const Register base = r0; |
3577 if (exponent_type_ == ON_STACK) { | 3541 const Register exponent = r4; |
3578 Label base_is_smi, unpack_exponent; | 3542 const Register heapnumbermap = sh4_r8; |
3579 // The exponent and base are supplied as arguments on the stack. | 3543 const Register heapnumber = r9; |
3580 // This can only happen if the stub is called from non-optimized code. | 3544 const DoubleRegister double_base = dr4; |
3581 // Load input parameters from stack to double registers. | 3545 const DoubleRegister double_exponent = dr6; |
| 3546 const DoubleRegister double_result = dr0; |
| 3547 const Register scratch = r5; |
| 3548 const Register scratch2 = r6; |
| 3549 |
| 3550 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex); |
3582 __ ldr(base, MemOperand(sp, 1 * kPointerSize)); | 3551 __ ldr(base, MemOperand(sp, 1 * kPointerSize)); |
3583 __ ldr(exponent, MemOperand(sp, 0 * kPointerSize)); | 3552 __ ldr(exponent, MemOperand(sp, 0 * kPointerSize)); |
3584 | 3553 |
3585 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex); | 3554 // Convert base to double value and store it in dr0. |
| 3555 __ JumpIfNotSmi(base, &base_not_smi); |
| 3556 // Base is a Smi. Untag and convert it. |
| 3557 __ SmiUntag(base); |
| 3558 __ dfloat(double_base, base); |
| 3559 __ b(&convert_exponent); |
3586 | 3560 |
3587 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi); | 3561 __ bind(&base_not_smi); |
3588 __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset)); | 3562 __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset)); |
3589 __ cmp(scratch, heapnumbermap); | 3563 __ cmp(scratch, heapnumbermap); |
3590 __ b(ne, &call_runtime); | 3564 __ b(ne, &call_runtime); |
| 3565 // Base is a heapnumber. Load it into double register. |
| 3566 __ dldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset)); |
3591 | 3567 |
3592 __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset)); | 3568 __ bind(&convert_exponent); |
3593 __ jmp(&unpack_exponent); | 3569 __ JumpIfNotSmi(exponent, &exponent_not_smi); |
| 3570 __ SmiUntag(exponent); |
3594 | 3571 |
3595 __ bind(&base_is_smi); | 3572 // The base is in a double register and the exponent is |
3596 __ vmov(single_scratch, scratch); | 3573 // an untagged smi. Allocate a heap number and call a |
3597 __ vcvt_f64_s32(double_base, single_scratch); | 3574 // C function for integer exponents. The register containing |
3598 __ bind(&unpack_exponent); | 3575 // the heap number is callee-saved. |
| 3576 __ AllocateHeapNumber(heapnumber, |
| 3577 scratch, |
| 3578 scratch2, |
| 3579 heapnumbermap, |
| 3580 &call_runtime); |
| 3581 __ push(pr); |
| 3582 __ PrepareCallCFunction(1, 1, scratch); |
| 3583 // Check that the arguments are stored in the right registers (SH4 ABI). |
| 3584 ASSERT(double_base.is(dr4) && exponent.is(r4)); |
| 3585 __ CallCFunction( |
| 3586 ExternalReference::power_double_int_function(masm->isolate()), |
| 3587 1, 1); |
| 3588 __ pop(pr); |
| 3589 ASSERT(double_result.is(dr0)); |
| 3590 __ dstr(double_result, |
| 3591 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); |
| 3592 __ mov(r0, heapnumber); |
| 3593 __ Drop(2); |
| 3594 __ rts(); |
3599 | 3595 |
3600 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); | 3596 __ bind(&exponent_not_smi); |
3601 | |
3602 __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); | 3597 __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); |
3603 __ cmp(scratch, heapnumbermap); | 3598 __ cmp(scratch, heapnumbermap); |
3604 __ b(ne, &call_runtime); | 3599 __ b(ne, &call_runtime); |
3605 __ vldr(double_exponent, | 3600 // Exponent is a heapnumber. Load it into double register. |
| 3601 __ dldr(double_exponent, |
3606 FieldMemOperand(exponent, HeapNumber::kValueOffset)); | 3602 FieldMemOperand(exponent, HeapNumber::kValueOffset)); |
3607 } else if (exponent_type_ == TAGGED) { | |
3608 // Base is already in double_base. | |
3609 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); | |
3610 | 3603 |
3611 __ vldr(double_exponent, | 3604 // The base and the exponent are in double registers. |
3612 FieldMemOperand(exponent, HeapNumber::kValueOffset)); | 3605 // Allocate a heap number and call a C function for |
| 3606 // double exponents. The register containing |
| 3607 // the heap number is callee-saved. |
| 3608 __ AllocateHeapNumber(heapnumber, |
| 3609 scratch, |
| 3610 scratch2, |
| 3611 heapnumbermap, |
| 3612 &call_runtime); |
| 3613 __ push(pr); |
| 3614 __ PrepareCallCFunction(0, 2, scratch); |
| 3615 ASSERT(double_base.is(dr4) && double_exponent.is(dr6)); |
| 3616 __ CallCFunction( |
| 3617 ExternalReference::power_double_double_function(masm->isolate()), |
| 3618 0, 2); |
| 3619 __ pop(pr); |
| 3620 ASSERT(double_result.is(dr0)); |
| 3621 __ dstr(double_result, |
| 3622 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); |
| 3623 __ mov(r0, heapnumber); |
| 3624 __ Drop(2); |
| 3625 __ rts(); |
3613 } | 3626 } |
3614 | 3627 __ bind(&call_runtime); |
3615 if (exponent_type_ != INTEGER) { | 3628 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); |
3616 Label int_exponent_convert; | |
3617 // Detect integer exponents stored as double. | |
3618 __ vcvt_u32_f64(single_scratch, double_exponent); | |
3619 // We do not check for NaN or Infinity here because comparing numbers on | |
3620 // ARM correctly distinguishes NaNs. We end up calling the built-in. | |
3621 __ vcvt_f64_u32(double_scratch, single_scratch); | |
3622 __ VFPCompareAndSetFlags(double_scratch, double_exponent); | |
3623 __ b(eq, &int_exponent_convert); | |
3624 | |
3625 if (exponent_type_ == ON_STACK) { | |
3626 // Detect square root case. Crankshaft detects constant +/-0.5 at | |
3627 // compile time and uses DoMathPowHalf instead. We then skip this check | |
3628 // for non-constant cases of +/-0.5 as these hardly occur. | |
3629 Label not_plus_half; | |
3630 | |
3631 // Test for 0.5. | |
3632 __ vmov(double_scratch, 0.5, scratch); | |
3633 __ VFPCompareAndSetFlags(double_exponent, double_scratch); | |
3634 __ b(ne, ¬_plus_half); | |
3635 | |
3636 // Calculates square root of base. Check for the special case of | |
3637 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13). | |
3638 __ vmov(double_scratch, -V8_INFINITY, scratch); | |
3639 __ VFPCompareAndSetFlags(double_base, double_scratch); | |
3640 __ vneg(double_result, double_scratch, eq); | |
3641 __ b(eq, &done); | |
3642 | |
3643 // Add +0 to convert -0 to +0. | |
3644 __ vadd(double_scratch, double_base, kDoubleRegZero); | |
3645 __ vsqrt(double_result, double_scratch); | |
3646 __ jmp(&done); | |
3647 | |
3648 __ bind(¬_plus_half); | |
3649 __ vmov(double_scratch, -0.5, scratch); | |
3650 __ VFPCompareAndSetFlags(double_exponent, double_scratch); | |
3651 __ b(ne, &call_runtime); | |
3652 | |
3653 // Calculates square root of base. Check for the special case of | |
3654 // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13). | |
3655 __ vmov(double_scratch, -V8_INFINITY, scratch); | |
3656 __ VFPCompareAndSetFlags(double_base, double_scratch); | |
3657 __ vmov(double_result, kDoubleRegZero, eq); | |
3658 __ b(eq, &done); | |
3659 | |
3660 // Add +0 to convert -0 to +0. | |
3661 __ vadd(double_scratch, double_base, kDoubleRegZero); | |
3662 __ vmov(double_result, 1.0, scratch); | |
3663 __ vsqrt(double_scratch, double_scratch); | |
3664 __ vdiv(double_result, double_result, double_scratch); | |
3665 __ jmp(&done); | |
3666 } | |
3667 | |
3668 __ push(lr); | |
3669 { | |
3670 AllowExternalCallThatCantCauseGC scope(masm); | |
3671 __ PrepareCallCFunction(0, 2, scratch); | |
3672 __ SetCallCDoubleArguments(double_base, double_exponent); | |
3673 __ CallCFunction( | |
3674 ExternalReference::power_double_double_function(masm->isolate()), | |
3675 0, 2); | |
3676 } | |
3677 __ pop(lr); | |
3678 __ GetCFunctionDoubleResult(double_result); | |
3679 __ jmp(&done); | |
3680 | |
3681 __ bind(&int_exponent_convert); | |
3682 __ vcvt_u32_f64(single_scratch, double_exponent); | |
3683 __ vmov(scratch, single_scratch); | |
3684 } | |
3685 | |
3686 // Calculate power with integer exponent. | |
3687 __ bind(&int_exponent); | |
3688 | |
3689 // Get two copies of exponent in the registers scratch and exponent. | |
3690 if (exponent_type_ == INTEGER) { | |
3691 __ mov(scratch, exponent); | |
3692 } else { | |
3693 // Exponent has previously been stored into scratch as untagged integer. | |
3694 __ mov(exponent, scratch); | |
3695 } | |
3696 __ vmov(double_scratch, double_base); // Back up base. | |
3697 __ vmov(double_result, 1.0, scratch2); | |
3698 | |
3699 // Get absolute value of exponent. | |
3700 __ cmp(scratch, Operand(0)); | |
3701 __ mov(scratch2, Operand(0), LeaveCC, mi); | |
3702 __ sub(scratch, scratch2, scratch, LeaveCC, mi); | |
3703 | |
3704 Label while_true; | |
3705 __ bind(&while_true); | |
3706 __ mov(scratch, Operand(scratch, ASR, 1), SetCC); | |
3707 __ vmul(double_result, double_result, double_scratch, cs); | |
3708 __ vmul(double_scratch, double_scratch, double_scratch, ne); | |
3709 __ b(ne, &while_true); | |
3710 | |
3711 __ cmp(exponent, Operand(0)); | |
3712 __ b(ge, &done); | |
3713 __ vmov(double_scratch, 1.0, scratch); | |
3714 __ vdiv(double_result, double_scratch, double_result); | |
3715 // Test whether result is zero. Bail out to check for subnormal result. | |
3716 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases. | |
3717 __ VFPCompareAndSetFlags(double_result, 0.0); | |
3718 __ b(ne, &done); | |
3719 // double_exponent may not contain the exponent value if the input was a | |
3720 // smi. We set it with exponent value before bailing out. | |
3721 __ vmov(single_scratch, exponent); | |
3722 __ vcvt_f64_s32(double_exponent, single_scratch); | |
3723 | |
3724 // Returning or bailing out. | |
3725 Counters* counters = masm->isolate()->counters(); | |
3726 if (exponent_type_ == ON_STACK) { | |
3727 // The arguments are still on the stack. | |
3728 __ bind(&call_runtime); | |
3729 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); | |
3730 | |
3731 // The stub is called from non-optimized code, which expects the result | |
3732 // as heap number in exponent. | |
3733 __ bind(&done); | |
3734 __ AllocateHeapNumber( | |
3735 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime); | |
3736 __ vstr(double_result, | |
3737 FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); | |
3738 ASSERT(heapnumber.is(r0)); | |
3739 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2); | |
3740 __ Ret(2); | |
3741 } else { | |
3742 __ push(lr); | |
3743 { | |
3744 AllowExternalCallThatCantCauseGC scope(masm); | |
3745 __ PrepareCallCFunction(0, 2, scratch); | |
3746 __ SetCallCDoubleArguments(double_base, double_exponent); | |
3747 __ CallCFunction( | |
3748 ExternalReference::power_double_double_function(masm->isolate()), | |
3749 0, 2); | |
3750 } | |
3751 __ pop(lr); | |
3752 __ GetCFunctionDoubleResult(double_result); | |
3753 | |
3754 __ bind(&done); | |
3755 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2); | |
3756 __ Ret(); | |
3757 } | |
3758 } | 3629 } |
3759 | 3630 |
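The deleted ARM path computed integer powers inline; the SH4 version defers to power_double_int through the C ABI instead. For reference, a hedged sketch of the exponentiation-by-squaring loop the ARM while_true code implemented (the stub additionally bails out to C when the negative-exponent result is subnormal, which this omits):

    #include <cstdio>

    double PowInt(double base, int exponent) {
      double result = 1.0;
      unsigned int e = exponent < 0 ? 0u - static_cast<unsigned int>(exponent)
                                    : static_cast<unsigned int>(exponent);
      while (e != 0) {
        if (e & 1u) result *= base;  // ARM: vmul(double_result, ...) on carry set
        base *= base;                // ARM: vmul(double_scratch, ...) squares the base
        e >>= 1;
      }
      return exponent < 0 ? 1.0 / result : result;
    }

    int main() {
      std::printf("%g %g\n", PowInt(2.0, 10), PowInt(2.0, -2));  // 1024 0.25
      return 0;
    }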
3760 | 3631 |
3761 bool CEntryStub::NeedsImmovableCode() { | 3632 bool CEntryStub::NeedsImmovableCode() { |
3762 return true; | 3633 return true; |
3763 } | 3634 } |
3764 | 3635 |
3765 | 3636 |
3766 bool CEntryStub::IsPregenerated() { | 3637 bool CEntryStub::IsPregenerated() { |
3767 return (!save_doubles_ || ISOLATE->fp_stubs_generated()) && | 3638 return (!save_doubles_ || ISOLATE->fp_stubs_generated()) && |
(...skipping 25 matching lines...)
3793 code->set_is_pregenerated(true); | 3664 code->set_is_pregenerated(true); |
3794 } | 3665 } |
3795 | 3666 |
3796 | 3667 |
3797 void CEntryStub::GenerateCore(MacroAssembler* masm, | 3668 void CEntryStub::GenerateCore(MacroAssembler* masm, |
3798 Label* throw_normal_exception, | 3669 Label* throw_normal_exception, |
3799 Label* throw_termination_exception, | 3670 Label* throw_termination_exception, |
3800 Label* throw_out_of_memory_exception, | 3671 Label* throw_out_of_memory_exception, |
3801 bool do_gc, | 3672 bool do_gc, |
3802 bool always_allocate) { | 3673 bool always_allocate) { |
| 3674 // WARNING: this function uses the SH4 ABI! |
| 3675 |
| 3676 // Input |
3803 // r0: result parameter for PerformGC, if any | 3677 // r0: result parameter for PerformGC, if any |
3804 // r4: number of arguments including receiver (C callee-saved) | 3678 // sh4_r8: number of arguments including receiver (C callee-saved) |
3805 // r5: pointer to builtin function (C callee-saved) | 3679 // Used later by LeaveExitFrame() |
3806 // r6: pointer to the first argument (C callee-saved) | 3680 // sh4_r9: pointer to builtin function (C callee-saved) |
| 3681 // sh4_r10: pointer to the first argument (C callee-saved) |
| 3682 // TODO(stm): use of r10 is dangerous (ip) |
| 3683 // sh4: moved callee-saved to stack localtion (see ::Generate()) |
3807 Isolate* isolate = masm->isolate(); | 3684 Isolate* isolate = masm->isolate(); |
| 3685 ASSERT(!r0.is(sh4_rtmp)); |
| 3686 ASSERT(!r0.is(sh4_ip)); |
| 3687 // TODO(STM): fix this merge |
| 3688 // ASSERT(!sh4_r8.is(sh4_rtmp) && !sh4_r9.is(sh4_rtmp) && |
| 3689 // !sh4_r10.is(sh4_rtmp)); |
3808 | 3690 |
3809 if (do_gc) { | 3691 if (do_gc) { |
3810 // Passing r0. | 3692 // Passing r0. |
3811 __ PrepareCallCFunction(1, 0, r1); | 3693 __ PrepareCallCFunction(1, 0, r1); |
| 3694 __ mov(r4, r0); |
3812 __ CallCFunction(ExternalReference::perform_gc_function(isolate), | 3695 __ CallCFunction(ExternalReference::perform_gc_function(isolate), |
3813 1, 0); | 3696 1, 0); |
3814 } | 3697 } |
3815 | 3698 |
3816 ExternalReference scope_depth = | 3699 ExternalReference scope_depth = |
3817 ExternalReference::heap_always_allocate_scope_depth(isolate); | 3700 ExternalReference::heap_always_allocate_scope_depth(isolate); |
3818 if (always_allocate) { | 3701 if (always_allocate) { |
3819 __ mov(r0, Operand(scope_depth)); | 3702 __ mov(r0, Operand(scope_depth)); |
3820 __ ldr(r1, MemOperand(r0)); | 3703 __ ldr(r1, MemOperand(r0)); |
3821 __ add(r1, r1, Operand(1)); | 3704 __ add(r1, r1, Operand(1)); |
3822 __ str(r1, MemOperand(r0)); | 3705 __ str(r1, MemOperand(r0)); |
3823 } | 3706 } |
3824 | 3707 |
3825 // Call C built-in. | 3708 // Call C built-in. |
3826 // r0 = argc, r1 = argv | 3709 // r4 = argc, r5 = argv, r6 = isolate |
3827 __ mov(r0, Operand(r4)); | 3710 // __ mov(r4, sh4_r8); |
3828 __ mov(r1, Operand(r6)); | 3711 // __ mov(r5, sh4_r10); |
| 3712 // sh4: ref to ::Generate that stored into the stack |
| 3713 __ ldr(r4, MemOperand(sp, (1+0)*kPointerSize)); |
| 3714 __ ldr(r5, MemOperand(sp, (1+2)*kPointerSize)); |
3829 | 3715 |
3830 #if defined(V8_HOST_ARCH_ARM) | 3716 #if defined(V8_HOST_ARCH_SH4) |
3831 int frame_alignment = MacroAssembler::ActivationFrameAlignment(); | 3717 int frame_alignment = OS::ActivationFrameAlignment(); |
3832 int frame_alignment_mask = frame_alignment - 1; | 3718 int frame_alignment_mask = frame_alignment - 1; |
3833 if (FLAG_debug_code) { | 3719 if (FLAG_debug_code) { |
3834 if (frame_alignment > kPointerSize) { | 3720 if (frame_alignment > kPointerSize) { |
3835 Label alignment_as_expected; | 3721 Label alignment_as_expected; |
3836 ASSERT(IsPowerOf2(frame_alignment)); | 3722 ASSERT(IsPowerOf2(frame_alignment)); |
3837 __ tst(sp, Operand(frame_alignment_mask)); | 3723 __ tst(sp, Operand(frame_alignment_mask)); |
3838 __ b(eq, &alignment_as_expected); | 3724 __ b(eq, &alignment_as_expected); |
3839 // Don't use Check here, as it will call Runtime_Abort re-entering here. | 3725 // Don't use Check here, as it will call Runtime_Abort re-entering here. |
3840 __ stop("Unexpected alignment"); | 3726 __ stop("Unexpected alignment"); |
3841 __ bind(&alignment_as_expected); | 3727 __ bind(&alignment_as_expected); |
3842 } | 3728 } |
3843 } | 3729 } |
3844 #endif | 3730 #endif |
3845 | 3731 |
3846 __ mov(r2, Operand(ExternalReference::isolate_address())); | 3732 __ mov(r6, Operand(ExternalReference::isolate_address())); |
| 3733 |
| 3734 // sh4: ref to ::Generate() that stored the builtin into the stack |
| 3735 __ ldr(r2, MemOperand(sp, (1+1)*kPointerSize)); |
3847 | 3736 |
3848 // To let the GC traverse the return address of the exit frames, we need to | 3737 // To let the GC traverse the return address of the exit frames, we need to |
3849 // know where the return address is. The CEntryStub is unmovable, so | 3738 // know where the return address is. The CEntryStub is unmovable, so |
3850 // we can store the address on the stack to be able to find it again and | 3739 // we can store the address on the stack to be able to find it again and |
3851 // we never have to restore it, because it will not change. | 3740 // we never have to restore it, because it will not change. |
3852 // Compute the return address in lr to return to after the jump below. Pc is | 3741 // Compute the return address in lr to return to after the jump below. Pc is |
3853 // already at '+ 8' from the current instruction but return is after three | 3742 // already at '+ 8' from the current instruction but return is after three |
3854 // instructions so add another 4 to pc to get the return address. | 3743 // instructions so add another 4 to pc to get the return address. |
3855 { | 3744 |
3856 // Prevent literal pool emission before return address. | 3745 // Compute the return address in pr to return to after the jsr below. |
3857 Assembler::BlockConstPoolScope block_const_pool(masm); | 3746 // We use the addpc operation for this with an offset of 6. |
3858 masm->add(lr, pc, Operand(4)); | 3747 // We add 3 * kInstrSize to the pc after the addpc for the size of |
3859 __ str(lr, MemOperand(sp, 0)); | 3748 // the sequence: [str, jsr, nop(delay slot)]. |
3860 masm->Jump(r5); | 3749 __ addpc(r3, 3 * Assembler::kInstrSize, pr); |
3861 } | 3750 #ifdef DEBUG |
| 3751 int old_pc = masm->pc_offset(); |
| 3752 #endif |
| 3753 __ str(r3, MemOperand(sp, 0)); |
| 3754 __ jsr(r2); |
| 3755 // __ jsr(sh4_r9); |
| 3756 #ifdef DEBUG |
| 3757 ASSERT(masm->pc_offset() - old_pc == 3 * Assembler::kInstrSize); |
| 3758 #endif |
3862 | 3759 |
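The addpc above replaces ARM's "add lr, pc, #4" idiom: the stored return address must point just past the [str, jsr, nop-in-delay-slot] triple that follows it. A tiny sketch of the offset arithmetic, assuming SH4's fixed 2-byte kInstrSize:

    #include <cstdio>

    const int kInstrSize = 2;  // assumed: SH4 instructions are 16 bits wide

    int main() {
      // addpc yields the address just after itself plus an offset; three
      // instructions (str, jsr, delay-slot nop) sit between it and the return
      // point, hence the 3 * kInstrSize in the stub (checked by the ASSERT).
      std::printf("return address offset = %d bytes\n", 3 * kInstrSize);
      return 0;
    }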
3863 if (always_allocate) { | 3760 if (always_allocate) { |
3864 // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1 | 3761 // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1 |
3865 // though (contain the result). | 3762 // though (contain the result). |
3866 __ mov(r2, Operand(scope_depth)); | 3763 __ mov(r2, Operand(scope_depth)); |
3867 __ ldr(r3, MemOperand(r2)); | 3764 __ ldr(r3, MemOperand(r2)); |
3868 __ sub(r3, r3, Operand(1)); | 3765 __ sub(r3, r3, Operand(1)); |
3869 __ str(r3, MemOperand(r2)); | 3766 __ str(r3, MemOperand(r2)); |
3870 } | 3767 } |
3871 | 3768 |
3872 // check for failure result | 3769 // check for failure result |
3873 Label failure_returned; | 3770 Label failure_returned; |
3874 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); | 3771 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); |
3875 // Lower 2 bits of r2 are 0 iff r0 has failure tag. | 3772 // Lower 2 bits of r2 are 0 iff r0 has failure tag. |
3876 __ add(r2, r0, Operand(1)); | 3773 __ add(r2, r0, Operand(1)); |
3877 __ tst(r2, Operand(kFailureTagMask)); | 3774 __ tst(r2, Operand(kFailureTagMask)); |
3878 __ b(eq, &failure_returned); | 3775 __ b(eq, &failure_returned); |
3879 | 3776 |
3880 // Exit C frame and return. | 3777 // Exit C frame and return. |
3881 // r0:r1: result | 3778 // r0:r1: result |
3882 // sp: stack pointer | 3779 // sp: stack pointer |
3883 // fp: frame pointer | 3780 // fp: frame pointer |
3884 // Callee-saved register r4 still holds argc. | 3781 // Callee-saved register sh4_r8 still holds argc. |
3885 __ LeaveExitFrame(save_doubles_, r4); | 3782 // sh4: stored on stack into ::Generate() |
3886 __ mov(pc, lr); | 3783 __ ldr(r2, MemOperand(sp, (1+0)*kPointerSize)); |
| 3784 __ LeaveExitFrame(save_doubles_, r2); |
| 3785 // __ LeaveExitFrame(save_doubles_, sh4_r8); |
| 3786 __ rts(); |
3887 | 3787 |
3888 // check if we should retry or throw exception | 3788 // check if we should retry or throw exception |
3889 Label retry; | 3789 Label retry; |
3890 __ bind(&failure_returned); | 3790 __ bind(&failure_returned); |
3891 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); | 3791 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); |
3892 __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); | 3792 __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); |
3893 __ b(eq, &retry); | 3793 __ b(eq, &retry, Label::kNear); |
3894 | 3794 |
3895 // Special handling of out of memory exceptions. | 3795 // Special handling of out of memory exceptions. |
3896 Failure* out_of_memory = Failure::OutOfMemoryException(); | 3796 Failure* out_of_memory = Failure::OutOfMemoryException(); |
3897 __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory))); | 3797 __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory))); |
3898 __ b(eq, throw_out_of_memory_exception); | 3798 __ b(eq, throw_out_of_memory_exception); |
3899 | 3799 |
3900 // Retrieve the pending exception and clear the variable. | 3800 // Retrieve the pending exception and clear the variable. |
3901 __ mov(r3, Operand(isolate->factory()->the_hole_value())); | 3801 __ mov(r3, Operand(isolate->factory()->the_hole_value())); |
3902 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 3802 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
3903 isolate))); | 3803 isolate))); |
3904 __ ldr(r0, MemOperand(ip)); | 3804 __ ldr(r0, MemOperand(ip)); |
3905 __ str(r3, MemOperand(ip)); | 3805 __ str(r3, MemOperand(ip)); |
3906 | 3806 |
3907 // Special handling of termination exceptions which are uncatchable | 3807 // Special handling of termination exceptions which are uncatchable |
3908 // by javascript code. | 3808 // by javascript code. |
3909 __ cmp(r0, Operand(isolate->factory()->termination_exception())); | 3809 __ mov(r3, Operand(isolate->factory()->termination_exception())); |
| 3810 __ cmpeq(r0, r3); |
3910 __ b(eq, throw_termination_exception); | 3811 __ b(eq, throw_termination_exception); |
3911 | 3812 |
3912 // Handle normal exception. | 3813 // Handle normal exception. |
3913 __ jmp(throw_normal_exception); | 3814 __ jmp(throw_normal_exception); |
3914 | 3815 |
3915 __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying | 3816 __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying |
3916 } | 3817 } |
3917 | 3818 |
3918 | 3819 |
3919 void CEntryStub::Generate(MacroAssembler* masm) { | 3820 void CEntryStub::Generate(MacroAssembler* masm) { |
3920 // Called from JavaScript; parameters are on stack as if calling JS function | 3821 // Called from JavaScript; parameters are on stack as if calling JS function |
3921 // r0: number of arguments including receiver | 3822 // r0: number of arguments including receiver |
3922 // r1: pointer to builtin function | 3823 // r1: pointer to builtin function |
3923 // fp: frame pointer (restored after C call) | 3824 // fp: frame pointer (restored after C call) |
3924 // sp: stack pointer (restored as callee's sp after C call) | 3825 // sp: stack pointer (restored as callee's sp after C call) |
3925 // cp: current context (C callee-saved) | 3826 // cp: current context (C callee-saved) |
3926 | 3827 |
| 3828 // sh4: clobbers r3 |
3927 // Result returned in r0 or r0+r1 by default. | 3829 // Result returned in r0 or r0+r1 by default. |
3928 | 3830 |
3929 // NOTE: Invocations of builtins may return failure objects | 3831 // NOTE: Invocations of builtins may return failure objects |
3930 // instead of a proper result. The builtin entry handles | 3832 // instead of a proper result. The builtin entry handles |
3931 // this by performing a garbage collection and retrying the | 3833 // this by performing a garbage collection and retrying the |
3932 // builtin once. | 3834 // builtin once. |
3933 | 3835 |
3934 // Compute the argv pointer in a callee-saved register. | 3836 // Compute the argv pointer in a callee-saved register. |
3935 __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2)); | 3837 // sh4: will be saved on stack |
3936 __ sub(r6, r6, Operand(kPointerSize)); | 3838 // __ lsl(sh4_r10, r0, Operand(kPointerSizeLog2)); |
| 3839 // __ add(sh4_r10, sp, sh4_r10); |
| 3840 // __ sub(sh4_r10, sh4_r10, Operand(kPointerSize)); |
| 3841 __ lsl(r3, r0, Operand(kPointerSizeLog2)); |
| 3842 __ add(r3, sp, r3); |
| 3843 __ sub(r3, r3, Operand(kPointerSize)); |
3937 | 3844 |
3938 // Enter the exit frame that transitions from JavaScript to C++. | 3845 // Enter the exit frame that transitions from JavaScript to C++. |
3939 FrameScope scope(masm, StackFrame::MANUAL); | 3846 FrameScope scope(masm, StackFrame::MANUAL); |
3940 __ EnterExitFrame(save_doubles_); | 3847 // SH4: Reserve space for 3 stack locations |
| 3848 __ EnterExitFrame(save_doubles_, 3); |
3941 | 3849 |
3942 // Set up argc and the builtin function in callee-saved registers. | 3850 // Set up argc and the builtin function. sh4: save them on the stack |
3943 __ mov(r4, Operand(r0)); | 3851 // instead of in callee-saved registers; sp then contains: sp[0] == lr; |
3944 __ mov(r5, Operand(r1)); | 3852 // sp[1] == argc; sp[2] == builtin; sp[3] == argv |
| 3853 // __ mov(sh4_r8, r0); |
| 3854 // __ mov(sh4_r9, r1); |
| 3855 __ str(r0, MemOperand(sp, (1+0)*kPointerSize)); // skip lr location at sp[1] |
| 3856 __ str(r1, MemOperand(sp, (1+1)*kPointerSize)); |
| 3857 __ str(r3, MemOperand(sp, (1+2)*kPointerSize)); |
3945 | 3858 |
3946 // r4: number of arguments (C callee-saved) | 3859 // sh4_r8: number of arguments (C callee-saved) |
3947 // r5: pointer to builtin function (C callee-saved) | 3860 // sh4_r9: pointer to builtin function (C callee-saved) |
3948 // r6: pointer to first argument (C callee-saved) | 3861 // sh4_r10: pointer to first argument (C callee-saved) |
| 3862 // ASSERT(!sh4_r8.is(sh4_rtmp) && !sh4_r9.is(sh4_rtmp) && |
| 3863 // !sh4_r10.is(sh4_rtmp)); |
3949 | 3864 |
3950 Label throw_normal_exception; | 3865 Label throw_normal_exception; |
3951 Label throw_termination_exception; | 3866 Label throw_termination_exception; |
3952 Label throw_out_of_memory_exception; | 3867 Label throw_out_of_memory_exception; |
3953 | 3868 |
3954 // Call into the runtime system. | 3869 // Call into the runtime system. |
3955 GenerateCore(masm, | 3870 GenerateCore(masm, |
3956 &throw_normal_exception, | 3871 &throw_normal_exception, |
3957 &throw_termination_exception, | 3872 &throw_termination_exception, |
3958 &throw_out_of_memory_exception, | 3873 &throw_out_of_memory_exception, |
(...skipping 37 matching lines...)
3996 | 3911 |
3997 __ bind(&throw_termination_exception); | 3912 __ bind(&throw_termination_exception); |
3998 __ ThrowUncatchable(r0); | 3913 __ ThrowUncatchable(r0); |
3999 | 3914 |
4000 __ bind(&throw_normal_exception); | 3915 __ bind(&throw_normal_exception); |
4001 __ Throw(r0); | 3916 __ Throw(r0); |
4002 } | 3917 } |
4003 | 3918 |
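With r8-r10 unsafe to hold live values across the C call here, the port parks argc, the builtin, and argv in the three exit-frame slots reserved above, just past the lr slot at sp[0]. A sketch of the (1+n)*kPointerSize offsets those str/ldr pairs use:

    #include <cstdio>

    const int kPointerSize = 4;  // SH4 is a 32-bit target

    // Slot 0 holds the return address; the reserved slots follow it.
    int SlotOffset(int n) { return (1 + n) * kPointerSize; }

    int main() {
      std::printf("argc@sp+%d builtin@sp+%d argv@sp+%d\n",
                  SlotOffset(0), SlotOffset(1), SlotOffset(2));  // 4 8 12
      return 0;
    }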
4004 | 3919 |
4005 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { | 3920 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { |
4006 // r0: code entry | 3921 // r4: code entry |
4007 // r1: function | 3922 // r5: function |
4008 // r2: receiver | 3923 // r6: receiver |
4009 // r3: argc | 3924 // r7: argc |
4010 // [sp+0]: argv | 3925 // [sp+0]: argv |
4011 | 3926 |
4012 Label invoke, handler_entry, exit; | 3927 Label invoke, handler_entry, exit; |
4013 | 3928 |
4014 // Called from C, so do not pop argc and args on exit (preserve sp) | 3929 // Save callee-saved registers |
4015 // No need to save register-passed args | 3930 __ push(pr); |
4016 // Save callee-saved registers (incl. cp and fp), sp, and lr | 3931 __ pushm(kCalleeSaved); |
4017 __ stm(db_w, sp, kCalleeSaved | lr.bit()); | |
4018 | 3932 |
4019 if (CpuFeatures::IsSupported(VFP2)) { | 3933 // We don't need to save the callee saved double registers: we only use the |
4020 CpuFeatures::Scope scope(VFP2); | 3934 // caller saved ones. |
4021 // Save callee-saved vfp registers. | |
4022 __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); | |
4023 // Set up the reserved register for 0.0. | |
4024 __ vmov(kDoubleRegZero, 0.0); | |
4025 } | |
4026 | 3935 |
4027 // Get address of argv, see stm above. | 3936 // Move the registers to use ARM ABI (and JS ABI) |
| 3937 __ mov(r0, r4); |
| 3938 __ mov(r1, r5); |
| 3939 __ mov(r2, r6); |
| 3940 __ mov(r3, r7); |
| 3941 |
| 3942 // Get address of argv |
4028 // r0: code entry | 3943 // r0: code entry |
4029 // r1: function | 3944 // r1: function |
4030 // r2: receiver | 3945 // r2: receiver |
4031 // r3: argc | 3946 // r3: argc |
4032 | 3947 |
4033 // Set up argv in r4. | 3948 // Set up argv in r4. |
4034 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; | 3949 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; |
4035 if (CpuFeatures::IsSupported(VFP2)) { | |
4036 offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; | |
4037 } | |
4038 __ ldr(r4, MemOperand(sp, offset_to_argv)); | 3950 __ ldr(r4, MemOperand(sp, offset_to_argv)); |
4039 | 3951 |
4040 // Push a frame with special values setup to mark it as an entry frame. | 3952 // Push a frame with special values setup to mark it as an entry frame. |
4041 // r0: code entry | 3953 // r0: code entry |
4042 // r1: function | 3954 // r1: function |
4043 // r2: receiver | 3955 // r2: receiver |
4044 // r3: argc | 3956 // r3: argc |
4045 // r4: argv | 3957 // r4: argv |
4046 Isolate* isolate = masm->isolate(); | 3958 Isolate* isolate = masm->isolate(); |
4047 __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used. | 3959 // Push a bad frame pointer to fail if it is used. |
| 3960 __ mov(ip, Operand(-1)); |
| 3961 __ push(ip); |
| 3962 |
4048 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; | 3963 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; |
4049 __ mov(r7, Operand(Smi::FromInt(marker))); | 3964 __ mov(r7, Operand(Smi::FromInt(marker))); |
4050 __ mov(r6, Operand(Smi::FromInt(marker))); | 3965 __ mov(r6, Operand(Smi::FromInt(marker))); |
4051 __ mov(r5, | 3966 __ mov(r5, |
4052 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); | 3967 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); |
4053 __ ldr(r5, MemOperand(r5)); | 3968 __ ldr(r5, MemOperand(r5)); |
4054 __ Push(r8, r7, r6, r5); | 3969 __ push(r7); |
| 3970 __ push(r6); |
| 3971 __ push(r5); |
4055 | 3972 |
4056 // Set up frame pointer for the frame to be pushed. | 3973 // Set up frame pointer for the frame to be pushed. |
4057 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); | 3974 __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); |
4058 | 3975 |
4059 // If this is the outermost JS call, set js_entry_sp value. | 3976 // If this is the outermost JS call, set js_entry_sp value. |
4060 Label non_outermost_js; | 3977 Label non_outermost_js; |
4061 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); | 3978 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); |
4062 __ mov(r5, Operand(ExternalReference(js_entry_sp))); | 3979 __ mov(r5, Operand(ExternalReference(js_entry_sp))); |
4063 __ ldr(r6, MemOperand(r5)); | 3980 __ ldr(r6, MemOperand(r5)); |
4064 __ cmp(r6, Operand::Zero()); | 3981 __ cmp(r6, Operand(0)); |
4065 __ b(ne, &non_outermost_js); | 3982 __ b(ne, &non_outermost_js, Label::kNear); |
4066 __ str(fp, MemOperand(r5)); | 3983 __ str(fp, MemOperand(r5)); |
4067 __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); | 3984 __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); |
4068 Label cont; | 3985 Label cont; |
4069 __ b(&cont); | 3986 __ b_near(&cont); |
4070 __ bind(&non_outermost_js); | 3987 __ bind(&non_outermost_js); |
4071 __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME))); | 3988 __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME))); |
4072 __ bind(&cont); | 3989 __ bind(&cont); |
4073 __ push(ip); | 3990 __ push(ip); |
4074 | 3991 |
4075 // Jump to a faked try block that does the invoke, with a faked catch | 3992 // Jump to a faked try block that does the invoke, with a faked catch |
4076 // block that sets the pending exception. | 3993 // block that sets the pending exception. |
4077 __ jmp(&invoke); | 3994 __ jmp(&invoke); |
4078 | 3995 |
4079 // Block literal pool emission whilst taking the position of the handler | 3996 // Block literal pool emission whilst taking the position of the handler |
4080 // entry. This avoids making the assumption that literal pools are always | 3997 // entry. This avoids making the assumption that literal pools are always |
4081 // emitted after an instruction is emitted, rather than before. | 3998 // emitted after an instruction is emitted, rather than before. |
4082 { | 3999 { |
4083 Assembler::BlockConstPoolScope block_const_pool(masm); | 4000 // TODO(STM): block constant pool |
4084 __ bind(&handler_entry); | 4001 __ bind(&handler_entry); |
4085 handler_offset_ = handler_entry.pos(); | 4002 handler_offset_ = handler_entry.pos(); |
4086 // Caught exception: Store result (exception) in the pending exception | 4003 // Caught exception: Store result (exception) in the pending exception |
4087 // field in the JSEnv and return a failure sentinel. Coming in here the | 4004 // field in the JSEnv and return a failure sentinel. Coming in here the |
4088 // fp will be invalid because the PushTryHandler below sets it to 0 to | 4005 // fp will be invalid because the PushTryHandler below sets it to 0 to |
4089 // signal the existence of the JSEntry frame. | 4006 // signal the existence of the JSEntry frame. |
4090 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 4007 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
4091 isolate))); | 4008 isolate))); |
4092 } | 4009 } |
4093 __ str(r0, MemOperand(ip)); | 4010 __ str(r0, MemOperand(ip)); |
4094 __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); | 4011 __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); |
4095 __ b(&exit); | 4012 __ b(&exit); |
4096 | 4013 |
4097 // Invoke: Link this frame into the handler chain. There's only one | 4014 // Invoke: Link this frame into the handler chain. There's only one |
4098 // handler block in this code object, so its index is 0. | 4015 // handler block in this code object, so its index is 0. |
4099 __ bind(&invoke); | 4016 __ bind(&invoke); |
4100 // Must preserve r0-r4, r5-r7 are available. | 4017 // Must preserve r0-r4, r5-r7 are available. |
4101 __ PushTryHandler(StackHandler::JS_ENTRY, 0); | 4018 __ PushTryHandler(StackHandler::JS_ENTRY, 0); |
4102 // If an exception not caught by another handler occurs, this handler | 4019 // If an exception not caught by another handler occurs, this handler |
4103 // returns control to the code after the bl(&invoke) above, which | 4020 // returns control to the code after the jmp(&invoke) above, which |
4104 // restores all kCalleeSaved registers (including cp and fp) to their | 4021 // restores all kCalleeSaved registers (including cp and fp) to their |
4105 // saved values before returning a failure to C. | 4022 // saved values before returning a failure to C. |
4106 | 4023 |
4107 // Clear any pending exceptions. | 4024 // Clear any pending exceptions. |
4108 __ mov(r5, Operand(isolate->factory()->the_hole_value())); | 4025 __ mov(r5, Operand(isolate->factory()->the_hole_value())); |
4109 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 4026 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, |
4110 isolate))); | 4027 isolate))); |
4111 __ str(r5, MemOperand(ip)); | 4028 __ str(r5, MemOperand(ip)); |
4112 | 4029 |
4113 // Invoke the function by calling through JS entry trampoline builtin. | 4030 // Invoke the function by calling through JS entry trampoline builtin. |
4114 // Notice that we cannot store a reference to the trampoline code directly in | 4031 // Notice that we cannot store a reference to the trampoline code directly in |
4115 // this stub, because runtime stubs are not traversed when doing GC. | 4032 // this stub, because runtime stubs are not traversed when doing GC. |
4116 | 4033 |
4117 // Expected registers by Builtins::JSEntryTrampoline | 4034 // Expected registers by Builtins::JSEntryTrampoline |
4118 // r0: code entry | 4035 // r0: code entry |
4119 // r1: function | 4036 // r1: function |
4120 // r2: receiver | 4037 // r2: receiver |
4121 // r3: argc | 4038 // r3: argc |
4122 // r4: argv | 4039 // r4: argv |
4123 if (is_construct) { | 4040 if (is_construct) { |
4124 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline, | 4041 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline, |
4125 isolate); | 4042 isolate); |
4126 __ mov(ip, Operand(construct_entry)); | 4043 __ mov(ip, Operand(construct_entry)); |
4127 } else { | 4044 } else { |
4128 ExternalReference entry(Builtins::kJSEntryTrampoline, isolate); | 4045 ExternalReference entry(Builtins::kJSEntryTrampoline, isolate); |
4129 __ mov(ip, Operand(entry)); | 4046 __ mov(ip, Operand(entry)); |
4130 } | 4047 } |
4131 __ ldr(ip, MemOperand(ip)); // deref address | 4048 __ ldr(ip, MemOperand(ip)); // deref address |
4132 | 4049 |
4133 // Branch and link to JSEntryTrampoline. We don't use the double underscore | 4050 // Call through to JSEntryTrampoline. |
4134 // macro for the add instruction because we don't want the coverage tool | 4051 __ add(ip, Operand(Code::kHeaderSize - kHeapObjectTag)); |
4135 // inserting instructions here after we read the pc. We block literal pool | 4052 __ jsr(ip); |
4136 // emission for the same reason. | |
4137 { | |
4138 Assembler::BlockConstPoolScope block_const_pool(masm); | |
4139 __ mov(lr, Operand(pc)); | |
4140 masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
4141 } | |
4142 | 4053 |
4143 // Unlink this frame from the handler chain. | 4054 // Unlink this frame from the handler chain. |
4144 __ PopTryHandler(); | 4055 __ PopTryHandler(); |
4145 | 4056 |
4146 __ bind(&exit); // r0 holds result | 4057 __ bind(&exit); // r0 holds result |
4147 // Check if the current stack frame is marked as the outermost JS frame. | 4058 // Check if the current stack frame is marked as the outermost JS frame. |
4148 Label non_outermost_js_2; | 4059 Label non_outermost_js_2; |
4149 __ pop(r5); | 4060 __ pop(r5); |
4150 __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); | 4061 __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); |
4151 __ b(ne, &non_outermost_js_2); | 4062 __ b(ne, &non_outermost_js_2, Label::kNear); |
4152 __ mov(r6, Operand::Zero()); | 4063 __ mov(r6, Operand(0)); |
4153 __ mov(r5, Operand(ExternalReference(js_entry_sp))); | 4064 __ mov(r5, Operand(ExternalReference(js_entry_sp))); |
4154 __ str(r6, MemOperand(r5)); | 4065 __ str(r6, MemOperand(r5)); |
4155 __ bind(&non_outermost_js_2); | 4066 __ bind(&non_outermost_js_2); |
4156 | 4067 |
4157 // Restore the top frame descriptors from the stack. | 4068 // Restore the top frame descriptors from the stack. |
4158 __ pop(r3); | 4069 __ pop(r3); |
4159 __ mov(ip, | 4070 __ mov(ip, |
4160 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); | 4071 Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); |
4161 __ str(r3, MemOperand(ip)); | 4072 __ str(r3, MemOperand(ip)); |
4162 | 4073 |
4163 // Reset the stack to the callee saved registers. | 4074 // Reset the stack to the callee saved registers. |
4164 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); | 4075 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); |
4165 | 4076 |
4166 // Restore callee-saved registers and return. | 4077 // Restore callee-saved registers and return. |
4167 #ifdef DEBUG | 4078 __ popm(kCalleeSaved); |
4168 if (FLAG_debug_code) { | 4079 __ pop(pr); |
4169 __ mov(lr, Operand(pc)); | |
4170 } | |
4171 #endif | |
4172 | 4080 |
4173 if (CpuFeatures::IsSupported(VFP2)) { | 4081 __ rts(); |
4174 CpuFeatures::Scope scope(VFP2); | |
4175 // Restore callee-saved vfp registers. | |
4176 __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); | |
4177 } | |
4178 | |
4179 __ ldm(ia_w, sp, kCalleeSaved | pc.bit()); | |
4180 } | 4082 } |
4181 | 4083 |
4182 | 4084 |
4183 // Uses registers r0 to r4. | 4085 // Uses registers r0 to r4. |
4184 // Expected input (depending on whether args are in registers or on the stack): | 4086 // Expected input (depending on whether args are in registers or on the stack): |
4185 // * object: r0 or at sp + 1 * kPointerSize. | 4087 // * object: r0 or at sp + 1 * kPointerSize. |
4186 // * function: r1 or at sp. | 4088 // * function: r1 or at sp. |
4187 // | 4089 // |
4188 // An inlined call site may have been generated before calling this stub. | 4090 // An inlined call site may have been generated before calling this stub. |
4189 // In this case the offset to the inline site to patch is passed on the stack, | 4091 // In this case the offset to the inline site to patch is passed on the stack, |
(...skipping 70 matching lines...) |
4260 // Get prototype of object into r2. | 4162 // Get prototype of object into r2. |
4261 __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); | 4163 __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); |
4262 | 4164 |
4263 // We don't need map any more. Use it as a scratch register. | 4165 // We don't need map any more. Use it as a scratch register. |
4264 Register scratch2 = map; | 4166 Register scratch2 = map; |
4265 map = no_reg; | 4167 map = no_reg; |
4266 | 4168 |
4267 // Loop through the prototype chain looking for the function prototype. | 4169 // Loop through the prototype chain looking for the function prototype. |
4268 __ LoadRoot(scratch2, Heap::kNullValueRootIndex); | 4170 __ LoadRoot(scratch2, Heap::kNullValueRootIndex); |
4269 __ bind(&loop); | 4171 __ bind(&loop); |
4270 __ cmp(scratch, Operand(prototype)); | 4172 __ cmp(scratch, prototype); |
4271 __ b(eq, &is_instance); | 4173 __ b(eq, &is_instance, Label::kNear); |
4272 __ cmp(scratch, scratch2); | 4174 __ cmp(scratch, scratch2); |
4273 __ b(eq, &is_not_instance); | 4175 __ b(eq, &is_not_instance); |
4274 __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); | 4176 __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); |
4275 __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); | 4177 __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); |
4276 __ jmp(&loop); | 4178 __ jmp(&loop); |
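Reviewer note: a C++ model of the prototype-chain walk above (illustration only; the struct is an assumption, not V8's object layout). The loop exits at &is_instance when the function prototype is found, and at &is_not_instance when null terminates the chain.

  #include <cstdio>

  struct Obj { const Obj* prototype; };

  bool IsInstance(const Obj* obj, const Obj* fn_prototype) {
    for (const Obj* p = obj->prototype; p != nullptr; p = p->prototype) {
      if (p == fn_prototype) return true;  // &is_instance
    }
    return false;                          // &is_not_instance
  }

  int main() {
    Obj proto{nullptr}, mid{&proto}, obj{&mid};
    std::printf("%d %d\n", IsInstance(&obj, &proto),   // 1
                           IsInstance(&proto, &mid));  // 0
  }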
4277 | 4179 |
4278 __ bind(&is_instance); | 4180 __ bind(&is_instance); |
4279 if (!HasCallSiteInlineCheck()) { | 4181 if (!HasCallSiteInlineCheck()) { |
4280 __ mov(r0, Operand(Smi::FromInt(0))); | 4182 __ mov(r0, Operand(Smi::FromInt(0))); |
4281 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); | 4183 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); |
(...skipping 27 matching lines...) |
4309 __ mov(r0, Operand(Smi::FromInt(1))); | 4211 __ mov(r0, Operand(Smi::FromInt(1))); |
4310 } | 4212 } |
4311 } | 4213 } |
4312 __ Ret(HasArgsInRegisters() ? 0 : 2); | 4214 __ Ret(HasArgsInRegisters() ? 0 : 2); |
4313 | 4215 |
4314 Label object_not_null, object_not_null_or_smi; | 4216 Label object_not_null, object_not_null_or_smi; |
4315 __ bind(¬_js_object); | 4217 __ bind(¬_js_object); |
4316 // Before null, smi and string value checks, check that the rhs is a function | 4218 // Before null, smi and string value checks, check that the rhs is a function |
4317 // as for a non-function rhs an exception needs to be thrown. | 4219 // as for a non-function rhs an exception needs to be thrown. |
4318 __ JumpIfSmi(function, &slow); | 4220 __ JumpIfSmi(function, &slow); |
4319 __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE); | 4221 __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE, eq); |
4320 __ b(ne, &slow); | 4222 __ b(ne, &slow); |
4321 | 4223 |
4322 // Null is not instance of anything. | 4224 // Null is not instance of anything. |
4323 __ cmp(scratch, Operand(masm->isolate()->factory()->null_value())); | 4225 __ cmp(scratch, Operand(masm->isolate()->factory()->null_value())); |
4324 __ b(ne, &object_not_null); | 4226 __ b(ne, &object_not_null, Label::kNear); |
4325 __ mov(r0, Operand(Smi::FromInt(1))); | 4227 __ mov(r0, Operand(Smi::FromInt(1))); |
4326 __ Ret(HasArgsInRegisters() ? 0 : 2); | 4228 __ Ret(HasArgsInRegisters() ? 0 : 2); |
4327 | 4229 |
4328 __ bind(&object_not_null); | 4230 __ bind(&object_not_null); |
4329 // Smi values are not instances of anything. | 4231 // Smi values are not instances of anything. |
4330 __ JumpIfNotSmi(object, &object_not_null_or_smi); | 4232 __ JumpIfNotSmi(object, &object_not_null_or_smi); |
4331 __ mov(r0, Operand(Smi::FromInt(1))); | 4233 __ mov(r0, Operand(Smi::FromInt(1))); |
4332 __ Ret(HasArgsInRegisters() ? 0 : 2); | 4234 __ Ret(HasArgsInRegisters() ? 0 : 2); |
4333 | 4235 |
4334 __ bind(&object_not_null_or_smi); | 4236 __ bind(&object_not_null_or_smi); |
4335 // String values are not instances of anything. | 4237 // String values are not instances of anything. |
4336 __ IsObjectJSStringType(object, scratch, &slow); | 4238 __ IsObjectJSStringType(object, scratch, &slow); |
4337 __ mov(r0, Operand(Smi::FromInt(1))); | 4239 __ mov(r0, Operand(Smi::FromInt(1))); |
4338 __ Ret(HasArgsInRegisters() ? 0 : 2); | 4240 __ Ret(HasArgsInRegisters() ? 0 : 2); |
4339 | 4241 |
4340 // Slow-case. Tail call builtin. | 4242 // Slow-case. Tail call builtin. |
4341 __ bind(&slow); | 4243 __ bind(&slow); |
4342 if (!ReturnTrueFalseObject()) { | 4244 if (!ReturnTrueFalseObject()) { |
4343 if (HasArgsInRegisters()) { | 4245 if (HasArgsInRegisters()) { |
4344 __ Push(r0, r1); | 4246 __ Push(r0, r1); |
4345 } | 4247 } |
4346 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); | 4248 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); |
4347 } else { | 4249 } else { |
4348 { | 4250 { |
4349 FrameScope scope(masm, StackFrame::INTERNAL); | 4251 FrameScope scope(masm, StackFrame::INTERNAL); |
4350 __ Push(r0, r1); | 4252 __ Push(r0, r1); |
4351 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); | 4253 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); |
4352 } | 4254 } |
4353 __ cmp(r0, Operand::Zero()); | 4255 Label ltrue, lfalse; |
4354 __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq); | 4256 __ cmp(r0, Operand(0)); |
4355 __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne); | 4257 __ bf_near(&lfalse); |
| 4258 __ LoadRoot(r0, Heap::kTrueValueRootIndex); |
| 4259 __ jmp_near(<rue); |
| 4260 __ bind(&lfalse); |
| 4261 __ LoadRoot(r0, Heap::kFalseValueRootIndex); |
| 4262 __ bind(<rue); |
4356 __ Ret(HasArgsInRegisters() ? 0 : 2); | 4263 __ Ret(HasArgsInRegisters() ? 0 : 2); |
4357 } | 4264 } |
4358 } | 4265 } |
4359 | 4266 |
4360 | 4267 |
4361 Register InstanceofStub::left() { return r0; } | 4268 Register InstanceofStub::left() { return r0; } |
4362 | 4269 |
4363 | 4270 |
4364 Register InstanceofStub::right() { return r1; } | 4271 Register InstanceofStub::right() { return r1; } |
4365 | 4272 |
4366 | 4273 |
4367 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { | 4274 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { |
4368 // The displacement is the offset of the last parameter (if any) | 4275 // The displacement is the offset of the last parameter (if any) |
4369 // relative to the frame pointer. | 4276 // relative to the frame pointer. |
4370 const int kDisplacement = | 4277 const int kDisplacement = |
4371 StandardFrameConstants::kCallerSPOffset - kPointerSize; | 4278 StandardFrameConstants::kCallerSPOffset - kPointerSize; |
4372 | 4279 |
4373 // Check that the key is a smi. | 4280 // Check that the key is a smi. |
4374 Label slow; | 4281 Label slow; |
4375 __ JumpIfNotSmi(r1, &slow); | 4282 __ JumpIfNotSmi(r1, &slow, Label::kNear); |
4376 | 4283 |
4377 // Check if the calling frame is an arguments adaptor frame. | 4284 // Check if the calling frame is an arguments adaptor frame. |
4378 Label adaptor; | 4285 Label adaptor; |
4379 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 4286 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
4380 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); | 4287 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); |
4381 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 4288 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
4382 __ b(eq, &adaptor); | 4289 __ b(eq, &adaptor, Label::kNear); |
4383 | 4290 |
4384 // Check index against formal parameter count limit passed in | 4291 // Check index against formal parameter count limit passed in |
4385 // through register r0. Use unsigned comparison to get negative | 4292 // through register r0. Use unsigned comparison to get negative |
4386 // check for free. | 4293 // check for free. |
4387 __ cmp(r1, r0); | 4294 __ cmphs(r1, r0); |
4388 __ b(hs, &slow); | 4295 __ bt_near(&slow); |
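Reviewer note: a small sketch of why the single unsigned compare (cmphs on SH4, hs on ARM) also rejects negative keys: any negative int32, viewed as uint32, is at least 2^31 and so never below a valid argument count.

  #include <cassert>
  #include <cstdint>

  bool in_range(int32_t key, int32_t argc) {
    // Mirrors cmphs/bt_near: slow path when (unsigned)key >= (unsigned)argc.
    return static_cast<uint32_t>(key) < static_cast<uint32_t>(argc);
  }

  int main() {
    assert(in_range(0, 3));
    assert(!in_range(3, 3));   // out of bounds
    assert(!in_range(-1, 3));  // negative caught by the same test
  }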
4389 | 4296 |
4390 // Read the argument from the stack and return it. | 4297 // Read the argument from the stack and return it. |
4391 __ sub(r3, r0, r1); | 4298 __ sub(r3, r0, r1); |
4392 __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); | 4299 __ lsl(r0, r3, Operand(kPointerSizeLog2 - kSmiTagSize)); |
| 4300 __ add(r3, fp, r0); |
4393 __ ldr(r0, MemOperand(r3, kDisplacement)); | 4301 __ ldr(r0, MemOperand(r3, kDisplacement)); |
4394 __ Jump(lr); | 4302 __ rts(); |
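Reviewer note: the lsl/add pair replaces ARM's shifted-operand add because SH4 has no embedded shifts. A sketch of the resulting address arithmetic, assuming 32-bit pointers (kPointerSizeLog2 == 2) and a 1-bit smi tag (kSmiTagSize == 1): a smi index n already holds 2n, so one extra left shift yields the byte offset 4n.

  #include <cstdint>
  #include <cstdio>

  int main() {
    const int kPointerSizeLog2 = 2, kSmiTagSize = 1;
    int32_t smi_index = 3 << kSmiTagSize;  // smi encoding of 3
    intptr_t fp = 0x7fff0000;              // fake frame pointer
    intptr_t slot = fp + (static_cast<intptr_t>(smi_index)
                          << (kPointerSizeLog2 - kSmiTagSize));
    std::printf("offset = %ld\n", static_cast<long>(slot - fp));  // 12
  }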
4395 | 4303 |
4396 // Arguments adaptor case: Check index against actual arguments | 4304 // Arguments adaptor case: Check index against actual arguments |
4397 // limit found in the arguments adaptor frame. Use unsigned | 4305 // limit found in the arguments adaptor frame. Use unsigned |
4398 // comparison to get negative check for free. | 4306 // comparison to get negative check for free. |
4399 __ bind(&adaptor); | 4307 __ bind(&adaptor); |
4400 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 4308 __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
4401 __ cmp(r1, r0); | 4309 __ cmphs(r1, r0); |
4402 __ b(cs, &slow); | 4310 __ bt_near(&slow); |
4403 | 4311 |
4404 // Read the argument from the adaptor frame and return it. | 4312 // Read the argument from the adaptor frame and return it. |
4405 __ sub(r3, r0, r1); | 4313 __ sub(r3, r0, r1); |
4406 __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); | 4314 __ lsl(r0, r3, Operand(kPointerSizeLog2 - kSmiTagSize)); |
| 4315 __ add(r3, r2, r0); |
4407 __ ldr(r0, MemOperand(r3, kDisplacement)); | 4316 __ ldr(r0, MemOperand(r3, kDisplacement)); |
4408 __ Jump(lr); | 4317 __ rts(); |
4409 | 4318 |
4410 // Slow-case: Handle non-smi or out-of-bounds access to arguments | 4319 // Slow-case: Handle non-smi or out-of-bounds access to arguments |
4411 // by calling the runtime system. | 4320 // by calling the runtime system. |
4412 __ bind(&slow); | 4321 __ bind(&slow); |
4413 __ push(r1); | 4322 __ push(r1); |
4414 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); | 4323 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); |
4415 } | 4324 } |
4416 | 4325 |
4417 | 4326 |
4418 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) { | 4327 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) { |
4419 // sp[0] : number of parameters | 4328 // sp[0] : number of parameters |
4420 // sp[4] : receiver displacement | 4329 // sp[4] : receiver displacement |
4421 // sp[8] : function | 4330 // sp[8] : function |
4422 | 4331 |
4423 // Check if the calling frame is an arguments adaptor frame. | 4332 // Check if the calling frame is an arguments adaptor frame. |
4424 Label runtime; | 4333 Label runtime; |
4425 __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 4334 __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
4426 __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset)); | 4335 __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset)); |
4427 __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 4336 __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
4428 __ b(ne, &runtime); | 4337 __ b(ne, &runtime, Label::kNear); |
4429 | 4338 |
4430 // Patch the arguments.length and the parameters pointer in the current frame. | 4339 // Patch the arguments.length and the parameters pointer in the current frame. |
4431 __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 4340 __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
4432 __ str(r2, MemOperand(sp, 0 * kPointerSize)); | 4341 __ str(r2, MemOperand(sp, 0 * kPointerSize)); |
4433 __ add(r3, r3, Operand(r2, LSL, 1)); | 4342 __ lsl(ip, r2, Operand(1)); |
| 4343 __ add(r3, r3, ip); |
4434 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); | 4344 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); |
4435 __ str(r3, MemOperand(sp, 1 * kPointerSize)); | 4345 __ str(r3, MemOperand(sp, 1 * kPointerSize)); |
4436 | 4346 |
4437 __ bind(&runtime); | 4347 __ bind(&runtime); |
4438 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); | 4348 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); |
4439 } | 4349 } |
4440 | 4350 |
4441 | 4351 |
4442 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { | 4352 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { |
4443 // Stack layout: | 4353 // Stack layout: |
4444 // sp[0] : number of parameters (tagged) | 4354 // sp[0] : number of parameters (tagged) |
4445 // sp[4] : address of receiver argument | 4355 // sp[4] : address of receiver argument |
4446 // sp[8] : function | 4356 // sp[8] : function |
4447 // Registers used over whole function: | 4357 // Registers used over whole function: |
4448 // r6 : allocated object (tagged) | 4358 // r6 : allocated object (tagged) |
4449 // r9 : mapped parameter count (tagged) | 4359 // r9 : mapped parameter count (tagged) |
4450 | 4360 |
4451 __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); | 4361 __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); |
4452 // r1 = parameter count (tagged) | 4362 // r1 = parameter count (tagged) |
4453 | 4363 |
4454 // Check if the calling frame is an arguments adaptor frame. | 4364 // Check if the calling frame is an arguments adaptor frame. |
4455 Label runtime; | 4365 Label runtime; |
4456 Label adaptor_frame, try_allocate; | 4366 Label adaptor_frame, try_allocate; |
4457 __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 4367 __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
4458 __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset)); | 4368 __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset)); |
4459 __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 4369 __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
4460 __ b(eq, &adaptor_frame); | 4370 __ b(eq, &adaptor_frame, Label::kNear); |
4461 | 4371 |
4462 // No adaptor, parameter count = argument count. | 4372 // No adaptor, parameter count = argument count. |
4463 __ mov(r2, r1); | 4373 __ mov(r2, r1); |
4464 __ b(&try_allocate); | 4374 __ b_near(&try_allocate); |
4465 | 4375 |
4466 // We have an adaptor frame. Patch the parameters pointer. | 4376 // We have an adaptor frame. Patch the parameters pointer. |
4467 __ bind(&adaptor_frame); | 4377 __ bind(&adaptor_frame); |
4468 __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 4378 __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
4469 __ add(r3, r3, Operand(r2, LSL, 1)); | 4379 __ lsl(ip, r2, Operand(1)); |
| 4380 __ add(r3, r3, ip); |
4470 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); | 4381 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); |
4471 __ str(r3, MemOperand(sp, 1 * kPointerSize)); | 4382 __ str(r3, MemOperand(sp, 1 * kPointerSize)); |
4472 | 4383 |
4473 // r1 = parameter count (tagged) | 4384 // r1 = parameter count (tagged) |
4474 // r2 = argument count (tagged) | 4385 // r2 = argument count (tagged) |
4475 // Compute the mapped parameter count = min(r1, r2) in r1. | 4386 // Compute the mapped parameter count = min(r1, r2) in r1. |
4476 __ cmp(r1, Operand(r2)); | 4387 __ cmpgt(r1, r2); |
4477 __ mov(r1, Operand(r2), LeaveCC, gt); | 4388 __ mov(r1, r2, t); |
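Reviewer note: a sketch of the SH4 min idiom above. cmpgt sets the T bit when r1 > r2, and the predicated mov copies r2 into r1 only in that case, standing in for ARM's mov with the gt condition.

  #include <cstdio>

  int main() {
    int r1 = 5, r2 = 3;
    bool t = (r1 > r2);  // cmpgt(r1, r2) sets T
    if (t) r1 = r2;      // mov(r1, r2, t) executes only when T is set
    std::printf("min = %d\n", r1);  // 3
  }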
4478 | 4389 |
4479 __ bind(&try_allocate); | 4390 __ bind(&try_allocate); |
4480 | 4391 |
4481 // Compute the sizes of backing store, parameter map, and arguments object. | 4392 // Compute the sizes of backing store, parameter map, and arguments object. |
4482 // 1. Parameter map, has 2 extra words containing context and backing store. | 4393 // 1. Parameter map, has 2 extra words containing context and backing store. |
4483 const int kParameterMapHeaderSize = | 4394 const int kParameterMapHeaderSize = |
4484 FixedArray::kHeaderSize + 2 * kPointerSize; | 4395 FixedArray::kHeaderSize + 2 * kPointerSize; |
4485 // If there are no mapped parameters, we do not need the parameter_map. | 4396 // If there are no mapped parameters, we do not need the parameter_map. |
4486 __ cmp(r1, Operand(Smi::FromInt(0))); | 4397 __ cmpeq(r1, Operand(Smi::FromInt(0))); |
4487 __ mov(r9, Operand::Zero(), LeaveCC, eq); | 4398 __ mov(r9, Operand(0), t); |
4488 __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne); | 4399 Label skip; |
4489 __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne); | 4400 __ bt_near(&skip); |
| 4401 __ lsl(r9, r1, Operand(1)); |
| 4402 __ add(r9, r9, Operand(kParameterMapHeaderSize)); |
| 4403 __ bind(&skip); |
4490 | 4404 |
4491 // 2. Backing store. | 4405 // 2. Backing store. |
4492 __ add(r9, r9, Operand(r2, LSL, 1)); | 4406 __ lsl(ip, r2, Operand(1)); |
| 4407 __ add(r9, r9, ip); |
4493 __ add(r9, r9, Operand(FixedArray::kHeaderSize)); | 4408 __ add(r9, r9, Operand(FixedArray::kHeaderSize)); |
4494 | 4409 |
4495 // 3. Arguments object. | 4410 // 3. Arguments object. |
4496 __ add(r9, r9, Operand(Heap::kArgumentsObjectSize)); | 4411 __ add(r9, r9, Operand(Heap::kArgumentsObjectSize)); |
4497 | 4412 |
4498 // Do the allocation of all three objects in one go. | 4413 // Do the allocation of all three objects in one go. |
4499 __ AllocateInNewSpace(r9, r0, r3, r4, &runtime, TAG_OBJECT); | 4414 __ AllocateInNewSpace(r9, r0, r3, r4, &runtime, TAG_OBJECT); |
4500 | 4415 |
4501 // r0 = address of new object(s) (tagged) | 4416 // r0 = address of new object(s) (tagged) |
4502 // r2 = argument count (tagged) | 4417 // r2 = argument count (tagged) |
4503 // Get the arguments boilerplate from the current native context into r4. | 4418 // Get the arguments boilerplate from the current native context into r4. |
4504 const int kNormalOffset = | 4419 const int kNormalOffset = |
4505 Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); | 4420 Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); |
4506 const int kAliasedOffset = | 4421 const int kAliasedOffset = |
4507 Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX); | 4422 Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX); |
4508 | 4423 |
4509 __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 4424 __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
4510 __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset)); | 4425 __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset)); |
4511 __ cmp(r1, Operand::Zero()); | 4426 __ cmp(r1, Operand(0)); |
4512 __ ldr(r4, MemOperand(r4, kNormalOffset), eq); | 4427 Label lf, end; |
4513 __ ldr(r4, MemOperand(r4, kAliasedOffset), ne); | 4428 __ bf_near(&lf); |
| 4429 __ ldr(r4, MemOperand(r4, kNormalOffset)); |
| 4430 __ b_near(&end); |
| 4431 __ bind(&lf); |
| 4432 __ ldr(r4, MemOperand(r4, kAliasedOffset)); |
| 4433 __ bind(&end); |
4514 | 4434 |
4515 // r0 = address of new object (tagged) | 4435 // r0 = address of new object (tagged) |
4516 // r1 = mapped parameter count (tagged) | 4436 // r1 = mapped parameter count (tagged) |
4517 // r2 = argument count (tagged) | 4437 // r2 = argument count (tagged) |
4518 // r4 = address of boilerplate object (tagged) | 4438 // r4 = address of boilerplate object (tagged) |
4519 // Copy the JS object part. | 4439 // Copy the JS object part. |
4520 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { | 4440 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { |
4521 __ ldr(r3, FieldMemOperand(r4, i)); | 4441 __ ldr(r3, FieldMemOperand(r4, i)); |
4522 __ str(r3, FieldMemOperand(r0, i)); | 4442 __ str(r3, FieldMemOperand(r0, i)); |
4523 } | 4443 } |
(...skipping 19 matching lines...) |
4543 | 4463 |
4544 // r0 = address of new object (tagged) | 4464 // r0 = address of new object (tagged) |
4545 // r1 = mapped parameter count (tagged) | 4465 // r1 = mapped parameter count (tagged) |
4546 // r2 = argument count (tagged) | 4466 // r2 = argument count (tagged) |
4547 // r4 = address of parameter map or backing store (tagged) | 4467 // r4 = address of parameter map or backing store (tagged) |
4548 // Initialize parameter map. If there are no mapped arguments, we're done. | 4468 // Initialize parameter map. If there are no mapped arguments, we're done. |
4549 Label skip_parameter_map; | 4469 Label skip_parameter_map; |
4550 __ cmp(r1, Operand(Smi::FromInt(0))); | 4470 __ cmp(r1, Operand(Smi::FromInt(0))); |
4551 // Move backing store address to r3, because it is | 4471 // Move backing store address to r3, because it is |
4552 // expected there when filling in the unmapped arguments. | 4472 // expected there when filling in the unmapped arguments. |
4553 __ mov(r3, r4, LeaveCC, eq); | 4473 __ mov(r3, r4, eq); |
4554 __ b(eq, &skip_parameter_map); | 4474 __ b(eq, &skip_parameter_map); |
4555 | 4475 |
4556 __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex); | 4476 __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex); |
4557 __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset)); | 4477 __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset)); |
4558 __ add(r6, r1, Operand(Smi::FromInt(2))); | 4478 __ add(r6, r1, Operand(Smi::FromInt(2))); |
4559 __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset)); | 4479 __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset)); |
4560 __ str(r8, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize)); | 4480 __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize)); |
4561 __ add(r6, r4, Operand(r1, LSL, 1)); | 4481 __ lsl(r6, r1, Operand(1)); |
| 4482 __ add(r6, r4, r6); |
4562 __ add(r6, r6, Operand(kParameterMapHeaderSize)); | 4483 __ add(r6, r6, Operand(kParameterMapHeaderSize)); |
4563 __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize)); | 4484 __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize)); |
4564 | 4485 |
4565 // Copy the parameter slots and the holes in the arguments. | 4486 // Copy the parameter slots and the holes in the arguments. |
4566 // We need to fill in mapped_parameter_count slots. They index the context, | 4487 // We need to fill in mapped_parameter_count slots. They index the context, |
4567 // where parameters are stored in reverse order, at | 4488 // where parameters are stored in reverse order, at |
4568 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 | 4489 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 |
4569 // The mapped parameters thus need to get indices | 4490 // The mapped parameters thus need to get indices |
4570 // MIN_CONTEXT_SLOTS+parameter_count-1 .. | 4491 // MIN_CONTEXT_SLOTS+parameter_count-1 .. |
4571 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count | 4492 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count |
4572 // We loop from right to left. | 4493 // We loop from right to left. |
4573 Label parameters_loop, parameters_test; | 4494 Label parameters_loop, parameters_test; |
4574 __ mov(r6, r1); | 4495 __ mov(r6, r1); |
4575 __ ldr(r9, MemOperand(sp, 0 * kPointerSize)); | 4496 __ ldr(r9, MemOperand(sp, 0 * kPointerSize)); |
4576 __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); | 4497 __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); |
4577 __ sub(r9, r9, Operand(r1)); | 4498 __ sub(r9, r9, r1); |
4578 __ LoadRoot(r7, Heap::kTheHoleValueRootIndex); | 4499 __ LoadRoot(r7, Heap::kTheHoleValueRootIndex); |
4579 __ add(r3, r4, Operand(r6, LSL, 1)); | 4500 __ lsl(r3, r6, Operand(1)); |
| 4501 __ add(r3, r4, r3); |
4580 __ add(r3, r3, Operand(kParameterMapHeaderSize)); | 4502 __ add(r3, r3, Operand(kParameterMapHeaderSize)); |
4581 | 4503 |
4582 // r6 = loop variable (tagged) | 4504 // r6 = loop variable (tagged) |
4583 // r1 = mapping index (tagged) | 4505 // r1 = mapping index (tagged) |
4584 // r3 = address of backing store (tagged) | 4506 // r3 = address of backing store (tagged) |
4585 // r4 = address of parameter map (tagged) | 4507 // r4 = address of parameter map (tagged) |
4586 // r5 = temporary scratch (among others, for address calculation) | 4508 // r5 = temporary scratch (among others, for address calculation) |
4587 // r7 = the hole value | 4509 // r7 = the hole value |
4588 __ jmp(¶meters_test); | 4510 __ jmp_near(¶meters_test); |
4589 | 4511 |
4590 __ bind(¶meters_loop); | 4512 __ bind(¶meters_loop); |
4591 __ sub(r6, r6, Operand(Smi::FromInt(1))); | 4513 __ sub(r6, r6, Operand(Smi::FromInt(1))); |
4592 __ mov(r5, Operand(r6, LSL, 1)); | 4514 __ lsl(r5, r6, Operand(1)); |
4593 __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag)); | 4515 __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag)); |
4594 __ str(r9, MemOperand(r4, r5)); | 4516 __ str(r9, MemOperand(r4, r5)); |
4595 __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize)); | 4517 __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize)); |
4596 __ str(r7, MemOperand(r3, r5)); | 4518 __ str(r7, MemOperand(r3, r5)); |
4597 __ add(r9, r9, Operand(Smi::FromInt(1))); | 4519 __ add(r9, r9, Operand(Smi::FromInt(1))); |
4598 __ bind(¶meters_test); | 4520 __ bind(¶meters_test); |
4599 __ cmp(r6, Operand(Smi::FromInt(0))); | 4521 __ cmp(r6, Operand(Smi::FromInt(0))); |
4600 __ b(ne, ¶meters_loop); | 4522 __ b(ne, ¶meters_loop); |
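Reviewer note: a worked example of the index scheme described before the loop, with an assumed Context::MIN_CONTEXT_SLOTS of 4 (illustrative only). The loop variable walks right to left while the context index climbs.

  #include <cstdio>

  int main() {
    const int kMinContextSlots = 4;  // assumption for the sketch
    int parameter_count = 5, mapped_count = 3;
    int index = kMinContextSlots + parameter_count - mapped_count;
    for (int l = mapped_count; l > 0; --l) {
      std::printf("mapped slot %d -> context index %d\n", l - 1, index);
      ++index;  // r9 is incremented by Smi::FromInt(1) each iteration
    }
  }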
4601 | 4523 |
4602 __ bind(&skip_parameter_map); | 4524 __ bind(&skip_parameter_map); |
4603 // r2 = argument count (tagged) | 4525 // r2 = argument count (tagged) |
4604 // r3 = address of backing store (tagged) | 4526 // r3 = address of backing store (tagged) |
4605 // r5 = scratch | 4527 // r5 = scratch |
4606 // Copy arguments header and remaining slots (if there are any). | 4528 // Copy arguments header and remaining slots (if there are any). |
4607 __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex); | 4529 __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex); |
4608 __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset)); | 4530 __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset)); |
4609 __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset)); | 4531 __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset)); |
4610 | 4532 |
4611 Label arguments_loop, arguments_test; | 4533 Label arguments_loop, arguments_test; |
4612 __ mov(r9, r1); | 4534 __ mov(r9, r1); |
4613 __ ldr(r4, MemOperand(sp, 1 * kPointerSize)); | 4535 __ ldr(r4, MemOperand(sp, 1 * kPointerSize)); |
4614 __ sub(r4, r4, Operand(r9, LSL, 1)); | 4536 __ lsl(r5, r9, Operand(1)); |
4615 __ jmp(&arguments_test); | 4537 __ sub(r4, r4, r5); |
| 4538 __ jmp_near(&arguments_test); |
4616 | 4539 |
4617 __ bind(&arguments_loop); | 4540 __ bind(&arguments_loop); |
4618 __ sub(r4, r4, Operand(kPointerSize)); | 4541 __ sub(r4, r4, Operand(kPointerSize)); |
4619 __ ldr(r6, MemOperand(r4, 0)); | 4542 __ ldr(r6, MemOperand(r4, 0)); |
4620 __ add(r5, r3, Operand(r9, LSL, 1)); | 4543 __ lsl(r5, r9, Operand(1)); |
| 4544 __ add(r5, r3, r5); |
4621 __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize)); | 4545 __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize)); |
4622 __ add(r9, r9, Operand(Smi::FromInt(1))); | 4546 __ add(r9, r9, Operand(Smi::FromInt(1))); |
4623 | 4547 |
4624 __ bind(&arguments_test); | 4548 __ bind(&arguments_test); |
4625 __ cmp(r9, Operand(r2)); | 4549 __ cmpge(r9, r2); |
4626 __ b(lt, &arguments_loop); | 4550 __ bf(&arguments_loop); |
4627 | 4551 |
4628 // Return and remove the on-stack parameters. | 4552 // Return and remove the on-stack parameters. |
4629 __ add(sp, sp, Operand(3 * kPointerSize)); | 4553 __ add(sp, sp, Operand(3 * kPointerSize)); |
4630 __ Ret(); | 4554 __ Ret(); |
4631 | 4555 |
4632 // Do the runtime call to allocate the arguments object. | 4556 // Do the runtime call to allocate the arguments object. |
4633 // r2 = argument count (tagged) | 4557 // r2 = argument count (tagged) |
4634 __ bind(&runtime); | 4558 __ bind(&runtime); |
4635 __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count. | 4559 __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count. |
4636 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); | 4560 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); |
4637 } | 4561 } |
4638 | 4562 |
4639 | 4563 |
4640 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { | 4564 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { |
4641 // sp[0] : number of parameters | 4565 // sp[0] : number of parameters |
4642 // sp[4] : receiver displacement | 4566 // sp[4] : receiver displacement |
4643 // sp[8] : function | 4567 // sp[8] : function |
4644 // Check if the calling frame is an arguments adaptor frame. | 4568 // Check if the calling frame is an arguments adaptor frame. |
4645 Label adaptor_frame, try_allocate, runtime; | 4569 Label adaptor_frame, try_allocate, runtime; |
4646 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 4570 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
4647 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); | 4571 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); |
4648 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 4572 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
4649 __ b(eq, &adaptor_frame); | 4573 __ b(eq, &adaptor_frame, Label::kNear); |
4650 | 4574 |
4651 // Get the length from the frame. | 4575 // Get the length from the frame. |
4652 __ ldr(r1, MemOperand(sp, 0)); | 4576 __ ldr(r1, MemOperand(sp, 0)); |
4653 __ b(&try_allocate); | 4577 __ b_near(&try_allocate); |
4654 | 4578 |
4655 // Patch the arguments.length and the parameters pointer. | 4579 // Patch the arguments.length and the parameters pointer. |
4656 __ bind(&adaptor_frame); | 4580 __ bind(&adaptor_frame); |
4657 __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 4581 __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
4658 __ str(r1, MemOperand(sp, 0)); | 4582 __ str(r1, MemOperand(sp, 0)); |
4659 __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize)); | 4583 __ lsl(r3, r1, Operand(kPointerSizeLog2 - kSmiTagSize)); |
| 4584 __ add(r3, r2, r3); |
4660 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); | 4585 __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); |
4661 __ str(r3, MemOperand(sp, 1 * kPointerSize)); | 4586 __ str(r3, MemOperand(sp, 1 * kPointerSize)); |
4662 | 4587 |
4663 // Try the new space allocation. Start out with computing the size | 4588 // Try the new space allocation. Start out with computing the size |
4664 // of the arguments object and the elements array in words. | 4589 // of the arguments object and the elements array in words. |
4665 Label add_arguments_object; | 4590 Label add_arguments_object; |
4666 __ bind(&try_allocate); | 4591 __ bind(&try_allocate); |
4667 __ cmp(r1, Operand(0, RelocInfo::NONE)); | 4592 __ cmp(r1, Operand(0, RelocInfo::NONE)); |
4668 __ b(eq, &add_arguments_object); | 4593 __ b(eq, &add_arguments_object, Label::kNear); |
4669 __ mov(r1, Operand(r1, LSR, kSmiTagSize)); | 4594 __ lsr(r1, r1, Operand(kSmiTagSize)); |
4670 __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize)); | 4595 __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize)); |
4671 __ bind(&add_arguments_object); | 4596 __ bind(&add_arguments_object); |
4672 __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize)); | 4597 __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize)); |
4673 | 4598 |
4674 // Do the allocation of both objects in one go. | 4599 // Do the allocation of both objects in one go. |
4675 __ AllocateInNewSpace(r1, | 4600 __ AllocateInNewSpace(r1, // object size |
4676 r0, | 4601 r0, // result |
4677 r2, | 4602 r2, // scratch1 |
4678 r3, | 4603 r3, // scratch2 |
4679 &runtime, | 4604 &runtime, |
4680 static_cast<AllocationFlags>(TAG_OBJECT | | 4605 static_cast<AllocationFlags>(TAG_OBJECT | |
4681 SIZE_IN_WORDS)); | 4606 SIZE_IN_WORDS)); |
4682 | 4607 |
4683 // Get the arguments boilerplate from the current native context. | 4608 // Get the arguments boilerplate from the current native context. |
4684 __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 4609 __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
4685 __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset)); | 4610 __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset)); |
4686 __ ldr(r4, MemOperand(r4, Context::SlotOffset( | 4611 __ ldr(r4, MemOperand(r4, Context::SlotOffset( |
4687 Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX))); | 4612 Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX))); |
4688 | 4613 |
4689 // Copy the JS object part. | 4614 // Copy the JS object part. |
4690 __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize); | 4615 __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize); |
4691 | 4616 |
4692 // Get the length (smi tagged) and set that as an in-object property too. | 4617 // Get the length (smi tagged) and set that as an in-object property too. |
4693 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); | 4618 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); |
4694 __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); | 4619 __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); |
4695 __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + | 4620 __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + |
4696 Heap::kArgumentsLengthIndex * kPointerSize)); | 4621 Heap::kArgumentsLengthIndex * kPointerSize)); |
4697 | 4622 |
4698 // If there are no actual arguments, we're done. | 4623 // If there are no actual arguments, we're done. |
4699 Label done; | 4624 Label done; |
4700 __ cmp(r1, Operand(0, RelocInfo::NONE)); | 4625 __ cmp(r1, Operand(0, RelocInfo::NONE)); |
4701 __ b(eq, &done); | 4626 __ b(eq, &done, Label::kNear); |
4702 | 4627 |
4703 // Get the parameters pointer from the stack. | 4628 // Get the parameters pointer from the stack. |
4704 __ ldr(r2, MemOperand(sp, 1 * kPointerSize)); | 4629 __ ldr(r2, MemOperand(sp, 1 * kPointerSize)); |
4705 | 4630 |
4706 // Set up the elements pointer in the allocated arguments object and | 4631 // Set up the elements pointer in the allocated arguments object and |
4707 // initialize the header in the elements fixed array. | 4632 // initialize the header in the elements fixed array. |
4708 __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict)); | 4633 __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict)); |
4709 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); | 4634 __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); |
4710 __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex); | 4635 __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex); |
4711 __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset)); | 4636 __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset)); |
4712 __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset)); | 4637 __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset)); |
4713 // Untag the length for the loop. | 4638 // Untag the length for the loop. |
4714 __ mov(r1, Operand(r1, LSR, kSmiTagSize)); | 4639 __ lsr(r1, r1, Operand(kSmiTagSize)); |
4715 | 4640 |
4716 // Copy the fixed array slots. | 4641 // Copy the fixed array slots. |
4717 Label loop; | 4642 Label loop; |
4718 // Set up r4 to point to the first array slot. | 4643 // Set up r4 to point to the first array slot. |
4719 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 4644 __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
4720 __ bind(&loop); | 4645 __ bind(&loop); |
4721 // Pre-decrement r2 with kPointerSize on each iteration. | 4646 // Pre-decrement r2 with kPointerSize on each iteration. |
4722 // Pre-decrement in order to skip receiver. | 4647 // Pre-decrement in order to skip receiver. |
4723 __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex)); | 4648 __ sub(r2, r2, Operand(kPointerSize)); |
| 4649 __ ldr(r3, MemOperand(r2)); |
4724 // Post-increment r4 with kPointerSize on each iteration. | 4650 // Post-increment r4 with kPointerSize on each iteration. |
4725 __ str(r3, MemOperand(r4, kPointerSize, PostIndex)); | 4651 __ str(r3, MemOperand(r4, kPointerSize, PostIndex)); |
4726 __ sub(r1, r1, Operand(1)); | 4652 __ dt(r1); |
4727 __ cmp(r1, Operand(0, RelocInfo::NONE)); | |
4728 __ b(ne, &loop); | 4653 __ b(ne, &loop); |
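Reviewer note: the dt instruction folds ARM's sub/cmp pair into one decrement-and-test that sets T when the counter reaches zero; the following b(ne, ...) loops while T is clear. A C++ model (the count is positive here, as the stub has already handled the zero-argument case):

  #include <cstdio>

  int main() {
    int r1 = 3;  // untagged argument count, known to be > 0
    do {
      std::printf("copy one slot\n");
    } while (--r1 != 0);  // dt(r1) + b(ne, &loop)
  }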
4729 | 4654 |
4730 // Return and remove the on-stack parameters. | 4655 // Return and remove the on-stack parameters. |
4731 __ bind(&done); | 4656 __ bind(&done); |
4732 __ add(sp, sp, Operand(3 * kPointerSize)); | 4657 __ add(sp, sp, Operand(3 * kPointerSize)); |
4733 __ Ret(); | 4658 __ Ret(); |
4734 | 4659 |
4735 // Do the runtime call to allocate the arguments object. | 4660 // Do the runtime call to allocate the arguments object. |
4736 __ bind(&runtime); | 4661 __ bind(&runtime); |
4737 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1); | 4662 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1); |
(...skipping 38 matching lines...) |
4776 ExternalReference::address_of_regexp_stack_memory_size(isolate); | 4701 ExternalReference::address_of_regexp_stack_memory_size(isolate); |
4777 __ mov(r0, Operand(address_of_regexp_stack_memory_size)); | 4702 __ mov(r0, Operand(address_of_regexp_stack_memory_size)); |
4778 __ ldr(r0, MemOperand(r0, 0)); | 4703 __ ldr(r0, MemOperand(r0, 0)); |
4779 __ cmp(r0, Operand(0)); | 4704 __ cmp(r0, Operand(0)); |
4780 __ b(eq, &runtime); | 4705 __ b(eq, &runtime); |
4781 | 4706 |
4782 // Check that the first argument is a JSRegExp object. | 4707 // Check that the first argument is a JSRegExp object. |
4783 __ ldr(r0, MemOperand(sp, kJSRegExpOffset)); | 4708 __ ldr(r0, MemOperand(sp, kJSRegExpOffset)); |
4784 STATIC_ASSERT(kSmiTag == 0); | 4709 STATIC_ASSERT(kSmiTag == 0); |
4785 __ JumpIfSmi(r0, &runtime); | 4710 __ JumpIfSmi(r0, &runtime); |
4786 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); | 4711 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE, eq); |
4787 __ b(ne, &runtime); | 4712 __ b(ne, &runtime); |
4788 | 4713 |
4789 // Check that the RegExp has been compiled (data contains a fixed array). | 4714 // Check that the RegExp has been compiled (data contains a fixed array). |
4790 __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset)); | 4715 __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset)); |
4791 if (FLAG_debug_code) { | 4716 if (FLAG_debug_code) { |
4792 __ tst(regexp_data, Operand(kSmiTagMask)); | 4717 __ tst(regexp_data, Operand(kSmiTagMask)); |
4793 __ Check(ne, "Unexpected type for RegExp data, FixedArray expected"); | 4718 __ Check(ne, "Unexpected type for RegExp data, FixedArray expected"); |
4794 __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE); | 4719 __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE, eq); |
4795 __ Check(eq, "Unexpected type for RegExp data, FixedArray expected"); | 4720 __ Check(eq, "Unexpected type for RegExp data, FixedArray expected"); |
4796 } | 4721 } |
4797 | 4722 |
4798 // regexp_data: RegExp data (FixedArray) | 4723 // regexp_data: RegExp data (FixedArray) |
4799 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. | 4724 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. |
4800 __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); | 4725 __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); |
4801 __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); | 4726 __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); |
4802 __ b(ne, &runtime); | 4727 __ b(ne, &runtime); |
4803 | 4728 |
4804 // regexp_data: RegExp data (FixedArray) | 4729 // regexp_data: RegExp data (FixedArray) |
4805 // Check that the number of captures fits in the static offsets vector buffer. | 4730 // Check that the number of captures fits in the static offsets vector buffer. |
4806 __ ldr(r2, | 4731 __ ldr(r2, |
4807 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); | 4732 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); |
4808 // Calculate number of capture registers (number_of_captures + 1) * 2. This | 4733 // Calculate number of capture registers (number_of_captures + 1) * 2. This |
4809 // uses the assumption that smis are 2 * their untagged value. | 4734 // uses the assumption that smis are 2 * their untagged value. |
4810 STATIC_ASSERT(kSmiTag == 0); | 4735 STATIC_ASSERT(kSmiTag == 0); |
4811 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); | 4736 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); |
4812 __ add(r2, r2, Operand(2)); // r2 was a smi. | 4737 __ add(r2, r2, Operand(2)); // r2 was a smi. |
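Reviewer note: a worked check of the capture-register count, assuming V8's smi encoding stores 2*value. For n captures the code needs (n + 1) * 2 registers, and since r2 already holds the smi 2n, adding 2 gives 2n + 2 without untagging first.

  #include <cassert>

  int main() {
    int n = 3;        // number of captures
    int r2 = n << 1;  // smi-tagged capture count (kSmiTagSize == 1)
    r2 = r2 + 2;      // "r2 was a smi"
    assert(r2 == (n + 1) * 2);  // number of capture registers
  }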
4813 // Check that the static offsets vector buffer is large enough. | 4738 // Check that the static offsets vector buffer is large enough. |
4814 __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize)); | 4739 __ cmphi(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize)); |
4815 __ b(hi, &runtime); | 4740 __ bt(&runtime); |
4816 | 4741 |
4817 // r2: Number of capture registers | 4742 // r2: Number of capture registers |
4818 // regexp_data: RegExp data (FixedArray) | 4743 // regexp_data: RegExp data (FixedArray) |
4819 // Check that the second argument is a string. | 4744 // Check that the second argument is a string. |
4820 __ ldr(subject, MemOperand(sp, kSubjectOffset)); | 4745 __ ldr(subject, MemOperand(sp, kSubjectOffset)); |
4821 __ JumpIfSmi(subject, &runtime); | 4746 __ JumpIfSmi(subject, &runtime); |
4822 Condition is_string = masm->IsObjectStringType(subject, r0); | 4747 Condition is_string = masm->IsObjectStringType(subject, r0); |
4823 __ b(NegateCondition(is_string), &runtime); | 4748 __ b(NegateCondition(is_string), &runtime); |
4824 // Get the length of the string to r3. | 4749 // Get the length of the string to r3. |
4825 __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset)); | 4750 __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset)); |
4826 | 4751 |
4827 // r2: Number of capture registers | 4752 // r2: Number of capture registers |
4828 // r3: Length of subject string as a smi | 4753 // r3: Length of subject string as a smi |
4829 // subject: Subject string | 4754 // subject: Subject string |
4830 // regexp_data: RegExp data (FixedArray) | 4755 // regexp_data: RegExp data (FixedArray) |
4831 // Check that the third argument is a positive smi less than the subject | 4756 // Check that the third argument is a positive smi less than the subject |
4832 // string length. A negative value will be greater (unsigned comparison). | 4757 // string length. A negative value will be greater (unsigned comparison). |
4833 __ ldr(r0, MemOperand(sp, kPreviousIndexOffset)); | 4758 __ ldr(r0, MemOperand(sp, kPreviousIndexOffset)); |
4834 __ JumpIfNotSmi(r0, &runtime); | 4759 __ JumpIfNotSmi(r0, &runtime); |
4835 __ cmp(r3, Operand(r0)); | 4760 __ cmphi(r3, r0); |
4836 __ b(ls, &runtime); | 4761 __ bf(&runtime); |
4837 | 4762 |
4838 // r2: Number of capture registers | 4763 // r2: Number of capture registers |
4839 // subject: Subject string | 4764 // subject: Subject string |
4840 // regexp_data: RegExp data (FixedArray) | 4765 // regexp_data: RegExp data (FixedArray) |
4841 // Check that the fourth object is a JSArray object. | 4766 // Check that the fourth object is a JSArray object. |
4842 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); | 4767 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); |
4843 __ JumpIfSmi(r0, &runtime); | 4768 __ JumpIfSmi(r0, &runtime); |
4844 __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE); | 4769 __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE, eq); |
4845 __ b(ne, &runtime); | 4770 __ b(ne, &runtime); |
4846 // Check that the JSArray is in fast case. | 4771 // Check that the JSArray is in fast case. |
4847 __ ldr(last_match_info_elements, | 4772 __ ldr(last_match_info_elements, |
4848 FieldMemOperand(r0, JSArray::kElementsOffset)); | 4773 FieldMemOperand(r0, JSArray::kElementsOffset)); |
4849 __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); | 4774 __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); |
4850 __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex); | 4775 __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex); |
4851 __ b(ne, &runtime); | 4776 __ b(ne, &runtime); |
4852 // Check that the last match info has space for the capture registers and the | 4777 // Check that the last match info has space for the capture registers and the |
4853 // additional information. | 4778 // additional information. |
4854 __ ldr(r0, | 4779 __ ldr(r0, |
4855 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); | 4780 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); |
4856 __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead)); | 4781 __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead)); |
4857 __ cmp(r2, Operand(r0, ASR, kSmiTagSize)); | 4782 __ asr(ip, r0, Operand(kSmiTagSize)); |
4858 __ b(gt, &runtime); | 4783 __ cmpgt(r2, ip); |
| 4784 __ bt(&runtime); |
4859 | 4785 |
4860 // Reset offset for possibly sliced string. | 4786 // Reset offset for possibly sliced string. |
4861 __ mov(r9, Operand(0)); | 4787 __ mov(r9, Operand(0)); |
4862 // subject: Subject string | 4788 // subject: Subject string |
4863 // regexp_data: RegExp data (FixedArray) | 4789 // regexp_data: RegExp data (FixedArray) |
4864 // Check the representation and encoding of the subject string. | 4790 // Check the representation and encoding of the subject string. |
4865 Label seq_string; | 4791 Label seq_string; |
4866 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); | 4792 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); |
4867 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); | 4793 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); |
4868 // First check for flat string. None of the following string type tests will | 4794 // First check for flat string. None of the following string type tests will |
4869 // succeed if subject is not a string or a short external string. | 4795 // succeed if subject is not a string or a short external string. |
4870 __ and_(r1, | 4796 __ land(r1, |
4871 r0, | 4797 r0, |
4872 Operand(kIsNotStringMask | | 4798 Operand(kIsNotStringMask | |
4873 kStringRepresentationMask | | 4799 kStringRepresentationMask | |
4874 kShortExternalStringMask), | 4800 kShortExternalStringMask)); |
4875 SetCC); | 4801 __ tst(r1, r1); |
4876 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); | 4802 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); |
4877 __ b(eq, &seq_string); | 4803 __ b(eq, &seq_string); |
4878 | 4804 |
4879 // subject: Subject string | 4805 // subject: Subject string |
4880 // regexp_data: RegExp data (FixedArray) | 4806 // regexp_data: RegExp data (FixedArray) |
4881 // r1: whether subject is a string and if yes, its string representation | |
4882 // Check for flat cons string or sliced string. | 4807 // Check for flat cons string or sliced string. |
4883 // A flat cons string is a cons string where the second part is the empty | 4808 // A flat cons string is a cons string where the second part is the empty |
4884 // string. In that case the subject string is just the first part of the cons | 4809 // string. In that case the subject string is just the first part of the cons |
4885 // string. Also in this case the first part of the cons string is known to be | 4810 // string. Also in this case the first part of the cons string is known to be |
4886 // a sequential string or an external string. | 4811 // a sequential string or an external string. |
4887 // In the case of a sliced string its offset has to be taken into account. | 4812 // In the case of a sliced string its offset has to be taken into account. |
4888 Label cons_string, external_string, check_encoding; | 4813 Label cons_string, external_string, check_encoding; |
4889 STATIC_ASSERT(kConsStringTag < kExternalStringTag); | 4814 STATIC_ASSERT(kConsStringTag < kExternalStringTag); |
4890 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); | 4815 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); |
4891 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag); | 4816 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag); |
4892 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag); | 4817 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag); |
4893 __ cmp(r1, Operand(kExternalStringTag)); | 4818 __ cmpge(r1, Operand(kExternalStringTag)); |
4894 __ b(lt, &cons_string); | 4819 __ bf(&cons_string); |
4895 __ b(eq, &external_string); | 4820 __ cmpeq(r1, Operand(kExternalStringTag)); |
| 4821 __ bt(&external_string); |
4896 | 4822 |
4897 // Catch non-string subject or short external string. | 4823 // Catch non-string subject or short external string. |
4898 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0); | 4824 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0); |
4899 __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask)); | 4825 __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask)); |
4900 __ b(ne, &runtime); | 4826 __ b(ne, &runtime); |
4901 | 4827 |
4902 // String is sliced. | 4828 // String is sliced. |
4903 __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset)); | 4829 __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset)); |
4904 __ mov(r9, Operand(r9, ASR, kSmiTagSize)); | 4830 __ asr(r9, r9, Operand(kSmiTagSize)); |
4905 __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); | 4831 __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); |
4906 // r9: offset of sliced string, smi-tagged. | 4832 // r9: offset of sliced string, smi-tagged. |
4907 __ jmp(&check_encoding); | 4833 __ jmp(&check_encoding); |
4908 // String is a cons string, check whether it is flat. | 4834 // String is a cons string, check whether it is flat. |
4909 __ bind(&cons_string); | 4835 __ bind(&cons_string); |
4910 __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset)); | 4836 __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset)); |
4911 __ CompareRoot(r0, Heap::kEmptyStringRootIndex); | 4837 __ CompareRoot(r0, Heap::kEmptyStringRootIndex); |
4912 __ b(ne, &runtime); | 4838 __ b(ne, &runtime); |
4913 __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); | 4839 __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); |
4914 // Is first part of cons or parent of slice a flat string? | 4840 // Is first part of cons or parent of slice a flat string? |
4915 __ bind(&check_encoding); | 4841 __ bind(&check_encoding); |
4916 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); | 4842 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); |
4917 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); | 4843 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); |
4918 STATIC_ASSERT(kSeqStringTag == 0); | 4844 STATIC_ASSERT(kSeqStringTag == 0); |
4919 __ tst(r0, Operand(kStringRepresentationMask)); | 4845 __ tst(r0, Operand(kStringRepresentationMask)); |
4920 __ b(ne, &external_string); | 4846 __ b(ne, &external_string); |
4921 | 4847 |
4922 __ bind(&seq_string); | 4848 __ bind(&seq_string); |
4923 // subject: Subject string | 4849 // subject: Subject string |
4924 // regexp_data: RegExp data (FixedArray) | 4850 // regexp_data: RegExp data (FixedArray) |
4925 // r0: Instance type of subject string | 4851 // r0: Instance type of subject string |
4926 STATIC_ASSERT(4 == kAsciiStringTag); | 4852 STATIC_ASSERT(4 == kAsciiStringTag); |
4927 STATIC_ASSERT(kTwoByteStringTag == 0); | 4853 STATIC_ASSERT(kTwoByteStringTag == 0); |
4928 // Find the code object based on the assumptions above. | 4854 // Find the code object based on the assumptions above. |
4929 __ and_(r0, r0, Operand(kStringEncodingMask)); | 4855 __ land(r0, r0, Operand(kStringEncodingMask)); |
4930 __ mov(r3, Operand(r0, ASR, 2), SetCC); | 4856 __ asr(r3, r0, Operand(2)); |
4931 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne); | 4857 __ tst(r3, r3); |
4932 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq); | 4858 Label skip_true, skip_end; |
| 4859 __ bt_near(&skip_true); |
| 4860 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset)); |
| 4861 __ b_near(&skip_end); |
| 4862 __ bind(&skip_true); |
| 4863 __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset)); |
| 4864 __ bind(&skip_end); |
4933 | 4865 |
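Reviewer note: a sketch of the encoding dispatch above, assuming kStringEncodingMask == 4 to match the STATIC_ASSERT on kAsciiStringTag. Masking the instance type and shifting right by 2 leaves 1 for ASCII and 0 for two-byte, which is what the tst/bt_near pair branches on.

  #include <cassert>

  int main() {
    const int kStringEncodingMask = 4;  // assumed, per kAsciiStringTag == 4
    int ascii_type = 4, two_byte_type = 0;
    assert(((ascii_type & kStringEncodingMask) >> 2) == 1);
    assert(((two_byte_type & kStringEncodingMask) >> 2) == 0);
  }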
4934 // Check that the irregexp code has been generated for the actual string | 4866 // Check that the irregexp code has been generated for the actual string |
4935 // encoding. If it has, the field contains a code object otherwise it contains | 4867 // encoding. If it has, the field contains a code object otherwise it contains |
4936 // a smi (code flushing support). | 4868 // a smi (code flushing support). |
4937 __ JumpIfSmi(r7, &runtime); | 4869 __ JumpIfSmi(r7, &runtime); |
4938 | 4870 |
4939 // r3: encoding of subject string (1 if ASCII, 0 if two_byte); | 4871 // r3: encoding of subject string (1 if ASCII, 0 if two_byte); |
4940 // r7: code | 4872 // r7: code |
4941 // subject: Subject string | 4873 // subject: Subject string |
4942 // regexp_data: RegExp data (FixedArray) | 4874 // regexp_data: RegExp data (FixedArray) |
4943 // Load used arguments before starting to push arguments for call to native | 4875 // Load used arguments before starting to push arguments for call to native |
4944 // RegExp code to avoid handling changing stack height. | 4876 // RegExp code to avoid handling changing stack height. |
4945 __ ldr(r1, MemOperand(sp, kPreviousIndexOffset)); | 4877 __ ldr(r1, MemOperand(sp, kPreviousIndexOffset)); |
4946 __ mov(r1, Operand(r1, ASR, kSmiTagSize)); | 4878 __ asr(r1, r1, Operand(kSmiTagSize)); |
4947 | 4879 |
4948 // r1: previous index | 4880 // r1: previous index |
4949 // r3: encoding of subject string (1 if ASCII, 0 if two_byte); | 4881 // r3: encoding of subject string (1 if ASCII, 0 if two_byte); |
4950 // r7: code | 4882 // r7: code |
4951 // subject: Subject string | 4883 // subject: Subject string |
4952 // regexp_data: RegExp data (FixedArray) | 4884 // regexp_data: RegExp data (FixedArray) |
4953 // All checks done. Now push arguments for native regexp code. | 4885 // All checks done. Now push arguments for native regexp code. |
4954 __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2); | 4886 __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2); |
4955 | 4887 |
| 4888 // Save r5-r7 as they are going to be used afterward in the C code. |
| 4889 // r4 is restored by a load from the right place in the same frame. |
| 4890 __ Push(r5, r6, r7); |
| 4891 |
4956 // Isolates: note we add an additional parameter here (isolate pointer). | 4892 // Isolates: note we add an additional parameter here (isolate pointer). |
4957 const int kRegExpExecuteArguments = 9; | 4893 const int kRegExpExecuteArguments = 9; |
4958 const int kParameterRegisters = 4; | 4894 const int kParameterRegisters = 4; |
4959 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); | 4895 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); |
4960 | 4896 |
4961 // Stack pointer now points to cell where return address is to be written. | 4897 // Stack pointer now points to cell where return address is to be written. |
4962 // Arguments are before that on the stack or in registers. | 4898 // Arguments are before that on the stack or in registers. |
4963 | 4899 |
4964 // Argument 9 (sp[20]): Pass current isolate address. | 4900 // Argument 9 (sp[20]): Pass current isolate address. |
4965 __ mov(r0, Operand(ExternalReference::isolate_address())); | 4901 __ mov(r0, Operand(ExternalReference::isolate_address())); |
4966 __ str(r0, MemOperand(sp, 5 * kPointerSize)); | 4902 __ str(r0, MemOperand(sp, 5 * kPointerSize)); |
4967 | 4903 |
4968 // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript. | 4904 // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript. |
4969 __ mov(r0, Operand(1)); | 4905 __ mov(r0, Operand(1)); |
4970 __ str(r0, MemOperand(sp, 4 * kPointerSize)); | 4906 __ str(r0, MemOperand(sp, 4 * kPointerSize)); |
4971 | 4907 |
4972 // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area. | 4908 // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area. |
4973 __ mov(r0, Operand(address_of_regexp_stack_memory_address)); | 4909 __ mov(r0, Operand(address_of_regexp_stack_memory_address)); |
4974 __ ldr(r0, MemOperand(r0, 0)); | 4910 __ ldr(r0, MemOperand(r0, 0)); |
4975 __ mov(r2, Operand(address_of_regexp_stack_memory_size)); | 4911 __ mov(r2, Operand(address_of_regexp_stack_memory_size)); |
4976 __ ldr(r2, MemOperand(r2, 0)); | 4912 __ ldr(r2, MemOperand(r2, 0)); |
4977 __ add(r0, r0, Operand(r2)); | 4913 __ add(r0, r0, r2); |
4978 __ str(r0, MemOperand(sp, 3 * kPointerSize)); | 4914 __ str(r0, MemOperand(sp, 3 * kPointerSize)); |
4979 | 4915 |
4980 // Argument 6: Set the number of capture registers to zero to force global | 4916 // Argument 6: Set the number of capture registers to zero to force global |
4981 // regexps to behave as non-global. This does not affect non-global regexps. | 4917 // regexps to behave as non-global. This does not affect non-global regexps. |
4982 __ mov(r0, Operand(0)); | 4918 __ mov(r0, Operand(0)); |
4983 __ str(r0, MemOperand(sp, 2 * kPointerSize)); | 4919 __ str(r0, MemOperand(sp, 2 * kPointerSize)); |
4984 | 4920 |
4985 // Argument 5 (sp[4]): static offsets vector buffer. | 4921 // Argument 5 (sp[4]): static offsets vector buffer. |
4986 __ mov(r0, | 4922 __ mov(r0, |
4987 Operand(ExternalReference::address_of_static_offsets_vector(isolate))); | 4923 Operand(ExternalReference::address_of_static_offsets_vector(isolate))); |
4988 __ str(r0, MemOperand(sp, 1 * kPointerSize)); | 4924 __ str(r0, MemOperand(sp, 1 * kPointerSize)); |
4989 | 4925 |
4990 // For arguments 4 and 3 get string length, calculate start of string data and | 4926 // For arguments 4 and 3 get string length, calculate start of string data and |
4991 // calculate the shift of the index (0 for ASCII and 1 for two byte). | 4927 // calculate the shift of the index (0 for ASCII and 1 for two byte). |
4992 __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); | 4928 __ add(sh4_r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); |
4993 __ eor(r3, r3, Operand(1)); | 4929 __ eor(r3, r3, Operand(1)); |
4994 // Load the length from the original subject string from the previous stack | 4930 // Load the length from the original subject string from the previous stack |
4995 // frame. Therefore we have to use fp, which points exactly to two pointer | 4931 // frame. Therefore we have to use fp, which points exactly to two pointer |
4996 // sizes below the previous sp. (Because creating a new stack frame pushes | 4932 // sizes below the previous sp. (Because creating a new stack frame pushes |
4997 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.) | 4933 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.) We |
4998 __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize)); | 4934 // also have to take into account the 3 registers pushed on the stack |
| 4935 // (r5, r6 and r7). |
| 4936 __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize + |
| 4937 3 * kPointerSize)); |
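Worked out with kPointerSize == 4 (an assumption; SH4 here is a 32-bit target), the extra term accounts for the three registers saved just before EnterExitFrame:

    kSubjectOffset + 2 * kPointerSize + 3 * kPointerSize
        = kSubjectOffset + 8 (saved fp / sp adjustment) + 12 (saved r5-r7)
        = kSubjectOffset + 20 bytes above fp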
4999 // If slice offset is not 0, load the length from the original sliced string. | 4938 // If slice offset is not 0, load the length from the original sliced string. |
5000 // Argument 4, r3: End of string data | 4939 // Argument 4, r3: End of string data |
5001 // Argument 3, r2: Start of string data | 4940 // Argument 3, r2: Start of string data |
5002 // Prepare start and end index of the input. | 4941 // Prepare start and end index of the input. |
5003 __ add(r9, r8, Operand(r9, LSL, r3)); | 4942 __ lsl(r9, r9, r3); |
5004 __ add(r2, r9, Operand(r1, LSL, r3)); | 4943 __ add(r9, sh4_r8, r9); |
| 4944 __ lsl(ip, r1, r3); |
| 4945 __ add(r2, r9, ip); |
5005 | 4946 |
5006 __ ldr(r8, FieldMemOperand(subject, String::kLengthOffset)); | 4947 __ ldr(sh4_r8, FieldMemOperand(subject, String::kLengthOffset)); |
5007 __ mov(r8, Operand(r8, ASR, kSmiTagSize)); | 4948 __ asr(sh4_r8, sh4_r8, Operand(kSmiTagSize)); |
5008 __ add(r3, r9, Operand(r8, LSL, r3)); | 4949 __ lsl(r3, sh4_r8, r3); |
| 4950 __ add(r3, r9, r3); |
5009 | 4951 |
5010 // Argument 2 (r1): Previous index. | 4952 // Argument 2 (r1): Previous index. |
5011 // Already there | 4953 // Already there |
5012 | 4954 |
5013 // Argument 1 (r0): Subject string. | 4955 // Argument 1 (r0): Subject string. |
5014 __ mov(r0, subject); | 4956 __ mov(r0, subject); |
5015 | 4957 |
| 4958 __ mov(r4, r0); |
| 4959 __ mov(r5, r1); |
| 4960 __ mov(r6, r2); |
| 4961 |
5016 // Locate the code entry and call it. | 4962 // Locate the code entry and call it. |
5017 __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); | 4963 __ add(r0, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); |
5018 DirectCEntryStub stub; | 4964 __ mov(r7, r3); |
5019 stub.GenerateCall(masm, r7); | |
5020 | 4965 |
| 4966 DirectCEntryStub stub(r2); |
| 4967 stub.GenerateCall(masm, r0, r3); |
| 4968 |
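For orientation, the nine arguments marshalled above (r4-r7 plus five stack slots, per the SH4 calling convention used in this stub) suggest a native entry point of roughly the following shape. This is a hedged reconstruction from the comments, not the actual declaration, and all names are illustrative:

    // Arguments 1-4 arrive in r4-r7; arguments 5-9 live at sp[4]-sp[20].
    extern "C" int NativeRegExpExec(void* subject,               // 1: r4
                                    int previous_index,          // 2: r5
                                    const void* input_start,     // 3: r6
                                    const void* input_end,       // 4: r7
                                    int* static_offsets_vector,  // 5: sp[4]
                                    int capture_register_count,  // 6: sp[8]
                                    void* stack_area_top,        // 7: sp[12]
                                    int direct_call,             // 8: sp[16]
                                    void* isolate);              // 9: sp[20]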
| 4969 // Get back the subject from the previous frame: r4 will not be scratched by |
| 4970 // a call to LeaveExitFrame |
| 4971 __ ldr(r4, MemOperand(fp, kSubjectOffset + 2 * kPointerSize + |
| 4972 3 * kPointerSize)); |
5021 __ LeaveExitFrame(false, no_reg); | 4973 __ LeaveExitFrame(false, no_reg); |
| 4974 __ Pop(r5, r6, r7); |
5022 | 4975 |
5023 // r0: result | 4976 // r0: result |
5024 // subject: subject string (callee saved) | 4977 // subject: subject string (callee saved) |
5025 // regexp_data: RegExp data (callee saved) | 4978 // regexp_data: RegExp data (callee saved) |
5026 // last_match_info_elements: Last match info elements (callee saved) | 4979 // last_match_info_elements: Last match info elements (callee saved) |
5027 | 4980 |
5028 // Check the result. | 4981 // Check the result. |
5029 Label success; | 4982 Label success; |
5030 | 4983 |
5031 __ cmp(r0, Operand(1)); | 4984 __ cmp(r0, Operand(1)); |
(...skipping 41 matching lines...)
5073 __ ldr(r1, | 5026 __ ldr(r1, |
5074 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); | 5027 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); |
5075 // Calculate number of capture registers (number_of_captures + 1) * 2. | 5028 // Calculate number of capture registers (number_of_captures + 1) * 2. |
5076 STATIC_ASSERT(kSmiTag == 0); | 5029 STATIC_ASSERT(kSmiTag == 0); |
5077 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); | 5030 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); |
5078 __ add(r1, r1, Operand(2)); // r1 was a smi. | 5031 __ add(r1, r1, Operand(2)); // r1 was a smi. |
5079 | 5032 |
5080 // r1: number of capture registers | 5033 // r1: number of capture registers |
5081 // r4: subject string | 5034 // r4: subject string |
5082 // Store the capture count. | 5035 // Store the capture count. |
5083 __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize)); // To smi. | 5036 __ lsl(r2, r1, Operand(kSmiTagSize + kSmiShiftSize)); // To smi. |
5084 __ str(r2, FieldMemOperand(last_match_info_elements, | 5037 __ str(r2, FieldMemOperand(last_match_info_elements, |
5085 RegExpImpl::kLastCaptureCountOffset)); | 5038 RegExpImpl::kLastCaptureCountOffset)); |
5086 // Store last subject and last input. | 5039 // Store last subject and last input. |
5087 __ str(subject, | 5040 __ str(subject, |
5088 FieldMemOperand(last_match_info_elements, | 5041 FieldMemOperand(last_match_info_elements, |
5089 RegExpImpl::kLastSubjectOffset)); | 5042 RegExpImpl::kLastSubjectOffset)); |
5090 __ mov(r2, subject); | 5043 __ mov(r2, subject); |
5091 __ RecordWriteField(last_match_info_elements, | 5044 __ RecordWriteField(last_match_info_elements, |
5092 RegExpImpl::kLastSubjectOffset, | 5045 RegExpImpl::kLastSubjectOffset, |
5093 r2, | 5046 r2, |
(...skipping 17 matching lines...)
5111 | 5064 |
5112 // r1: number of capture registers | 5065 // r1: number of capture registers |
5113 // r2: offsets vector | 5066 // r2: offsets vector |
5114 Label next_capture, done; | 5067 Label next_capture, done; |
5115 // Capture register counter starts from number of capture registers and | 5068 // Capture register counter starts from number of capture registers and |
5116 // counts down until wrapping after zero. | 5069 // counts down until wrapping after zero. |
5117 __ add(r0, | 5070 __ add(r0, |
5118 last_match_info_elements, | 5071 last_match_info_elements, |
5119 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag)); | 5072 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag)); |
5120 __ bind(&next_capture); | 5073 __ bind(&next_capture); |
5121 __ sub(r1, r1, Operand(1), SetCC); | 5074 __ sub(r1, r1, Operand(1)); |
5122 __ b(mi, &done); | 5075 __ cmpge(r1, Operand(0)); |
| 5076 __ bf(&done); |
5123 // Read the value from the static offsets vector buffer. | 5077 // Read the value from the static offsets vector buffer. |
5124 __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex)); | 5078 __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex)); |
5125 // Store the smi value in the last match info. | 5079 // Store the smi value in the last match info. |
5126 __ mov(r3, Operand(r3, LSL, kSmiTagSize)); | 5080 __ lsl(r3, r3, Operand(kSmiTagSize)); |
5127 __ str(r3, MemOperand(r0, kPointerSize, PostIndex)); | 5081 __ str(r3, MemOperand(r0, kPointerSize, PostIndex)); |
5128 __ jmp(&next_capture); | 5082 __ jmp(&next_capture); |
5129 __ bind(&done); | 5083 __ bind(&done); |
5130 | 5084 |
5131 // Return last match info. | 5085 // Return last match info. |
5132 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); | 5086 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); |
5133 __ add(sp, sp, Operand(4 * kPointerSize)); | 5087 __ add(sp, sp, Operand(4 * kPointerSize)); |
5134 __ Ret(); | 5088 __ Ret(); |
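The loop above walks the capture registers downward, copying each untagged offset out of the static offsets vector and storing it smi-tagged into the last match info. A scalar sketch of what it computes (kSmiTagSize == 1, per the earlier STATIC_ASSERTs):

    // Smi-tag each capture offset while copying it into last match info.
    static void CopyCaptures(int* last_match_info, const int* offsets,
                             int capture_register_count) {
      for (int i = 0; i < capture_register_count; i++) {
        last_match_info[i] = offsets[i] << 1;  // to smi
      }
    }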
5135 | 5089 |
5136 // External string. Short external strings have already been ruled out. | 5090 // External string. Short external strings have already been ruled out. |
(...skipping 26 matching lines...)
5163 void RegExpConstructResultStub::Generate(MacroAssembler* masm) { | 5117 void RegExpConstructResultStub::Generate(MacroAssembler* masm) { |
5164 const int kMaxInlineLength = 100; | 5118 const int kMaxInlineLength = 100; |
5165 Label slowcase; | 5119 Label slowcase; |
5166 Label done; | 5120 Label done; |
5167 Factory* factory = masm->isolate()->factory(); | 5121 Factory* factory = masm->isolate()->factory(); |
5168 | 5122 |
5169 __ ldr(r1, MemOperand(sp, kPointerSize * 2)); | 5123 __ ldr(r1, MemOperand(sp, kPointerSize * 2)); |
5170 STATIC_ASSERT(kSmiTag == 0); | 5124 STATIC_ASSERT(kSmiTag == 0); |
5171 STATIC_ASSERT(kSmiTagSize == 1); | 5125 STATIC_ASSERT(kSmiTagSize == 1); |
5172 __ JumpIfNotSmi(r1, &slowcase); | 5126 __ JumpIfNotSmi(r1, &slowcase); |
5173 __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength))); | 5127 __ cmphi(r1, Operand(Smi::FromInt(kMaxInlineLength))); |
5174 __ b(hi, &slowcase); | 5128 __ b(t, &slowcase); |
5175 // Smi-tagging is equivalent to multiplying by 2. | 5129 // Smi-tagging is equivalent to multiplying by 2. |
5176 // Allocate RegExpResult followed by FixedArray with size in ebx. | 5130 // Allocate RegExpResult followed by FixedArray with size in ebx. |
5177 // JSArray: [Map][empty properties][Elements][Length-smi][index][input] | 5131 // JSArray: [Map][empty properties][Elements][Length-smi][index][input] |
5178 // Elements: [Map][Length][..elements..] | 5132 // Elements: [Map][Length][..elements..] |
5179 // Size of JSArray with two in-object properties and the header of a | 5133 // Size of JSArray with two in-object properties and the header of a |
5180 // FixedArray. | 5134 // FixedArray. |
5181 int objects_size = | 5135 int objects_size = |
5182 (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize; | 5136 (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize; |
5183 __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize)); | 5137 __ lsr(r5, r1, Operand(kSmiTagSize + kSmiShiftSize)); |
5184 __ add(r2, r5, Operand(objects_size)); | 5138 __ add(r2, r5, Operand(objects_size)); |
5185 __ AllocateInNewSpace( | 5139 __ AllocateInNewSpace( |
5186 r2, // In: Size, in words. | 5140 r2, // In: Size, in words. |
5187 r0, // Out: Start of allocation (tagged). | 5141 r0, // Out: Start of allocation (tagged). |
5188 r3, // Scratch register. | 5142 r3, // Scratch register. |
5189 r4, // Scratch register. | 5143 r4, // Scratch register. |
5190 &slowcase, | 5144 &slowcase, |
5191 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); | 5145 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); |
5192 // r0: Start of allocated area, object-tagged. | 5146 // r0: Start of allocated area, object-tagged. |
5193 // r1: Number of elements in array, as smi. | 5147 // r1: Number of elements in array, as smi. |
(...skipping 22 matching lines...)
5216 | 5170 |
5217 // Fill out the elements FixedArray. | 5171 // Fill out the elements FixedArray. |
5218 // r0: JSArray, tagged. | 5172 // r0: JSArray, tagged. |
5219 // r3: FixedArray, tagged. | 5173 // r3: FixedArray, tagged. |
5220 // r5: Number of elements in array, untagged. | 5174 // r5: Number of elements in array, untagged. |
5221 | 5175 |
5222 // Set map. | 5176 // Set map. |
5223 __ mov(r2, Operand(factory->fixed_array_map())); | 5177 __ mov(r2, Operand(factory->fixed_array_map())); |
5224 __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); | 5178 __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); |
5225 // Set FixedArray length. | 5179 // Set FixedArray length. |
5226 __ mov(r6, Operand(r5, LSL, kSmiTagSize)); | 5180 __ lsl(r6, r5, Operand(kSmiTagSize)); |
5227 __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset)); | 5181 __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset)); |
5228 // Fill contents of fixed-array with undefined. | 5182 // Fill contents of fixed-array with undefined. |
5229 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); | 5183 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); |
5230 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 5184 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
5231 // Fill fixed array elements with undefined. | 5185 // Fill fixed array elements with undefined. |
5232 // r0: JSArray, tagged. | 5186 // r0: JSArray, tagged. |
5233 // r2: undefined. | 5187 // r2: undefined. |
5234 // r3: Start of elements in FixedArray. | 5188 // r3: Start of elements in FixedArray. |
5235 // r5: Number of elements to fill. | 5189 // r5: Number of elements to fill. |
5236 Label loop; | 5190 Label loop; |
5237 __ cmp(r5, Operand(0)); | 5191 __ cmpgt(r5, Operand(0)); |
5238 __ bind(&loop); | 5192 __ bind(&loop); |
5239 __ b(le, &done); // Jump if r5 is negative or zero. | 5193 __ b(f, &done, Label::kNear); // Jump if r5 is negative or zero. |
5240 __ sub(r5, r5, Operand(1), SetCC); | 5194 __ sub(r5, r5, Operand(1)); |
5241 __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2)); | 5195 __ lsl(ip, r5, Operand(kPointerSizeLog2)); |
| 5196 __ str(r2, MemOperand(r3, ip)); |
| 5197 __ cmpgt(r5, Operand(0)); |
5242 __ jmp(&loop); | 5198 __ jmp(&loop); |
5243 | 5199 |
5244 __ bind(&done); | 5200 __ bind(&done); |
5245 __ add(sp, sp, Operand(3 * kPointerSize)); | 5201 __ add(sp, sp, Operand(3 * kPointerSize)); |
5246 __ Ret(); | 5202 __ Ret(); |
5247 | 5203 |
5248 __ bind(&slowcase); | 5204 __ bind(&slowcase); |
5249 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1); | 5205 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1); |
5250 } | 5206 } |
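The rewritten fill loop tests r5 against zero before the decrement (cmpgt followed by b(f)), which preserves the ARM behavior of storing undefined into elements r5-1 down to 0. The equivalent scalar loop:

    // Fill the FixedArray payload with undefined, last element first.
    static void FillWithUndefined(void** elements, int count, void* undefined) {
      while (count > 0) {
        count--;
        elements[count] = undefined;
      }
    }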
5251 | 5207 |
(...skipping 16 matching lines...)
5268 | 5224 |
5269 // A monomorphic cache hit or an already megamorphic state: invoke the | 5225 // A monomorphic cache hit or an already megamorphic state: invoke the |
5270 // function without changing the state. | 5226 // function without changing the state. |
5271 __ cmp(r3, r1); | 5227 __ cmp(r3, r1); |
5272 __ b(eq, &done); | 5228 __ b(eq, &done); |
5273 __ CompareRoot(r3, Heap::kUndefinedValueRootIndex); | 5229 __ CompareRoot(r3, Heap::kUndefinedValueRootIndex); |
5274 __ b(eq, &done); | 5230 __ b(eq, &done); |
5275 | 5231 |
5276 // A monomorphic miss (i.e, here the cache is not uninitialized) goes | 5232 // A monomorphic miss (i.e, here the cache is not uninitialized) goes |
5277 // megamorphic. | 5233 // megamorphic. |
| 5234 Label skip; |
5278 __ CompareRoot(r3, Heap::kTheHoleValueRootIndex); | 5235 __ CompareRoot(r3, Heap::kTheHoleValueRootIndex); |
5279 // MegamorphicSentinel is an immortal immovable object (undefined) so no | 5236 // MegamorphicSentinel is an immortal immovable object (undefined) so no |
5280 // write-barrier is needed. | 5237 // write-barrier is needed. |
5281 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex, ne); | 5238 __ bt_near(&skip); |
5282 __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), ne); | 5239 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
| 5240 __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset)); |
| 5241 __ jmp(&done); |
5283 | 5242 |
5284 // An uninitialized cache is patched with the function. | 5243 // An uninitialized cache is patched with the function. |
5285 __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), eq); | 5244 __ bind(&skip); |
| 5245 __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset)); |
5286 // No need for a write barrier here - cells are rescanned. | 5246 // No need for a write barrier here - cells are rescanned. |
5287 | 5247 |
5288 __ bind(&done); | 5248 __ bind(&done); |
5289 } | 5249 } |
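In scalar terms the cache-cell update above is a three-state transition; the SH4 version turns ARM's two conditional stores into explicit branches but implements the same transition. A sketch (cell layout elided):

    // state == function or undefined: leave alone (hit, or already megamorphic).
    // state == the_hole: uninitialized, patch in the function.
    // anything else: monomorphic miss, go megamorphic (undefined sentinel).
    static void RecordCallTargetModel(void** cell, void* function,
                                      void* undefined, void* the_hole) {
      void* state = *cell;
      if (state == function || state == undefined) return;
      *cell = (state == the_hole) ? function : undefined;
    }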
5290 | 5250 |
5291 | 5251 |
5292 void CallFunctionStub::Generate(MacroAssembler* masm) { | 5252 void CallFunctionStub::Generate(MacroAssembler* masm) { |
5293 // r1 : the function to call | 5253 // r1 : the function to call |
5294 // r2 : cache cell for call target | 5254 // r2 : cache cell for call target |
5295 Label slow, non_function; | 5255 Label slow, non_function; |
5296 | 5256 |
5297 // The receiver might implicitly be the global object. This is | 5257 // The receiver might implicitly be the global object. This is |
5298 // indicated by passing the hole as the receiver to the call | 5258 // indicated by passing the hole as the receiver to the call |
5299 // function stub. | 5259 // function stub. |
5300 if (ReceiverMightBeImplicit()) { | 5260 if (ReceiverMightBeImplicit()) { |
5301 Label call; | 5261 Label call; |
5302 // Get the receiver from the stack. | 5262 // Get the receiver from the stack. |
5303 // function, receiver [, arguments] | 5263 // function, receiver [, arguments] |
5304 __ ldr(r4, MemOperand(sp, argc_ * kPointerSize)); | 5264 __ ldr(r4, MemOperand(sp, argc_ * kPointerSize)); |
5305 // Call as function is indicated with the hole. | 5265 // Call as function is indicated with the hole. |
5306 __ CompareRoot(r4, Heap::kTheHoleValueRootIndex); | 5266 __ CompareRoot(r4, Heap::kTheHoleValueRootIndex); |
5307 __ b(ne, &call); | 5267 __ b(ne, &call, Label::kNear); |
5308 // Patch the receiver on the stack with the global receiver object. | 5268 // Patch the receiver on the stack with the global receiver object. |
5309 __ ldr(r3, | 5269 __ ldr(r3, |
5310 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 5270 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
5311 __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset)); | 5271 __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset)); |
5312 __ str(r3, MemOperand(sp, argc_ * kPointerSize)); | 5272 __ str(r3, MemOperand(sp, argc_ * kPointerSize)); |
5313 __ bind(&call); | 5273 __ bind(&call); |
5314 } | 5274 } |
5315 | 5275 |
5316 // Check that the function is really a JavaScript function. | 5276 // Check that the function is really a JavaScript function. |
5317 // r1: pushed function (to be verified) | 5277 // r1: pushed function (to be verified) |
5318 __ JumpIfSmi(r1, &non_function); | 5278 __ JumpIfSmi(r1, &non_function); |
5319 // Get the map of the function object. | 5279 // Get the map of the function object. |
5320 __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE); | 5280 __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE, eq); |
5321 __ b(ne, &slow); | 5281 __ b(ne, &slow); |
5322 | 5282 |
5323 if (RecordCallTarget()) { | 5283 if (RecordCallTarget()) { |
5324 GenerateRecordCallTarget(masm); | 5284 GenerateRecordCallTarget(masm); |
5325 } | 5285 } |
5326 | 5286 |
5327 // Fast-case: Invoke the function now. | 5287 // Fast-case: Invoke the function now. |
5328 // r1: pushed function | 5288 // r1: pushed function |
5329 ParameterCount actual(argc_); | 5289 ParameterCount actual(argc_); |
5330 | 5290 |
5331 if (ReceiverMightBeImplicit()) { | 5291 if (ReceiverMightBeImplicit()) { |
5332 Label call_as_function; | 5292 Label call_as_function; |
5333 __ CompareRoot(r4, Heap::kTheHoleValueRootIndex); | 5293 __ CompareRoot(r4, Heap::kTheHoleValueRootIndex); |
5334 __ b(eq, &call_as_function); | 5294 __ b(eq, &call_as_function, Label::kNear); |
5335 __ InvokeFunction(r1, | 5295 __ InvokeFunction(r1, |
5336 actual, | 5296 actual, |
5337 JUMP_FUNCTION, | 5297 JUMP_FUNCTION, |
5338 NullCallWrapper(), | 5298 NullCallWrapper(), |
5339 CALL_AS_METHOD); | 5299 CALL_AS_METHOD); |
5340 __ bind(&call_as_function); | 5300 __ bind(&call_as_function); |
5341 } | 5301 } |
5342 __ InvokeFunction(r1, | 5302 __ InvokeFunction(r1, |
5343 actual, | 5303 actual, |
5344 JUMP_FUNCTION, | 5304 JUMP_FUNCTION, |
(...skipping 40 matching lines...)
5385 | 5345 |
5386 void CallConstructStub::Generate(MacroAssembler* masm) { | 5346 void CallConstructStub::Generate(MacroAssembler* masm) { |
5387 // r0 : number of arguments | 5347 // r0 : number of arguments |
5388 // r1 : the function to call | 5348 // r1 : the function to call |
5389 // r2 : cache cell for call target | 5349 // r2 : cache cell for call target |
5390 Label slow, non_function_call; | 5350 Label slow, non_function_call; |
5391 | 5351 |
5392 // Check that the function is not a smi. | 5352 // Check that the function is not a smi. |
5393 __ JumpIfSmi(r1, &non_function_call); | 5353 __ JumpIfSmi(r1, &non_function_call); |
5394 // Check that the function is a JSFunction. | 5354 // Check that the function is a JSFunction. |
5395 __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE); | 5355 __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE, eq); |
5396 __ b(ne, &slow); | 5356 __ b(ne, &slow); |
5397 | 5357 |
5398 if (RecordCallTarget()) { | 5358 if (RecordCallTarget()) { |
5399 GenerateRecordCallTarget(masm); | 5359 GenerateRecordCallTarget(masm); |
5400 } | 5360 } |
5401 | 5361 |
5402 // Jump to the function-specific construct stub. | 5362 // Jump to the function-specific construct stub. |
5403 __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); | 5363 __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); |
5404 __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset)); | 5364 __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset)); |
5405 __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag)); | 5365 __ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 5366 __ jmp(r2); |
5406 | 5367 |
5407 // r0: number of arguments | 5368 // r0: number of arguments |
5408 // r1: called object | 5369 // r1: called object |
5409 // r3: object type | 5370 // r3: object type |
5410 Label do_call; | 5371 Label do_call; |
5411 __ bind(&slow); | 5372 __ bind(&slow); |
5412 __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE)); | 5373 __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE)); |
5413 __ b(ne, &non_function_call); | 5374 __ b(ne, &non_function_call); |
5414 __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); | 5375 __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); |
5415 __ jmp(&do_call); | 5376 __ jmp(&do_call); |
(...skipping 67 matching lines...)
5483 // If the receiver is not a string trigger the non-string case. | 5444 // If the receiver is not a string trigger the non-string case. |
5484 __ tst(result_, Operand(kIsNotStringMask)); | 5445 __ tst(result_, Operand(kIsNotStringMask)); |
5485 __ b(ne, receiver_not_string_); | 5446 __ b(ne, receiver_not_string_); |
5486 | 5447 |
5487 // If the index is non-smi trigger the non-smi case. | 5448 // If the index is non-smi trigger the non-smi case. |
5488 __ JumpIfNotSmi(index_, &index_not_smi_); | 5449 __ JumpIfNotSmi(index_, &index_not_smi_); |
5489 __ bind(&got_smi_index_); | 5450 __ bind(&got_smi_index_); |
5490 | 5451 |
5491 // Check for index out of range. | 5452 // Check for index out of range. |
5492 __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset)); | 5453 __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset)); |
5493 __ cmp(ip, Operand(index_)); | 5454 __ cmphi(ip, index_); |
5494 __ b(ls, index_out_of_range_); | 5455 __ bf(index_out_of_range_); |
5495 | 5456 |
5496 __ mov(index_, Operand(index_, ASR, kSmiTagSize)); | 5457 __ asr(index_, index_, Operand(kSmiTagSize)); |
5497 | 5458 |
5498 StringCharLoadGenerator::Generate(masm, | 5459 StringCharLoadGenerator::Generate(masm, |
5499 object_, | 5460 object_, |
5500 index_, | 5461 index_, |
5501 result_, | 5462 result_, |
5502 &call_runtime_); | 5463 &call_runtime_); |
5503 | 5464 |
5504 __ mov(result_, Operand(result_, LSL, kSmiTagSize)); | 5465 __ lsl(result_, result_, Operand(kSmiTagSize)); |
5505 __ bind(&exit_); | 5466 __ bind(&exit_); |
5506 } | 5467 } |
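The ARM cmp/b(ls) pair becomes cmphi/bf on SH4: cmphi sets the T bit for an unsigned "strictly higher" result, so bf (branch if false) bails out exactly when index >= length. Because both values are still smi-tagged at this point, the single unsigned compare also rejects negative indices. A sketch:

    // Unsigned compare of the smi-tagged values; a negative index looks
    // like a huge unsigned number and fails the same single test.
    static bool IndexInRange(unsigned length_smi, unsigned index_smi) {
      return length_smi > index_smi;  // cmphi semantics
    }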
5507 | 5468 |
5508 | 5469 |
5509 void StringCharCodeAtGenerator::GenerateSlow( | 5470 void StringCharCodeAtGenerator::GenerateSlow( |
5510 MacroAssembler* masm, | 5471 MacroAssembler* masm, |
5511 const RuntimeCallHelper& call_helper) { | 5472 const RuntimeCallHelper& call_helper) { |
5512 __ Abort("Unexpected fallthrough to CharCodeAt slow case"); | 5473 __ Abort("Unexpected fallthrough to CharCodeAt slow case"); |
5513 | 5474 |
5514 // Index is not a smi. | 5475 // Index is not a smi. |
5515 __ bind(&index_not_smi_); | 5476 __ bind(&index_not_smi_); |
5516 // If index is a heap number, try converting it to an integer. | 5477 // If index is a heap number, try converting it to an integer. |
5517 __ CheckMap(index_, | 5478 __ CheckMap(index_, |
5518 result_, | 5479 result_, |
5519 Heap::kHeapNumberMapRootIndex, | 5480 Heap::kHeapNumberMapRootIndex, |
5520 index_not_number_, | 5481 index_not_number_, |
5521 DONT_DO_SMI_CHECK); | 5482 DONT_DO_SMI_CHECK); |
5522 call_helper.BeforeCall(masm); | 5483 call_helper.BeforeCall(masm); |
5523 __ push(object_); | 5484 __ push(object_); |
5524 __ push(index_); // Consumed by runtime conversion function. | 5485 __ push(index_); // Consumed by runtime conversion function. |
5525 if (index_flags_ == STRING_INDEX_IS_NUMBER) { | 5486 if (index_flags_ == STRING_INDEX_IS_NUMBER) { |
5526 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); | 5487 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); |
5527 } else { | 5488 } else { |
5528 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); | 5489 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); |
5529 // NumberToSmi discards numbers that are not exact integers. | 5490 // NumberToSmi discards numbers that are not exact integers. |
5530 __ CallRuntime(Runtime::kNumberToSmi, 1); | 5491 __ CallRuntime(Runtime::kNumberToSmi, 1); |
5531 } | 5492 } |
5532 // Save the conversion result before the pop instructions below | 5493 // Save the conversion result before the pop instructions below |
5533 // have a chance to overwrite it. | 5494 // have a chance to overwrite it. |
5534 __ Move(index_, r0); | 5495 __ mov(index_, r0); |
| 5496 |
5535 __ pop(object_); | 5497 __ pop(object_); |
5536 // Reload the instance type. | 5498 // Reload the instance type. |
5537 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 5499 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); |
5538 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 5500 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); |
5539 call_helper.AfterCall(masm); | 5501 call_helper.AfterCall(masm); |
5540 // If index is still not a smi, it must be out of range. | 5502 // If index is still not a smi, it must be out of range. |
5541 __ JumpIfNotSmi(index_, index_out_of_range_); | 5503 __ JumpIfNotSmi(index_, index_out_of_range_); |
5542 // Otherwise, return to the fast path. | 5504 // Otherwise, return to the fast path. |
5543 __ jmp(&got_smi_index_); | 5505 __ jmp(&got_smi_index_); |
5544 | 5506 |
5545 // Call runtime. We get here when the receiver is a string and the | 5507 // Call runtime. We get here when the receiver is a string and the |
5546 // index is a number, but the code of getting the actual character | 5508 // index is a number, but the code of getting the actual character |
5547 // is too complex (e.g., when the string needs to be flattened). | 5509 // is too complex (e.g., when the string needs to be flattened). |
5548 __ bind(&call_runtime_); | 5510 __ bind(&call_runtime_); |
5549 call_helper.BeforeCall(masm); | 5511 call_helper.BeforeCall(masm); |
5550 __ mov(index_, Operand(index_, LSL, kSmiTagSize)); | 5512 __ lsl(index_, index_, Operand(kSmiTagSize)); |
5551 __ Push(object_, index_); | 5513 __ Push(object_, index_); |
5552 __ CallRuntime(Runtime::kStringCharCodeAt, 2); | 5514 __ CallRuntime(Runtime::kStringCharCodeAt, 2); |
5553 __ Move(result_, r0); | 5515 __ mov(result_, r0); |
5554 call_helper.AfterCall(masm); | 5516 call_helper.AfterCall(masm); |
5555 __ jmp(&exit_); | 5517 __ jmp(&exit_); |
5556 | 5518 |
5557 __ Abort("Unexpected fallthrough from CharCodeAt slow case"); | 5519 __ Abort("Unexpected fallthrough from CharCodeAt slow case"); |
5558 } | 5520 } |
5559 | 5521 |
5560 | 5522 |
5561 // ------------------------------------------------------------------------- | 5523 // ------------------------------------------------------------------------- |
5562 // StringCharFromCodeGenerator | 5524 // StringCharFromCodeGenerator |
5563 | 5525 |
5564 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { | 5526 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { |
5565 // Fast case of Heap::LookupSingleCharacterStringFromCode. | 5527 // Fast case of Heap::LookupSingleCharacterStringFromCode. |
5566 STATIC_ASSERT(kSmiTag == 0); | 5528 STATIC_ASSERT(kSmiTag == 0); |
5567 STATIC_ASSERT(kSmiShiftSize == 0); | 5529 STATIC_ASSERT(kSmiShiftSize == 0); |
5568 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); | 5530 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); |
| 5531 |
| 5532 ASSERT(!code_.is(ip) && !result_.is(ip)); |
| 5533 |
5569 __ tst(code_, | 5534 __ tst(code_, |
5570 Operand(kSmiTagMask | | 5535 Operand(kSmiTagMask | |
5571 ((~String::kMaxAsciiCharCode) << kSmiTagSize))); | 5536 ((~String::kMaxAsciiCharCode) << kSmiTagSize))); |
5572 __ b(ne, &slow_case_); | 5537 __ b(ne, &slow_case_); |
5573 | 5538 |
5574 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); | 5539 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); |
5575 // At this point code register contains smi tagged ASCII char code. | 5540 // At this point code register contains smi tagged ASCII char code. |
5576 STATIC_ASSERT(kSmiTag == 0); | 5541 STATIC_ASSERT(kSmiTag == 0); |
5577 __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize)); | 5542 __ lsl(ip, code_, Operand(kPointerSizeLog2 - kSmiTagSize)); |
| 5543 __ add(result_, result_, ip); |
5578 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); | 5544 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); |
5579 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex); | 5545 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex); |
5580 __ b(eq, &slow_case_); | 5546 __ b(eq, &slow_case_); |
5581 __ bind(&exit_); | 5547 __ bind(&exit_); |
5582 } | 5548 } |
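The fast path admits only smi codes in the ASCII range, then indexes the single-character string cache; an undefined entry means the string has not been materialized and the slow case runs. A scalar sketch, assuming String::kMaxAsciiCharCode == 127 (so the tst mask above works out to ~0xFE):

    // Returns NULL when the slow case must handle the code.
    static void* CharFromCodeFast(unsigned code_smi, void** cache,
                                  void* undefined) {
      // kSmiTagMask | ((~kMaxAsciiCharCode) << kSmiTagSize)
      if ((code_smi & ~0xFEu) != 0) return NULL;
      void* result = cache[code_smi >> 1];  // untag to index the cache
      return (result == undefined) ? NULL : result;
    }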
5583 | 5549 |
5584 | 5550 |
5585 void StringCharFromCodeGenerator::GenerateSlow( | 5551 void StringCharFromCodeGenerator::GenerateSlow( |
5586 MacroAssembler* masm, | 5552 MacroAssembler* masm, |
5587 const RuntimeCallHelper& call_helper) { | 5553 const RuntimeCallHelper& call_helper) { |
5588 __ Abort("Unexpected fallthrough to CharFromCode slow case"); | 5554 __ Abort("Unexpected fallthrough to CharFromCode slow case"); |
5589 | 5555 |
5590 __ bind(&slow_case_); | 5556 __ bind(&slow_case_); |
5591 call_helper.BeforeCall(masm); | 5557 call_helper.BeforeCall(masm); |
5592 __ push(code_); | 5558 __ push(code_); |
5593 __ CallRuntime(Runtime::kCharFromCode, 1); | 5559 __ CallRuntime(Runtime::kCharFromCode, 1); |
5594 __ Move(result_, r0); | 5560 __ mov(result_, r0); |
5595 call_helper.AfterCall(masm); | 5561 call_helper.AfterCall(masm); |
5596 __ jmp(&exit_); | 5562 __ jmp(&exit_); |
5597 | 5563 |
5598 __ Abort("Unexpected fallthrough from CharFromCode slow case"); | 5564 __ Abort("Unexpected fallthrough from CharFromCode slow case"); |
5599 } | 5565 } |
5600 | 5566 |
5601 | 5567 |
5602 // ------------------------------------------------------------------------- | 5568 // ------------------------------------------------------------------------- |
5603 // StringCharAtGenerator | 5569 // StringCharAtGenerator |
5604 | 5570 |
(...skipping 15 matching lines...)
5620 Register dest, | 5586 Register dest, |
5621 Register src, | 5587 Register src, |
5622 Register count, | 5588 Register count, |
5623 Register scratch, | 5589 Register scratch, |
5624 bool ascii) { | 5590 bool ascii) { |
5625 Label loop; | 5591 Label loop; |
5626 Label done; | 5592 Label done; |
5627 // This loop just copies one character at a time, as it is only used for very | 5593 // This loop just copies one character at a time, as it is only used for very |
5628 // short strings. | 5594 // short strings. |
5629 if (!ascii) { | 5595 if (!ascii) { |
5630 __ add(count, count, Operand(count), SetCC); | 5596 __ add(count, count, count); |
5631 } else { | |
5632 __ cmp(count, Operand(0, RelocInfo::NONE)); | |
5633 } | 5597 } |
5634 __ b(eq, &done); | 5598 __ cmp(count, Operand(0)); |
| 5599 __ b(eq, &done, Label::kNear); |
5635 | 5600 |
5636 __ bind(&loop); | 5601 __ bind(&loop); |
5637 __ ldrb(scratch, MemOperand(src, 1, PostIndex)); | 5602 __ ldrb(scratch, MemOperand(src)); |
| 5603 __ add(src, src, Operand(1)); |
5638 // Perform sub between load and dependent store to get the load time to | 5604 // Perform sub between load and dependent store to get the load time to |
5639 // complete. | 5605 // complete. |
5640 __ sub(count, count, Operand(1), SetCC); | 5606 __ cmpgt(count, Operand(1)); |
5641 __ strb(scratch, MemOperand(dest, 1, PostIndex)); | 5607 __ sub(count, count, Operand(1)); |
| 5608 __ strb(scratch, MemOperand(dest)); |
| 5609 __ add(dest, dest, Operand(1)); |
5642 // last iteration. | 5610 // last iteration. |
5643 __ b(gt, &loop); | 5611 __ bt(&loop); |
5644 | 5612 |
5645 __ bind(&done); | 5613 __ bind(&done); |
5646 } | 5614 } |
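Without ARM's post-indexed addressing, each iteration issues explicit pointer increments, and testing the count (cmpgt against 1) before the decrement replaces the subtract-with-flags idiom. What the loop computes, in scalar form:

    // Byte-wise copy; two-byte strings double the count first so the
    // same loop serves both encodings.
    static void CopyCharsShort(unsigned char* dest, const unsigned char* src,
                               int count, bool ascii) {
      if (!ascii) count += count;
      while (count > 0) {
        *dest++ = *src++;
        count--;
      }
    }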
5647 | 5615 |
5648 | 5616 |
5649 enum CopyCharactersFlags { | 5617 enum CopyCharactersFlags { |
5650 COPY_ASCII = 1, | 5618 COPY_ASCII = 1, |
5651 DEST_ALWAYS_ALIGNED = 2 | 5619 DEST_ALWAYS_ALIGNED = 2 |
5652 }; | 5620 }; |
5653 | 5621 |
(...skipping 12 matching lines...)
5666 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; | 5634 bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; |
5667 | 5635 |
5668 if (dest_always_aligned && FLAG_debug_code) { | 5636 if (dest_always_aligned && FLAG_debug_code) { |
5669 // Check that destination is actually word aligned if the flag says | 5637 // Check that destination is actually word aligned if the flag says |
5670 // that it is. | 5638 // that it is. |
5671 __ tst(dest, Operand(kPointerAlignmentMask)); | 5639 __ tst(dest, Operand(kPointerAlignmentMask)); |
5672 __ Check(eq, "Destination of copy not aligned."); | 5640 __ Check(eq, "Destination of copy not aligned."); |
5673 } | 5641 } |
5674 | 5642 |
5675 const int kReadAlignment = 4; | 5643 const int kReadAlignment = 4; |
5676 const int kReadAlignmentMask = kReadAlignment - 1; | |
5677 // Ensure that reading an entire aligned word containing the last character | 5644 // Ensure that reading an entire aligned word containing the last character |
5678 // of a string will not read outside the allocated area (because we pad up | 5645 // of a string will not read outside the allocated area (because we pad up |
5679 // to kObjectAlignment). | 5646 // to kObjectAlignment). |
5680 STATIC_ASSERT(kObjectAlignment >= kReadAlignment); | 5647 STATIC_ASSERT(kObjectAlignment >= kReadAlignment); |
5681 // Assumes word reads and writes are little endian. | 5648 // Assumes word reads and writes are little endian. |
5682 // Nothing to do for zero characters. | 5649 // Nothing to do for zero characters. |
5683 Label done; | 5650 Label done; |
5684 if (!ascii) { | 5651 if (!ascii) { |
5685 __ add(count, count, Operand(count), SetCC); | 5652 __ add(count, count, count); |
5686 } else { | |
5687 __ cmp(count, Operand(0, RelocInfo::NONE)); | |
5688 } | 5653 } |
5689 __ b(eq, &done); | 5654 __ cmpeq(count, Operand(0)); |
| 5655 __ bt_near(&done); |
5690 | 5656 |
5691 // Assume that you cannot read (or write) unaligned. | 5657 // Use an optimized version for SH4. |
5692 Label byte_loop; | 5658 __ memcpy(dest, src, count, scratch1, scratch2, scratch3, scratch4); |
5693 // Must copy at least eight bytes, otherwise just do it one byte at a time. | |
5694 __ cmp(count, Operand(8)); | |
5695 __ add(count, dest, Operand(count)); | |
5696 Register limit = count; // Read until src equals this. | |
5697 __ b(lt, &byte_loop); | |
5698 | |
5699 if (!dest_always_aligned) { | |
5700 // Align dest by byte copying. Copies between zero and three bytes. | |
5701 __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC); | |
5702 Label dest_aligned; | |
5703 __ b(eq, &dest_aligned); | |
5704 __ cmp(scratch4, Operand(2)); | |
5705 __ ldrb(scratch1, MemOperand(src, 1, PostIndex)); | |
5706 __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le); | |
5707 __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt); | |
5708 __ strb(scratch1, MemOperand(dest, 1, PostIndex)); | |
5709 __ strb(scratch2, MemOperand(dest, 1, PostIndex), le); | |
5710 __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt); | |
5711 __ bind(&dest_aligned); | |
5712 } | |
5713 | |
5714 Label simple_loop; | |
5715 | |
5716 __ sub(scratch4, dest, Operand(src)); | |
5717 __ and_(scratch4, scratch4, Operand(0x03), SetCC); | |
5718 __ b(eq, &simple_loop); | |
5719 // Shift register is number of bits in a source word that | |
5720 // must be combined with bits in the next source word in order | |
5721 // to create a destination word. | |
5722 | |
5723 // Complex loop for src/dst that are not aligned the same way. | |
5724 { | |
5725 Label loop; | |
5726 __ mov(scratch4, Operand(scratch4, LSL, 3)); | |
5727 Register left_shift = scratch4; | |
5728 __ and_(src, src, Operand(~3)); // Round down to load previous word. | |
5729 __ ldr(scratch1, MemOperand(src, 4, PostIndex)); | |
5730 // Store the "shift" most significant bits of scratch in the least | |
5731 // significant bits (i.e., shift down by (32-shift)). | |
5732 __ rsb(scratch2, left_shift, Operand(32)); | |
5733 Register right_shift = scratch2; | |
5734 __ mov(scratch1, Operand(scratch1, LSR, right_shift)); | |
5735 | |
5736 __ bind(&loop); | |
5737 __ ldr(scratch3, MemOperand(src, 4, PostIndex)); | |
5738 __ sub(scratch5, limit, Operand(dest)); | |
5739 __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift)); | |
5740 __ str(scratch1, MemOperand(dest, 4, PostIndex)); | |
5741 __ mov(scratch1, Operand(scratch3, LSR, right_shift)); | |
5742 // Loop if four or more bytes left to copy. | |
5743 // Compare to eight, because we did the subtract before increasing dst. | |
5744 __ sub(scratch5, scratch5, Operand(8), SetCC); | |
5745 __ b(ge, &loop); | |
5746 } | |
5747 // There is now between zero and three bytes left to copy (negative that | |
5748 // number is in scratch5), and between one and three bytes already read into | |
5749 // scratch1 (eight times that number in scratch4). We may have read past | |
5750 // the end of the string, but because objects are aligned, we have not read | |
5751 // past the end of the object. | |
5752 // Find the minimum of remaining characters to move and preloaded characters | |
5753 // and write those as bytes. | |
5754 __ add(scratch5, scratch5, Operand(4), SetCC); | |
5755 __ b(eq, &done); | |
5756 __ cmp(scratch4, Operand(scratch5, LSL, 3), ne); | |
5757 // Move minimum of bytes read and bytes left to copy to scratch4. | |
5758 __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt); | |
5759 // Between one and three (value in scratch5) characters already read into | |
5760 // scratch ready to write. | |
5761 __ cmp(scratch5, Operand(2)); | |
5762 __ strb(scratch1, MemOperand(dest, 1, PostIndex)); | |
5763 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge); | |
5764 __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge); | |
5765 __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt); | |
5766 __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt); | |
5767 // Copy any remaining bytes. | |
5768 __ b(&byte_loop); | |
5769 | |
5770 // Simple loop. | |
5771 // Copy words from src to dst, until less than four bytes left. | |
5772 // Both src and dest are word aligned. | |
5773 __ bind(&simple_loop); | |
5774 { | |
5775 Label loop; | |
5776 __ bind(&loop); | |
5777 __ ldr(scratch1, MemOperand(src, 4, PostIndex)); | |
5778 __ sub(scratch3, limit, Operand(dest)); | |
5779 __ str(scratch1, MemOperand(dest, 4, PostIndex)); | |
5780 // Compare to 8, not 4, because we do the subtraction before increasing | |
5781 // dest. | |
5782 __ cmp(scratch3, Operand(8)); | |
5783 __ b(ge, &loop); | |
5784 } | |
5785 | |
5786 // Copy bytes from src to dst until dst hits limit. | |
5787 __ bind(&byte_loop); | |
5788 __ cmp(dest, Operand(limit)); | |
5789 __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt); | |
5790 __ b(ge, &done); | |
5791 __ strb(scratch1, MemOperand(dest, 1, PostIndex)); | |
5792 __ b(&byte_loop); | |
5793 | |
5794 __ bind(&done); | 5659 __ bind(&done); |
5795 } | 5660 } |
5796 | 5661 |
5797 | 5662 |
5798 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, | 5663 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, |
5799 Register c1, | 5664 Register c1, |
5800 Register c2, | 5665 Register c2, |
5801 Register scratch1, | 5666 Register scratch1, |
5802 Register scratch2, | 5667 Register scratch2, |
5803 Register scratch3, | 5668 Register scratch3, |
5804 Register scratch4, | 5669 Register scratch4, |
5805 Register scratch5, | 5670 Register scratch5, |
5806 Label* not_found) { | 5671 Label* not_found) { |
5807 // Register scratch3 is the general scratch register in this function. | 5672 // Register scratch3 is the general scratch register in this function. |
5808 Register scratch = scratch3; | 5673 Register scratch = scratch3; |
5809 | 5674 |
5810 // Make sure that both characters are not digits as such strings have a | 5675 // Make sure that both characters are not digits as such strings have a |
5811 // different hash algorithm. Don't try to look for these in the symbol table. | 5676 // different hash algorithm. Don't try to look for these in the symbol table. |
5812 Label not_array_index; | 5677 Label not_array_index; |
5813 __ sub(scratch, c1, Operand(static_cast<int>('0'))); | 5678 __ sub(scratch, c1, Operand(static_cast<int>('0'))); |
5814 __ cmp(scratch, Operand(static_cast<int>('9' - '0'))); | 5679 __ cmphi(scratch, Operand(static_cast<int>('9' - '0'))); |
5815 __ b(hi, ¬_array_index); | 5680 __ bt_near(¬_array_index); |
5816 __ sub(scratch, c2, Operand(static_cast<int>('0'))); | 5681 __ sub(scratch, c2, Operand(static_cast<int>('0'))); |
5817 __ cmp(scratch, Operand(static_cast<int>('9' - '0'))); | 5682 __ cmphi(scratch, Operand(static_cast<int>('9' - '0'))); |
5818 | 5683 |
5819 // If the check failed, combine both characters into a single halfword. | 5684 // If the check failed, combine both characters into a single halfword. |
5820 // This is required by the contract of the method: code at the | 5685 // This is required by the contract of the method: code at the |
5821 // not_found branch expects this combination in c1 register | 5686 // not_found branch expects this combination in c1 register |
5822 __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls); | 5687 __ lsl(scratch1, c2, Operand(kBitsPerByte)); |
5823 __ b(ls, not_found); | 5688 __ lor(c1, c1, scratch1, f); |
| 5689 __ b(f, not_found); |
5824 | 5690 |
5825 __ bind(¬_array_index); | 5691 __ bind(¬_array_index); |
5826 // Calculate the two character string hash. | 5692 // Calculate the two character string hash. |
5827 Register hash = scratch1; | 5693 Register hash = scratch1; |
5828 StringHelper::GenerateHashInit(masm, hash, c1); | 5694 StringHelper::GenerateHashInit(masm, hash, c1, scratch); |
5829 StringHelper::GenerateHashAddCharacter(masm, hash, c2); | 5695 StringHelper::GenerateHashAddCharacter(masm, hash, c2, scratch); |
5830 StringHelper::GenerateHashGetHash(masm, hash); | 5696 StringHelper::GenerateHashGetHash(masm, hash, scratch); |
5831 | 5697 |
5832 // Collect the two characters in a register. | 5698 // Collect the two characters in a register. |
5833 Register chars = c1; | 5699 Register chars = c1; |
5834 __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte)); | 5700 __ lsl(scratch, c2, Operand(kBitsPerByte)); |
| 5701 __ lor(chars, chars, scratch); |
5835 | 5702 |
5836 // chars: two character string, char 1 in byte 0 and char 2 in byte 1. | 5703 // chars: two character string, char 1 in byte 0 and char 2 in byte 1. |
5837 // hash: hash of two character string. | 5704 // hash: hash of two character string. |
5838 | 5705 |
5839 // Load symbol table | 5706 // Load symbol table |
5840 // Load address of first element of the symbol table. | 5707 // Load address of first element of the symbol table. |
5841 Register symbol_table = c2; | 5708 Register symbol_table = c2; |
5842 __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex); | 5709 __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex); |
5843 | 5710 |
5844 Register undefined = scratch4; | 5711 Register undefined = scratch4; |
5845 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); | 5712 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); |
5846 | 5713 |
5847 // Calculate capacity mask from the symbol table capacity. | 5714 // Calculate capacity mask from the symbol table capacity. |
5848 Register mask = scratch2; | 5715 Register mask = scratch2; |
5849 __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset)); | 5716 __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset)); |
5850 __ mov(mask, Operand(mask, ASR, 1)); | 5717 __ asr(mask, mask, Operand(1)); |
5851 __ sub(mask, mask, Operand(1)); | 5718 __ sub(mask, mask, Operand(1)); |
5852 | 5719 |
5853 // Calculate untagged address of the first element of the symbol table. | 5720 // Calculate untagged address of the first element of the symbol table. |
5854 Register first_symbol_table_element = symbol_table; | 5721 Register first_symbol_table_element = symbol_table; |
5855 __ add(first_symbol_table_element, symbol_table, | 5722 __ add(first_symbol_table_element, symbol_table, |
5856 Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag)); | 5723 Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag)); |
5857 | 5724 |
5858 // Registers | 5725 // Registers |
5859 // chars: two character string, char 1 in byte 0 and char 2 in byte 1. | 5726 // chars: two character string, char 1 in byte 0 and char 2 in byte 1. |
5860 // hash: hash of two character string | 5727 // hash: hash of two character string |
5861 // mask: capacity mask | 5728 // mask: capacity mask |
5862 // first_symbol_table_element: address of the first element of | 5729 // first_symbol_table_element: address of the first element of |
5863 // the symbol table | 5730 // the symbol table |
5864 // undefined: the undefined object | 5731 // undefined: the undefined object |
5865 // scratch: - | 5732 // scratch: - |
5866 | 5733 |
5867 // Perform a number of probes in the symbol table. | 5734 // Perform a number of probes in the symbol table. |
5868 const int kProbes = 4; | 5735 const int kProbes = 4; |
5869 Label found_in_symbol_table; | 5736 Label found_in_symbol_table; |
5870 Label next_probe[kProbes]; | 5737 Label next_probe[kProbes]; |
5871 Register candidate = scratch5; // Scratch register contains candidate. | 5738 Register candidate = scratch5; // Scratch register contains candidate. |
5872 for (int i = 0; i < kProbes; i++) { | 5739 for (int i = 0; i < kProbes; i++) { |
5873 // Calculate entry in symbol table. | 5740 // Calculate entry in symbol table. |
5874 if (i > 0) { | 5741 if (i > 0) { |
5875 __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i))); | 5742 __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i))); |
5876 } else { | 5743 } else { |
5877 __ mov(candidate, hash); | 5744 __ mov(candidate, hash); |
5878 } | 5745 } |
5879 | 5746 |
5880 __ and_(candidate, candidate, Operand(mask)); | 5747 __ land(candidate, candidate, mask); |
5881 | 5748 |
5882 // Load the entry from the symbol table. | 5749 // Load the entry from the symbol table. |
5883 STATIC_ASSERT(SymbolTable::kEntrySize == 1); | 5750 STATIC_ASSERT(SymbolTable::kEntrySize == 1); |
5884 __ ldr(candidate, | 5751 __ lsl(scratch, candidate, Operand(kPointerSizeLog2)); |
5885 MemOperand(first_symbol_table_element, | 5752 __ add(scratch, first_symbol_table_element, scratch); |
5886 candidate, | 5753 __ ldr(candidate, MemOperand(scratch)); |
5887 LSL, | |
5888 kPointerSizeLog2)); | |
5889 | 5754 |
5890 // If entry is undefined no string with this hash can be found. | 5755 // If entry is undefined no string with this hash can be found. |
5891 Label is_string; | 5756 Label is_string; |
5892 __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE); | 5757 __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE, eq); |
5893 __ b(ne, &is_string); | 5758 __ b(ne, &is_string, Label::kNear); |
5894 | 5759 |
5895 __ cmp(undefined, candidate); | 5760 __ cmp(undefined, candidate); |
5896 __ b(eq, not_found); | 5761 __ b(eq, not_found); |
5897 // Must be the hole (deleted entry). | 5762 // Must be the hole (deleted entry). |
5898 if (FLAG_debug_code) { | 5763 if (FLAG_debug_code) { |
5899 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 5764 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
5900 __ cmp(ip, candidate); | 5765 __ cmp(ip, candidate); |
5901 __ Assert(eq, "oddball in symbol table is not undefined or the hole"); | 5766 __ Assert(eq, "oddball in symbol table is not undefined or the hole"); |
5902 } | 5767 } |
5903 __ jmp(&next_probe[i]); | 5768 __ jmp(&next_probe[i]); |
(...skipping 23 matching lines...)
5927 | 5792 |
5928 // Scratch register contains result when we fall through to here. | 5793 // Scratch register contains result when we fall through to here. |
5929 Register result = candidate; | 5794 Register result = candidate; |
5930 __ bind(&found_in_symbol_table); | 5795 __ bind(&found_in_symbol_table); |
5931 __ Move(r0, result); | 5796 __ Move(r0, result); |
5932 } | 5797 } |
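Control flow of the probe sequence, with the per-candidate string comparison (in the elided lines above) and SymbolTable::GetProbeOffset left abstract, since its formula is not shown in this diff:

    static void* ProbeSymbolTable(void** elements, unsigned mask, unsigned hash,
                                  void* undefined, void* the_hole,
                                  unsigned (*probe_offset)(int)) {
      const int kProbes = 4;
      for (int i = 0; i < kProbes; i++) {
        unsigned index = (hash + (i > 0 ? probe_offset(i) : 0)) & mask;
        void* candidate = elements[index];
        if (candidate == undefined) return NULL;  // no string with this hash
        if (candidate == the_hole) continue;      // deleted entry, keep probing
        // ...compare candidate against the two-character key (elided)...
      }
      return NULL;  // give up, let the caller hit the runtime
    }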
5933 | 5798 |
5934 | 5799 |
5935 void StringHelper::GenerateHashInit(MacroAssembler* masm, | 5800 void StringHelper::GenerateHashInit(MacroAssembler* masm, |
5936 Register hash, | 5801 Register hash, |
5937 Register character) { | 5802 Register character, |
| 5803 Register scratch) { |
| 5804 // Added a scratch parameter for the SH4 implementation compared to ARM. |
5938 // hash = character + (character << 10); | 5805 // hash = character + (character << 10); |
5939 __ LoadRoot(hash, Heap::kHashSeedRootIndex); | 5806 __ LoadRoot(hash, Heap::kHashSeedRootIndex); |
5940 // Untag smi seed and add the character. | 5807 // Untag smi seed and add the character. |
5941 __ add(hash, character, Operand(hash, LSR, kSmiTagSize)); | 5808 __ lsr(hash, hash, Operand(kSmiTagSize)); |
| 5809 __ add(hash, character, hash); |
5942 // hash += hash << 10; | 5810 // hash += hash << 10; |
5943 __ add(hash, hash, Operand(hash, LSL, 10)); | 5811 __ lsl(scratch, hash, Operand(10)); |
| 5812 __ add(hash, hash, scratch); |
5944 // hash ^= hash >> 6; | 5813 // hash ^= hash >> 6; |
5945 __ eor(hash, hash, Operand(hash, LSR, 6)); | 5814 __ lsr(scratch, hash, Operand(6)); |
| 5815 __ eor(hash, hash, scratch); |
5946 } | 5816 } |
5947 | 5817 |
5948 | 5818 |
5949 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, | 5819 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, |
5950 Register hash, | 5820 Register hash, |
5951 Register character) { | 5821 Register character, |
| 5822 Register scratch) { |
| 5823 // Added a scratch parameter for the SH4 implementation compared to ARM. |
5952 // hash += character; | 5824 // hash += character; |
5953 __ add(hash, hash, Operand(character)); | 5825 __ add(hash, hash, character); |
5954 // hash += hash << 10; | 5826 // hash += hash << 10; |
5955 __ add(hash, hash, Operand(hash, LSL, 10)); | 5827 __ lsl(scratch, hash, Operand(10)); |
| 5828 __ add(hash, hash, scratch); |
5956 // hash ^= hash >> 6; | 5829 // hash ^= hash >> 6; |
5957 __ eor(hash, hash, Operand(hash, LSR, 6)); | 5830 __ lsr(scratch, hash, Operand(6)); |
| 5831 __ eor(hash, hash, scratch); |
5958 } | 5832 } |
5959 | 5833 |
5960 | 5834 |
5961 void StringHelper::GenerateHashGetHash(MacroAssembler* masm, | 5835 void StringHelper::GenerateHashGetHash(MacroAssembler* masm, |
5962 Register hash) { | 5836 Register hash, |
| 5837 Register scratch) { |
| 5838 // Added a scratch parameter for the SH4 implementation compared to ARM. |
5963 // hash += hash << 3; | 5839 // hash += hash << 3; |
5964 __ add(hash, hash, Operand(hash, LSL, 3)); | 5840 __ lsl(scratch, hash, Operand(3)); |
| 5841 __ add(hash, hash, scratch); |
5965 // hash ^= hash >> 11; | 5842 // hash ^= hash >> 11; |
5966 __ eor(hash, hash, Operand(hash, LSR, 11)); | 5843 __ lsr(scratch, hash, Operand(11)); |
| 5844 __ eor(hash, hash, scratch); |
5967 // hash += hash << 15; | 5845 // hash += hash << 15; |
5968 __ add(hash, hash, Operand(hash, LSL, 15)); | 5846 __ lsl(scratch, hash, Operand(15)); |
5969 | 5847 __ add(hash, hash, scratch); |
5970 __ and_(hash, hash, Operand(String::kHashBitMask), SetCC); | 5848 __ land(hash, hash, Operand(String::kHashBitMask)); |
| 5849 __ cmpeq(hash, Operand(0)); |
5971 | 5850 |
5972 // if (hash == 0) hash = 27; | 5851 // if (hash == 0) hash = 27; |
5973 __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq); | 5852 __ mov(hash, Operand(StringHasher::kZeroHash), eq); |
5974 } | 5853 } |
5975 | 5854 |
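Taken together, GenerateHashInit/GenerateHashAddCharacter/GenerateHashGetHash emit
V8's incremental string hash. A minimal plain-C++ sketch of the computation the
instruction sequences above encode (illustrative, 32-bit arithmetic; the extra
scratch register exists only because SH4 shifts cannot combine with add/eor in a
single instruction the way ARM shifted operands can):

    uint32_t HashInit(uint32_t seed, uint32_t c) {  // seed untagged from its smi
      uint32_t hash = seed + c;
      hash += hash << 10;
      hash ^= hash >> 6;
      return hash;
    }
    uint32_t HashAddCharacter(uint32_t hash, uint32_t c) {
      hash += c;
      hash += hash << 10;
      hash ^= hash >> 6;
      return hash;
    }
    uint32_t HashGetHash(uint32_t hash) {
      hash += hash << 3;
      hash ^= hash >> 11;
      hash += hash << 15;
      hash &= String::kHashBitMask;                 // keep only the hash bits
      return hash == 0 ? StringHasher::kZeroHash : hash;  // the 27 fallback above
    }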
5976 | 5855 |
5977 void SubStringStub::Generate(MacroAssembler* masm) { | 5856 void SubStringStub::Generate(MacroAssembler* masm) { |
5978 Label runtime; | 5857 Label runtime; |
5979 | 5858 |
5980 // Stack frame on entry. | 5859 // Stack frame on entry. |
5981 // lr: return address | 5860 // lr: return address |
5982 // sp[0]: to | 5861 // sp[0]: to |
5983 // sp[4]: from | 5862 // sp[4]: from |
5984 // sp[8]: string | 5863 // sp[8]: string |
5985 | 5864 |
5986 // This stub is called from the native-call %_SubString(...), so | 5865 // This stub is called from the native-call %_SubString(...), so |
5987 // nothing can be assumed about the arguments. It is tested that: | 5866 // nothing can be assumed about the arguments. It is tested that: |
5988 // "string" is a sequential string, | 5867 // "string" is a sequential string, |
5989 // both "from" and "to" are smis, and | 5868 // both "from" and "to" are smis, and |
5990 // 0 <= from <= to <= string.length. | 5869 // 0 <= from <= to <= string.length. |
5991 // If any of these assumptions fail, we call the runtime system. | 5870 // If any of these assumptions fail, we call the runtime system. |
5992 | 5871 |
5993 const int kToOffset = 0 * kPointerSize; | 5872 const int kToOffset = 0 * kPointerSize; |
5994 const int kFromOffset = 1 * kPointerSize; | 5873 const int kFromOffset = 1 * kPointerSize; |
5995 const int kStringOffset = 2 * kPointerSize; | 5874 const int kStringOffset = 2 * kPointerSize; |
5996 | 5875 |
5997 __ Ldrd(r2, r3, MemOperand(sp, kToOffset)); | 5876 __ Ldrd(r2, r3, MemOperand(sp, kToOffset)); |
5998 STATIC_ASSERT(kFromOffset == kToOffset + 4); | 5877 STATIC_ASSERT(kFromOffset == kToOffset + 4); |
5999 STATIC_ASSERT(kSmiTag == 0); | 5878 STATIC_ASSERT(kSmiTag == 0); |
6000 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); | 5879 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); |
| 5880 __ JumpIfNotBothSmi(r2, r3, &runtime); // Explicit check; the ARM code derives it from the carry of its tagged shifts. |
| 5881 // I.e., arithmetic shift right by one un-smi-tags. |
| 5882 __ asr(r2, r2, Operand(1)); |
| 5883 __ asr(r3, r3, Operand(1)); |
| 5884 __ cmpge(r3, Operand(0)); |
| 5885 __ bf(&runtime); // From is negative. |
6001 | 5886 |
6002 // I.e., arithmetic shift right by one un-smi-tags. | 5887 // Both to and from are smis. |
6003 __ mov(r2, Operand(r2, ASR, 1), SetCC); | 5888 __ sub(r2, r2, r3); |
6004 __ mov(r3, Operand(r3, ASR, 1), SetCC, cc); | 5889 __ cmpge(r2, Operand(0)); |
6005 // If either to or from had the smi tag bit set, then carry is set now. | 5890 __ bf(&runtime); // Fail if from > to. |
6006 __ b(cs, &runtime); // Either "from" or "to" is not a smi. | |
6007 // We want to bail out to runtime here if From is negative. In that case, the |
6008 // next instruction is not executed and we fall through to bailing out to | |
6009 // runtime. pl is the opposite of mi. | |
6010 // Both r2 and r3 are untagged integers. | |
6011 __ sub(r2, r2, Operand(r3), SetCC, pl); | |
6012 __ b(mi, &runtime); // Fail if from > to. | |
6013 | 5891 |
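For reference, the smi checks and shifts above assume V8's 32-bit smi encoding
with kSmiTag == 0 and kSmiTagSize == 1: a smi is the integer shifted left by
one, so the low bit of any smi is always clear. A minimal sketch (illustrative;
JumpIfNotBothSmi presumably ORs the two values and tests the tag bit):

    inline bool BothSmis(int32_t a, int32_t b) { return ((a | b) & 1) == 0; }
    inline int32_t SmiUntag(int32_t s) { return s >> 1; }  // asr by 1 un-smi-tags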
6014 // Make sure first argument is a string. | 5892 // Make sure first argument is a string. |
6015 __ ldr(r0, MemOperand(sp, kStringOffset)); | 5893 __ ldr(r0, MemOperand(sp, kStringOffset)); |
6016 STATIC_ASSERT(kSmiTag == 0); | 5894 STATIC_ASSERT(kSmiTag == 0); |
6017 __ JumpIfSmi(r0, &runtime); | 5895 __ JumpIfSmi(r0, &runtime); |
6018 Condition is_string = masm->IsObjectStringType(r0, r1); | 5896 Condition is_string = masm->IsObjectStringType(r0, r1); |
6019 __ b(NegateCondition(is_string), &runtime); | 5897 __ b(NegateCondition(is_string), &runtime); |
6020 | 5898 |
6021 // Short-cut for the case of trivial substring. | 5899 // Short-cut for the case of trivial substring. |
6022 Label return_r0; | 5900 Label return_r0; |
6023 // r0: original string | 5901 // r0: original string |
6024 // r2: result string length | 5902 // r2: result string length |
6025 __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset)); | 5903 __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset)); |
6026 __ cmp(r2, Operand(r4, ASR, 1)); | 5904 __ asr(r4, r4, Operand(1)); |
6027 // Return original string. | 5905 __ cmpeq(r2, r4); |
6028 __ b(eq, &return_r0); | 5906 __ bt(&return_r0); |
6029 // Longer than original string's length or negative: unsafe arguments. | 5907 // Longer than original string's length or negative: unsafe arguments. |
6030 __ b(hi, &runtime); | 5908 __ cmphi(r2, r4); |
| 5909 __ bt(&runtime); |
6031 // Shorter than original string's length: an actual substring. | 5910 // Shorter than original string's length: an actual substring. |
6032 | 5911 |
6033 // Deal with different string types: update the index if necessary | 5912 // Deal with different string types: update the index if necessary |
6034 // and put the underlying string into r5. | 5913 // and put the underlying string into r5. |
6035 // r0: original string | 5914 // r0: original string |
6036 // r1: instance type | 5915 // r1: instance type |
6037 // r2: length | 5916 // r2: length |
6038 // r3: from index (untagged) | 5917 // r3: from index (untagged) |
6039 Label underlying_unpacked, sliced_string, seq_or_external_string; | 5918 Label underlying_unpacked, sliced_string, seq_or_external_string; |
6040 // If the string is not indirect, it can only be sequential or external. | 5919 // If the string is not indirect, it can only be sequential or external. |
(...skipping 11 matching lines...) |
6052 __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset)); | 5931 __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset)); |
6053 // Update instance type. | 5932 // Update instance type. |
6054 __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset)); | 5933 __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset)); |
6055 __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset)); | 5934 __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset)); |
6056 __ jmp(&underlying_unpacked); | 5935 __ jmp(&underlying_unpacked); |
6057 | 5936 |
6058 __ bind(&sliced_string); | 5937 __ bind(&sliced_string); |
6059 // Sliced string. Fetch parent and correct start index by offset. | 5938 // Sliced string. Fetch parent and correct start index by offset. |
6060 __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); | 5939 __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); |
6061 __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset)); | 5940 __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset)); |
6062 __ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index. | 5941 __ asr(r1, r4, Operand(1)); |
| 5942 __ add(r3, r3, r1); // Add offset to index. |
6063 // Update instance type. | 5943 // Update instance type. |
6064 __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset)); | 5944 __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset)); |
6065 __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset)); | 5945 __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset)); |
6066 __ jmp(&underlying_unpacked); | 5946 __ jmp(&underlying_unpacked); |
6067 | 5947 |
6068 __ bind(&seq_or_external_string); | 5948 __ bind(&seq_or_external_string); |
6069 // Sequential or external string. Just move string to the expected register. | 5949 // Sequential or external string. Just move string to the expected register. |
6070 __ mov(r5, r0); | 5950 __ mov(r5, r0); |
6071 | 5951 |
6072 __ bind(&underlying_unpacked); | 5952 __ bind(&underlying_unpacked); |
6073 | 5953 |
6074 if (FLAG_string_slices) { | 5954 if (FLAG_string_slices) { |
6075 Label copy_routine; | 5955 Label copy_routine; |
6076 // r5: underlying subject string | 5956 // r5: underlying subject string |
6077 // r1: instance type of underlying subject string | 5957 // r1: instance type of underlying subject string |
6078 // r2: length | 5958 // r2: length |
6079 // r3: adjusted start index (untagged) | 5959 // r3: adjusted start index (untagged) |
6080 __ cmp(r2, Operand(SlicedString::kMinLength)); | 5960 __ cmpge(r2, Operand(SlicedString::kMinLength)); |
6081 // Short slice. Copy instead of slicing. | 5961 // Short slice. Copy instead of slicing. |
6082 __ b(lt, ©_routine); | 5962 __ bf(©_routine); |
6083 // Allocate new sliced string. At this point we do not reload the instance | 5963 // Allocate new sliced string. At this point we do not reload the instance |
6084 // type including the string encoding because we simply rely on the info | 5964 // type including the string encoding because we simply rely on the info |
6085 // provided by the original string. It does not matter if the original | 5965 // provided by the original string. It does not matter if the original |
6086 // string's encoding is wrong because we always have to recheck encoding of | 5966 // string's encoding is wrong because we always have to recheck encoding of |
6087 // the newly created string's parent anyway due to externalized strings. | 5967 // the newly created string's parent anyway due to externalized strings. |
6088 Label two_byte_slice, set_slice_header; | 5968 Label two_byte_slice, set_slice_header; |
6089 STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); | 5969 STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); |
6090 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); | 5970 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); |
6091 __ tst(r1, Operand(kStringEncodingMask)); | 5971 __ tst(r1, Operand(kStringEncodingMask)); |
6092 __ b(eq, &two_byte_slice); | 5972 __ b(eq, &two_byte_slice); |
6093 __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime); | 5973 __ AllocateAsciiSlicedString(r0, r2, r6, r7, &runtime); |
6094 __ jmp(&set_slice_header); | 5974 __ jmp(&set_slice_header); |
6095 __ bind(&two_byte_slice); | 5975 __ bind(&two_byte_slice); |
6096 __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime); | 5976 __ AllocateTwoByteSlicedString(r0, r2, r6, r7, &runtime); |
6097 __ bind(&set_slice_header); | 5977 __ bind(&set_slice_header); |
6098 __ mov(r3, Operand(r3, LSL, 1)); | 5978 __ lsl(r3, r3, Operand(1)); |
6099 __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); | 5979 __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); |
6100 __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset)); | 5980 __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset)); |
6101 __ jmp(&return_r0); | 5981 __ jmp(&return_r0); |
6102 | 5982 |
6103 __ bind(©_routine); | 5983 __ bind(©_routine); |
6104 } | 5984 } |
6105 | 5985 |
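The FLAG_string_slices block above picks between materializing the substring and
aliasing the parent. Decision sketch (illustrative control flow, using the
constants named in the code):

    if (length < SlicedString::kMinLength) {
      // Short result: copying the characters is cheaper than a slice header.
    } else {
      // Allocate SlicedString{parent, offset}; the offset field is stored
      // smi-tagged, hence the lsl(r3, r3, Operand(1)) above.
    }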
6106 // r5: underlying subject string | 5986 // r5: underlying subject string |
6107 // r1: instance type of underlying subject string | 5987 // r1: instance type of underlying subject string |
6108 // r2: length | 5988 // r2: length |
(...skipping 40 matching lines...) |
6149 StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, | 6029 StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, |
6150 COPY_ASCII | DEST_ALWAYS_ALIGNED); | 6030 COPY_ASCII | DEST_ALWAYS_ALIGNED); |
6151 __ jmp(&return_r0); | 6031 __ jmp(&return_r0); |
6152 | 6032 |
6153 // Allocate and copy the resulting two-byte string. | 6033 // Allocate and copy the resulting two-byte string. |
6154 __ bind(&two_byte_sequential); | 6034 __ bind(&two_byte_sequential); |
6155 __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime); | 6035 __ AllocateTwoByteString(r0, r2, r4, r6, r7, &runtime); |
6156 | 6036 |
6157 // Locate first character of substring to copy. | 6037 // Locate first character of substring to copy. |
6158 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); | 6038 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); |
6159 __ add(r5, r5, Operand(r3, LSL, 1)); | 6039 __ lsl(r1, r3, Operand(1)); |
| 6040 __ add(r5, r5, r1); |
6160 // Locate first character of result. | 6041 // Locate first character of result. |
6161 __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | 6042 __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
6162 | 6043 |
6163 // r0: result string. | 6044 // r0: result string. |
6164 // r1: first character of result. | 6045 // r1: first character of result. |
6165 // r2: result length. | 6046 // r2: result length. |
6166 // r5: first character of substring to copy. | 6047 // r5: first character of substring to copy. |
6167 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 6048 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
6168 StringHelper::GenerateCopyCharactersLong( | 6049 StringHelper::GenerateCopyCharactersLong( |
6169 masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED); | 6050 masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED); |
(...skipping 16 matching lines...) |
6186 Register scratch1, | 6067 Register scratch1, |
6187 Register scratch2, | 6068 Register scratch2, |
6188 Register scratch3) { | 6069 Register scratch3) { |
6189 Register length = scratch1; | 6070 Register length = scratch1; |
6190 | 6071 |
6191 // Compare lengths. | 6072 // Compare lengths. |
6192 Label strings_not_equal, check_zero_length; | 6073 Label strings_not_equal, check_zero_length; |
6193 __ ldr(length, FieldMemOperand(left, String::kLengthOffset)); | 6074 __ ldr(length, FieldMemOperand(left, String::kLengthOffset)); |
6194 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); | 6075 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); |
6195 __ cmp(length, scratch2); | 6076 __ cmp(length, scratch2); |
6196 __ b(eq, &check_zero_length); | 6077 __ b(eq, &check_zero_length, Label::kNear); |
6197 __ bind(&strings_not_equal); | 6078 __ bind(&strings_not_equal); |
6198 __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL))); | 6079 __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL))); |
6199 __ Ret(); | 6080 __ Ret(); |
6200 | 6081 |
6201 // Check if the length is zero. | 6082 // Check if the length is zero. |
6202 Label compare_chars; | 6083 Label compare_chars; |
6203 __ bind(&check_zero_length); | 6084 __ bind(&check_zero_length); |
6204 STATIC_ASSERT(kSmiTag == 0); | 6085 STATIC_ASSERT(kSmiTag == 0); |
6205 __ cmp(length, Operand(0)); | 6086 __ cmpeq(length, Operand(0)); |
6206 __ b(ne, &compare_chars); | 6087 __ b(ne, &compare_chars, Label::kNear); |
6207 __ mov(r0, Operand(Smi::FromInt(EQUAL))); | 6088 __ mov(r0, Operand(Smi::FromInt(EQUAL))); |
6208 __ Ret(); | 6089 __ Ret(); |
6209 | 6090 |
6210 // Compare characters. | 6091 // Compare characters. |
6211 __ bind(&compare_chars); | 6092 __ bind(&compare_chars); |
6212 GenerateAsciiCharsCompareLoop(masm, | 6093 GenerateAsciiCharsCompareLoop(masm, |
6213 left, right, length, scratch2, scratch3, | 6094 left, right, length, scratch2, scratch3, |
6214 &strings_not_equal); | 6095 &strings_not_equal); |
6215 | 6096 |
6216 // Characters are equal. | 6097 // Characters are equal. |
6217 __ mov(r0, Operand(Smi::FromInt(EQUAL))); | 6098 __ mov(r0, Operand(Smi::FromInt(EQUAL))); |
6218 __ Ret(); | 6099 __ Ret(); |
6219 } | 6100 } |
6220 | 6101 |
6221 | 6102 |
6222 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, | 6103 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, |
6223 Register left, | 6104 Register left, |
6224 Register right, | 6105 Register right, |
6225 Register scratch1, | 6106 Register scratch1, |
6226 Register scratch2, | 6107 Register scratch2, |
6227 Register scratch3, | 6108 Register scratch3, |
6228 Register scratch4) { | 6109 Register scratch4) { |
6229 Label result_not_equal, compare_lengths; | 6110 ASSERT(!scratch2.is(r0) && !scratch4.is(r0)); |
| 6111 Label result_not_equal, compare_lengths; |
6230 // Find minimum length and length difference. | 6112 // Find minimum length and length difference. |
6231 __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset)); | 6113 __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset)); |
6232 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); | 6114 __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); |
6233 __ sub(scratch3, scratch1, Operand(scratch2), SetCC); | 6115 __ cmpgt(scratch1, scratch2); // for cond mov below |
| 6116 __ sub(scratch3, scratch1, scratch2); |
6234 Register length_delta = scratch3; | 6117 Register length_delta = scratch3; |
6235 __ mov(scratch1, scratch2, LeaveCC, gt); | 6118 __ mov(scratch1, scratch2, t); |
6236 Register min_length = scratch1; | 6119 Register min_length = scratch1; |
6237 STATIC_ASSERT(kSmiTag == 0); | 6120 STATIC_ASSERT(kSmiTag == 0); |
6238 __ cmp(min_length, Operand(0)); | 6121 __ cmpeq(min_length, Operand(0)); |
6239 __ b(eq, &compare_lengths); | 6122 __ b(eq, &compare_lengths); |
6240 | 6123 |
6241 // Compare loop. | 6124 // Compare loop. |
6242 GenerateAsciiCharsCompareLoop(masm, | 6125 GenerateAsciiCharsCompareLoop(masm, |
6243 left, right, min_length, scratch2, scratch4, | 6126 left, right, min_length, scratch2, scratch4, |
6244 &result_not_equal); | 6127 &result_not_equal); |
6245 | 6128 |
6246 // Compare lengths - strings up to min-length are equal. | 6129 // Compare lengths - strings up to min-length are equal. |
6247 __ bind(&compare_lengths); | 6130 __ bind(&compare_lengths); |
6248 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); | 6131 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); |
6249 // Use length_delta as result if it's zero. | 6132 // Use length_delta as result if it's zero. |
6250 __ mov(r0, Operand(length_delta), SetCC); | 6133 __ mov(r0, length_delta); |
| 6134 __ cmpgt(r0, Operand(0)); |
| 6135 __ mov(r0, Operand(Smi::FromInt(GREATER)), t); |
| 6136 __ cmpge(r0, Operand(0)); |
| 6137 __ mov(r0, Operand(Smi::FromInt(LESS)), f); |
| 6138 __ Ret(); |
| 6139 |
6251 __ bind(&result_not_equal); | 6140 __ bind(&result_not_equal); |
6252 // Conditionally update the result based on either length_delta or | 6141 // Conditionally update the result based on either length_delta or |
6253 // the last comparison performed in the loop above. | 6142 // the last comparison performed in the loop above. |
6254 __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt); | 6143 __ cmpgt(scratch2, scratch4); |
6255 __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt); | 6144 __ mov(r0, Operand(Smi::FromInt(GREATER)), t); |
| 6145 __ cmpge(scratch2, scratch4); |
| 6146 __ mov(r0, Operand(Smi::FromInt(LESS)), f); |
6256 __ Ret(); | 6147 __ Ret(); |
6257 } | 6148 } |
6258 | 6149 |
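GenerateCompareFlatAsciiStrings is the assembler form of an ordinary three-way
flat-string comparison. A C++ sketch of the logic it implements, with
EQUAL/LESS/GREATER as in the code (illustrative, not the stub itself):

    int CompareFlatAscii(const uint8_t* left, int left_len,
                         const uint8_t* right, int right_len) {
      int min_length = left_len < right_len ? left_len : right_len;
      for (int i = 0; i < min_length; i++) {
        if (left[i] != right[i])                      // result_not_equal path
          return left[i] > right[i] ? GREATER : LESS;
      }
      int delta = left_len - right_len;               // length_delta path
      if (delta == 0) return EQUAL;
      return delta > 0 ? GREATER : LESS;
    }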
6259 | 6150 |
6260 void StringCompareStub::GenerateAsciiCharsCompareLoop( | 6151 void StringCompareStub::GenerateAsciiCharsCompareLoop( |
6261 MacroAssembler* masm, | 6152 MacroAssembler* masm, |
6262 Register left, | 6153 Register left, |
6263 Register right, | 6154 Register right, |
6264 Register length, | 6155 Register length, |
6265 Register scratch1, | 6156 Register scratch1, |
6266 Register scratch2, | 6157 Register scratch2, |
6267 Label* chars_not_equal) { | 6158 Label* chars_not_equal) { |
6268 // Change index to run from -length to -1 by adding length to string | 6159 // Change index to run from -length to -1 by adding length to string |
6269 // start. This means that loop ends when index reaches zero, which | 6160 // start. This means that loop ends when index reaches zero, which |
6270 // doesn't need an additional compare. | 6161 // doesn't need an additional compare. |
6271 __ SmiUntag(length); | 6162 __ SmiUntag(length); |
6272 __ add(scratch1, length, | 6163 __ add(scratch1, length, |
6273 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); | 6164 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
6274 __ add(left, left, Operand(scratch1)); | 6165 __ add(left, left, scratch1); |
6275 __ add(right, right, Operand(scratch1)); | 6166 __ add(right, right, scratch1); |
6276 __ rsb(length, length, Operand::Zero()); | 6167 __ rsb(length, length, Operand::Zero()); |
6277 Register index = length; // index = -length; | 6168 Register index = length; // index = -length; |
6278 | 6169 |
6279 // Compare loop. | 6170 // Compare loop. |
6280 Label loop; | 6171 Label loop; |
6281 __ bind(&loop); | 6172 __ bind(&loop); |
6282 __ ldrb(scratch1, MemOperand(left, index)); | 6173 __ ldrb(scratch1, MemOperand(left, index)); |
6283 __ ldrb(scratch2, MemOperand(right, index)); | 6174 __ ldrb(scratch2, MemOperand(right, index)); |
6284 __ cmp(scratch1, scratch2); | 6175 __ cmp(scratch1, scratch2); |
6285 __ b(ne, chars_not_equal); | 6176 __ b(ne, chars_not_equal); |
6286 __ add(index, index, Operand(1), SetCC); | 6177 __ add(index, index, Operand(1)); |
| 6178 __ tst(index, index); |
6287 __ b(ne, &loop); | 6179 __ b(ne, &loop); |
6288 } | 6180 } |
6289 | 6181 |
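The negative-index trick above removes the end-of-loop comparison: both base
pointers are advanced past the last character (the object-header offset is
folded into scratch1) and the index counts from -length up to 0. In C terms
(illustrative):

    const uint8_t* left_end = left + length;    // one past the last character
    const uint8_t* right_end = right + length;
    for (int index = -length; index != 0; index++) {
      if (left_end[index] != right_end[index]) goto chars_not_equal;
    }   // index reaching 0 means every compared character matched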
6290 | 6182 |
6291 void StringCompareStub::Generate(MacroAssembler* masm) { | 6183 void StringCompareStub::Generate(MacroAssembler* masm) { |
6292 Label runtime; | 6184 Label runtime; |
6293 | 6185 |
6294 Counters* counters = masm->isolate()->counters(); | 6186 Counters* counters = masm->isolate()->counters(); |
6295 | 6187 |
6296 // Stack frame on entry. | 6188 // Stack frame on entry. |
6297 // sp[0]: right string | 6189 // sp[0]: right string |
6298 // sp[4]: left string | 6190 // sp[4]: left string |
6299 __ Ldrd(r0, r1, MemOperand(sp)); // Load right in r0, left in r1. | 6191 __ Ldrd(r0, r1, MemOperand(sp)); // Load right in r0, left in r1. |
6300 | 6192 |
6301 Label not_same; | 6193 Label not_same; |
6302 __ cmp(r0, r1); | 6194 __ cmp(r0, r1); |
6303 __ b(ne, ¬_same); | 6195 __ b(ne, ¬_same, Label::kNear); |
6304 STATIC_ASSERT(EQUAL == 0); | 6196 STATIC_ASSERT(EQUAL == 0); |
6305 STATIC_ASSERT(kSmiTag == 0); | 6197 STATIC_ASSERT(kSmiTag == 0); |
6306 __ mov(r0, Operand(Smi::FromInt(EQUAL))); | 6198 __ mov(r0, Operand(Smi::FromInt(EQUAL))); |
6307 __ IncrementCounter(counters->string_compare_native(), 1, r1, r2); | 6199 __ IncrementCounter(counters->string_compare_native(), 1, r1, r2); |
6308 __ add(sp, sp, Operand(2 * kPointerSize)); | 6200 __ add(sp, sp, Operand(2 * kPointerSize)); |
6309 __ Ret(); | 6201 __ Ret(); |
6310 | 6202 |
6311 __ bind(¬_same); | 6203 __ bind(¬_same); |
6312 | 6204 |
6313 // Check that both objects are sequential ASCII strings. | 6205 // Check that both objects are sequential ASCII strings. |
(...skipping 29 matching lines...) |
6343 if (flags_ == NO_STRING_ADD_FLAGS) { | 6235 if (flags_ == NO_STRING_ADD_FLAGS) { |
6344 __ JumpIfEitherSmi(r0, r1, &call_runtime); | 6236 __ JumpIfEitherSmi(r0, r1, &call_runtime); |
6345 // Load instance types. | 6237 // Load instance types. |
6346 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); | 6238 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); |
6347 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); | 6239 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); |
6348 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | 6240 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); |
6349 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); | 6241 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); |
6350 STATIC_ASSERT(kStringTag == 0); | 6242 STATIC_ASSERT(kStringTag == 0); |
6351 // If either is not a string, go to runtime. | 6243 // If either is not a string, go to runtime. |
6352 __ tst(r4, Operand(kIsNotStringMask)); | 6244 __ tst(r4, Operand(kIsNotStringMask)); |
6353 __ tst(r5, Operand(kIsNotStringMask), eq); | 6245 __ b(ne, &call_runtime); |
| 6246 __ tst(r5, Operand(kIsNotStringMask)); |
6354 __ b(ne, &call_runtime); | 6247 __ b(ne, &call_runtime); |
6355 } else { | 6248 } else { |
6356 // Here at least one of the arguments is definitely a string. | 6249 // Here at least one of the arguments is definitely a string. |
6357 // We convert the one that is not known to be a string. | 6250 // We convert the one that is not known to be a string. |
6358 if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) { | 6251 if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) { |
6359 ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0); | 6252 ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0); |
6360 GenerateConvertArgument( | 6253 GenerateConvertArgument( |
6361 masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin); | 6254 masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin); |
6362 builtin_id = Builtins::STRING_ADD_RIGHT; | 6255 builtin_id = Builtins::STRING_ADD_RIGHT; |
6363 } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) { | 6256 } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) { |
6364 ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0); | 6257 ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0); |
6365 GenerateConvertArgument( | 6258 GenerateConvertArgument( |
6366 masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin); | 6259 masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin); |
6367 builtin_id = Builtins::STRING_ADD_LEFT; | 6260 builtin_id = Builtins::STRING_ADD_LEFT; |
6368 } | 6261 } |
6369 } | 6262 } |
6370 | 6263 |
6371 // Both arguments are strings. | 6264 // Both arguments are strings. |
6372 // r0: first string | 6265 // r0: first string |
6373 // r1: second string | 6266 // r1: second string |
6374 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) | 6267 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
6375 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) | 6268 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
6376 { | 6269 { |
6377 Label strings_not_empty; | 6270 Label strings_not_empty, string_return; |
6378 // Check if either of the strings are empty. In that case return the other. | 6271 // Check if either of the strings are empty. In that case return the other. |
6379 __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset)); | 6272 __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset)); |
6380 __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset)); | 6273 __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset)); |
6381 STATIC_ASSERT(kSmiTag == 0); | 6274 STATIC_ASSERT(kSmiTag == 0); |
6382 __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty. | 6275 __ cmp(r2, Operand(Smi::FromInt(0))); // Test if first string is empty. |
6383 __ mov(r0, Operand(r1), LeaveCC, eq); // If first is empty, return second. | 6276 __ mov(r0, r1, eq); // If first is empty, return second. |
| 6277 __ bt_near(&string_return); |
6384 STATIC_ASSERT(kSmiTag == 0); | 6278 STATIC_ASSERT(kSmiTag == 0); |
6385 // Else test if second string is empty. | 6279 // Else test if second string is empty. |
6386 __ cmp(r3, Operand(Smi::FromInt(0)), ne); | 6280 __ cmp(r3, Operand(Smi::FromInt(0))); |
6387 __ b(ne, &strings_not_empty); // If either string was empty, return r0. | 6281 // If either string was empty, return r0. |
| 6282 __ b(ne, &strings_not_empty, Label::kNear); |
6388 | 6283 |
| 6284 __ bind(&string_return); |
6389 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); | 6285 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); |
6390 __ add(sp, sp, Operand(2 * kPointerSize)); | 6286 __ add(sp, sp, Operand(2 * kPointerSize)); |
6391 __ Ret(); | 6287 __ Ret(); |
6392 | 6288 |
6393 __ bind(&strings_not_empty); | 6289 __ bind(&strings_not_empty); |
6394 } | 6290 } |
6395 | 6291 |
6396 __ mov(r2, Operand(r2, ASR, kSmiTagSize)); | 6292 __ asr(r2, r2, Operand(kSmiTagSize)); |
6397 __ mov(r3, Operand(r3, ASR, kSmiTagSize)); | 6293 __ asr(r3, r3, Operand(kSmiTagSize)); |
6398 // Both strings are non-empty. | 6294 // Both strings are non-empty. |
6399 // r0: first string | 6295 // r0: first string |
6400 // r1: second string | 6296 // r1: second string |
6401 // r2: length of first string | 6297 // r2: length of first string |
6402 // r3: length of second string | 6298 // r3: length of second string |
6403 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) | 6299 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
6404 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) | 6300 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
6405 // Look at the length of the result of adding the two strings. | 6301 // Look at the length of the result of adding the two strings. |
6406 Label string_add_flat_result, longer_than_two; | 6302 Label string_add_flat_result, longer_than_two; |
6407 // Adding two lengths can't overflow. | 6303 // Adding two lengths can't overflow. |
6408 STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2); | 6304 STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2); |
6409 __ add(r6, r2, Operand(r3)); | 6305 __ add(r6, r2, r3); |
6410 // Use the symbol table when adding two one-character strings, as it | 6306 // Use the symbol table when adding two one-character strings, as it |
6411 // helps later optimizations to return a symbol here. | 6307 // helps later optimizations to return a symbol here. |
6412 __ cmp(r6, Operand(2)); | 6308 __ cmp(r6, Operand(2)); |
6413 __ b(ne, &longer_than_two); | 6309 __ b(ne, &longer_than_two); |
6414 | 6310 |
6415 // Check that both strings are non-external ASCII strings. | 6311 // Check that both strings are non-external ASCII strings. |
6416 if (flags_ != NO_STRING_ADD_FLAGS) { | 6312 if (flags_ != NO_STRING_ADD_FLAGS) { |
6417 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); | 6313 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); |
6418 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); | 6314 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); |
6419 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | 6315 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); |
(...skipping 23 matching lines...) |
6443 // in a little endian mode) | 6339 // in a little endian mode) |
6444 __ mov(r6, Operand(2)); | 6340 __ mov(r6, Operand(2)); |
6445 __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); | 6341 __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); |
6446 __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); | 6342 __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); |
6447 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); | 6343 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); |
6448 __ add(sp, sp, Operand(2 * kPointerSize)); | 6344 __ add(sp, sp, Operand(2 * kPointerSize)); |
6449 __ Ret(); | 6345 __ Ret(); |
6450 | 6346 |
6451 __ bind(&longer_than_two); | 6347 __ bind(&longer_than_two); |
6452 // Check if resulting string will be flat. | 6348 // Check if resulting string will be flat. |
6453 __ cmp(r6, Operand(ConsString::kMinLength)); | 6349 __ cmpge(r6, Operand(ConsString::kMinLength)); |
6454 __ b(lt, &string_add_flat_result); | 6350 __ bf(&string_add_flat_result); |
6455 // Handle exceptionally long strings in the runtime system. | 6351 // Handle exceptionally long strings in the runtime system. |
6456 STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0); | 6352 STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0); |
6457 ASSERT(IsPowerOf2(String::kMaxLength + 1)); | 6353 ASSERT(IsPowerOf2(String::kMaxLength + 1)); |
6458 // kMaxLength + 1 is representable as shifted literal, kMaxLength is not. | 6354 // kMaxLength + 1 is representable as shifted literal, kMaxLength is not. |
6459 __ cmp(r6, Operand(String::kMaxLength + 1)); | 6355 __ cmphs(r6, Operand(String::kMaxLength + 1)); |
6460 __ b(hs, &call_runtime); | 6356 __ bt(&call_runtime); |
6461 | 6357 |
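The two length checks above classify the result: short results are built flat,
over-long results go to the runtime, and everything in between becomes a cons
string. Sketch (illustrative; r6 holds the summed, untagged length):

    if (length < ConsString::kMinLength) goto string_add_flat_result;
    if (length >= String::kMaxLength + 1) goto call_runtime;  // the cmphs above
    // fall through: allocate ConsString{first, second}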
6462 // If result is not supposed to be flat, allocate a cons string object. | 6358 // If result is not supposed to be flat, allocate a cons string object. |
6463 // If both strings are ASCII the result is an ASCII cons string. | 6359 // If both strings are ASCII the result is an ASCII cons string. |
6464 if (flags_ != NO_STRING_ADD_FLAGS) { | 6360 if (flags_ != NO_STRING_ADD_FLAGS) { |
6465 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); | 6361 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); |
6466 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); | 6362 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); |
6467 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | 6363 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); |
6468 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); | 6364 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); |
6469 } | 6365 } |
6470 Label non_ascii, allocated, ascii_data; | 6366 Label non_ascii, allocated, ascii_data; |
6471 STATIC_ASSERT(kTwoByteStringTag == 0); | 6367 STATIC_ASSERT(kTwoByteStringTag == 0); |
6472 __ tst(r4, Operand(kStringEncodingMask)); | 6368 __ tst(r4, Operand(kStringEncodingMask)); |
6473 __ tst(r5, Operand(kStringEncodingMask), ne); | 6369 __ bt_near(&non_ascii); |
6474 __ b(eq, &non_ascii); | 6370 __ tst(r5, Operand(kStringEncodingMask)); |
| 6371 __ b(eq, &non_ascii, Label::kNear); |
6475 | 6372 |
6476 // Allocate an ASCII cons string. | 6373 // Allocate an ASCII cons string. |
6477 __ bind(&ascii_data); | 6374 __ bind(&ascii_data); |
6478 __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime); | 6375 __ AllocateAsciiConsString(r7, r6, r4, r5, &call_runtime); |
6479 __ bind(&allocated); | 6376 __ bind(&allocated); |
6480 // Fill the fields of the cons string. | 6377 // Fill the fields of the cons string. |
6481 __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); | 6378 __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); |
6482 __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); | 6379 __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); |
6483 __ mov(r0, Operand(r7)); | 6380 __ mov(r0, r7); |
6484 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); | 6381 __ IncrementCounter(counters->string_add_native(), 1, r2, r3); |
6485 __ add(sp, sp, Operand(2 * kPointerSize)); | 6382 __ add(sp, sp, Operand(2 * kPointerSize)); |
6486 __ Ret(); | 6383 __ Ret(); |
6487 | 6384 |
6488 __ bind(&non_ascii); | 6385 __ bind(&non_ascii); |
6489 // At least one of the strings is two-byte. Check whether it happens | 6386 // At least one of the strings is two-byte. Check whether it happens |
6490 // to contain only ASCII characters. | 6387 // to contain only ASCII characters. |
6491 // r4: first instance type. | 6388 // r4: first instance type. |
6492 // r5: second instance type. | 6389 // r5: second instance type. |
6493 __ tst(r4, Operand(kAsciiDataHintMask)); | 6390 __ tst(r4, Operand(kAsciiDataHintMask)); |
6494 __ tst(r5, Operand(kAsciiDataHintMask), ne); | |
6495 __ b(ne, &ascii_data); | 6391 __ b(ne, &ascii_data); |
6496 __ eor(r4, r4, Operand(r5)); | 6392 __ tst(r5, Operand(kAsciiDataHintMask)); |
| 6393 __ b(ne, &ascii_data); |
| 6394 __ eor(r4, r4, r5); |
6497 STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); | 6395 STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); |
6498 __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); | 6396 __ land(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); |
6499 __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); | 6397 __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); |
6500 __ b(eq, &ascii_data); | 6398 __ b(eq, &ascii_data); |
6501 | 6399 |
6502 // Allocate a two byte cons string. | 6400 // Allocate a two byte cons string. |
6503 __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime); | 6401 __ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime); |
6504 __ jmp(&allocated); | 6402 __ jmp(&allocated); |
6505 | 6403 |
6506 // We cannot encounter sliced strings or cons strings here since: | 6404 // We cannot encounter sliced strings or cons strings here since: |
6507 STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength); | 6405 STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength); |
6508 // Handle creating a flat result from either external or sequential strings. | 6406 // Handle creating a flat result from either external or sequential strings. |
6509 // Locate the first characters' locations. | 6407 // Locate the first characters' locations. |
6510 // r0: first string | 6408 // r0: first string |
6511 // r1: second string | 6409 // r1: second string |
6512 // r2: length of first string | 6410 // r2: length of first string |
6513 // r3: length of second string | 6411 // r3: length of second string |
6514 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) | 6412 // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
6515 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) | 6413 // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS) |
6516 // r6: sum of lengths. | 6414 // r6: sum of lengths. |
6517 Label first_prepared, second_prepared; | 6415 Label first_prepared, second_prepared; |
6518 __ bind(&string_add_flat_result); | 6416 __ bind(&string_add_flat_result); |
6519 if (flags_ != NO_STRING_ADD_FLAGS) { | 6417 if (flags_ != NO_STRING_ADD_FLAGS) { |
6520 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); | 6418 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); |
6521 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); | 6419 __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); |
6522 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | 6420 __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); |
6523 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); | 6421 __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); |
6524 } | 6422 } |
6525 | 6423 |
6526 // Check whether both strings have same encoding | 6424 // Check whether both strings have same encoding |
6527 __ eor(r7, r4, Operand(r5)); | 6425 Label skip; |
| 6426 __ eor(r7, r4, r5); |
6528 __ tst(r7, Operand(kStringEncodingMask)); | 6427 __ tst(r7, Operand(kStringEncodingMask)); |
6529 __ b(ne, &call_runtime); | 6428 __ b(ne, &call_runtime); |
6530 | 6429 |
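The eor/tst pair above is the usual XOR trick for "same encoding": the two
instance types agree on the encoding bit exactly when their XOR has no bits set
under the mask (illustrative):

    if ((type1 ^ type2) & kStringEncodingMask) goto call_runtime;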
6531 STATIC_ASSERT(kSeqStringTag == 0); | 6430 STATIC_ASSERT(kSeqStringTag == 0); |
6532 __ tst(r4, Operand(kStringRepresentationMask)); | 6431 __ tst(r4, Operand(kStringRepresentationMask)); |
| 6432 __ bf(&skip); |
6533 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); | 6433 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); |
6534 __ add(r7, | 6434 __ add(r7, |
6535 r0, | 6435 r0, |
6536 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag), | 6436 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
6537 LeaveCC, | 6437 __ b(&first_prepared); |
6538 eq); | 6438 __ bind(&skip); |
6539 __ b(eq, &first_prepared); | |
6540 // External string: rule out short external string and load string resource. | 6439 // External string: rule out short external string and load string resource. |
6541 STATIC_ASSERT(kShortExternalStringTag != 0); | 6440 STATIC_ASSERT(kShortExternalStringTag != 0); |
6542 __ tst(r4, Operand(kShortExternalStringMask)); | 6441 __ tst(r4, Operand(kShortExternalStringMask)); |
6543 __ b(ne, &call_runtime); | 6442 __ b(ne, &call_runtime); |
6544 __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset)); | 6443 __ ldr(r7, FieldMemOperand(r0, ExternalString::kResourceDataOffset)); |
6545 __ bind(&first_prepared); | 6444 __ bind(&first_prepared); |
6546 | 6445 |
6547 STATIC_ASSERT(kSeqStringTag == 0); | 6446 STATIC_ASSERT(kSeqStringTag == 0); |
| 6447 Label skip2; |
6548 __ tst(r5, Operand(kStringRepresentationMask)); | 6448 __ tst(r5, Operand(kStringRepresentationMask)); |
| 6449 __ bf(&skip2); |
6549 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); | 6450 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); |
6550 __ add(r1, | 6451 __ add(r1, |
6551 r1, | 6452 r1, |
6552 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag), | 6453 Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
6553 LeaveCC, | 6454 __ b(&second_prepared); |
6554 eq); | 6455 __ bind(&skip2); |
6555 __ b(eq, &second_prepared); | |
6556 // External string: rule out short external string and load string resource. | 6456 // External string: rule out short external string and load string resource. |
6557 STATIC_ASSERT(kShortExternalStringTag != 0); | 6457 STATIC_ASSERT(kShortExternalStringTag != 0); |
6558 __ tst(r5, Operand(kShortExternalStringMask)); | 6458 __ tst(r5, Operand(kShortExternalStringMask)); |
6559 __ b(ne, &call_runtime); | 6459 __ b(ne, &call_runtime); |
6560 __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset)); | 6460 __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset)); |
6561 __ bind(&second_prepared); | 6461 __ bind(&second_prepared); |
6562 | 6462 |
6563 Label non_ascii_string_add_flat_result; | 6463 Label non_ascii_string_add_flat_result; |
6564 // r7: first character of first string | 6464 // r7: first character of first string |
6565 // r1: first character of second string | 6465 // r1: first character of second string |
(...skipping 51 matching lines...) |
6617 int stack_offset, | 6517 int stack_offset, |
6618 Register arg, | 6518 Register arg, |
6619 Register scratch1, | 6519 Register scratch1, |
6620 Register scratch2, | 6520 Register scratch2, |
6621 Register scratch3, | 6521 Register scratch3, |
6622 Register scratch4, | 6522 Register scratch4, |
6623 Label* slow) { | 6523 Label* slow) { |
6624 // First check if the argument is already a string. | 6524 // First check if the argument is already a string. |
6625 Label not_string, done; | 6525 Label not_string, done; |
6626 __ JumpIfSmi(arg, ¬_string); | 6526 __ JumpIfSmi(arg, ¬_string); |
6627 __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE); | 6527 __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE, ge); |
6628 __ b(lt, &done); | 6528 __ bf(&done); |
6629 | 6529 |
6630 // Check the number to string cache. | 6530 // Check the number to string cache. |
6631 Label not_cached; | 6531 Label not_cached; |
6632 __ bind(¬_string); | 6532 __ bind(¬_string); |
6633 // Puts the cached result into scratch1. | 6533 // Puts the cached result into scratch1. |
6634 NumberToStringStub::GenerateLookupNumberStringCache(masm, | 6534 NumberToStringStub::GenerateLookupNumberStringCache(masm, |
6635 arg, | 6535 arg, |
6636 scratch1, | 6536 scratch1, |
6637 scratch2, | 6537 scratch2, |
6638 scratch3, | 6538 scratch3, |
6639 scratch4, | 6539 scratch4, |
6640 false, | 6540 false, |
6641 ¬_cached); | 6541 ¬_cached); |
6642 __ mov(arg, scratch1); | 6542 __ mov(arg, scratch1); |
6643 __ str(arg, MemOperand(sp, stack_offset)); | 6543 __ str(arg, MemOperand(sp, stack_offset)); |
6644 __ jmp(&done); | 6544 __ jmp(&done); |
6645 | 6545 |
6646 // Check if the argument is a safe string wrapper. | 6546 // Check if the argument is a safe string wrapper. |
6647 __ bind(¬_cached); | 6547 __ bind(¬_cached); |
6648 __ JumpIfSmi(arg, slow); | 6548 __ JumpIfSmi(arg, slow); |
6649 __ CompareObjectType( | 6549 __ CompareObjectType( |
6650 arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1. | 6550 arg, scratch1, scratch2, JS_VALUE_TYPE, eq); // map -> scratch1. |
6651 __ b(ne, slow); | 6551 __ b(ne, slow); |
6652 __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset)); | 6552 __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset)); |
6653 __ and_(scratch2, | 6553 __ land(scratch2, |
6654 scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); | 6554 scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); |
6655 __ cmp(scratch2, | 6555 __ cmp(scratch2, |
6656 Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); | 6556 Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); |
6657 __ b(ne, slow); | 6557 __ b(ne, slow); |
6658 __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset)); | 6558 __ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset)); |
6659 __ str(arg, MemOperand(sp, stack_offset)); | 6559 __ str(arg, MemOperand(sp, stack_offset)); |
6660 | 6560 |
6661 __ bind(&done); | 6561 __ bind(&done); |
6662 } | 6562 } |
6663 | 6563 |
6664 | 6564 |
6665 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { | 6565 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { |
6666 ASSERT(state_ == CompareIC::SMIS); | 6566 ASSERT(state_ == CompareIC::SMIS); |
6667 Label miss; | 6567 Label miss; |
6668 __ orr(r2, r1, r0); | 6568 __ orr(r2, r1, r0); |
6669 __ JumpIfNotSmi(r2, &miss); | 6569 __ JumpIfNotSmi(r2, &miss); |
6670 | 6570 |
6671 if (GetCondition() == eq) { | 6571 if (GetCondition() == eq) { |
6672 // For equality we do not care about the sign of the result. | 6572 // For equality we do not care about the sign of the result. |
6673 __ sub(r0, r0, r1, SetCC); | 6573 __ sub(r0, r0, r1); |
| 6574 __ tst(r0, r0); // TODO(stm): why set the condition code here? Is it actually used? |
6674 } else { | 6575 } else { |
6675 // Untag before subtracting to avoid handling overflow. | 6576 // Untag before subtracting to avoid handling overflow. |
6676 __ SmiUntag(r1); | 6577 __ SmiUntag(r1); |
6677 __ sub(r0, r1, SmiUntagOperand(r0)); | 6578 __ SmiUntag(r0); |
| 6579 __ sub(r0, r1, r0); |
6678 } | 6580 } |
6679 __ Ret(); | 6581 __ Ret(); |
6680 | 6582 |
6681 __ bind(&miss); | 6583 __ bind(&miss); |
6682 GenerateMiss(masm); | 6584 GenerateMiss(masm); |
6683 } | 6585 } |
6684 | 6586 |
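The two arms of GenerateSmis differ only in where untagging happens. For
equality the sign of the difference is irrelevant, so subtracting the tagged
smis directly is safe (a tagged subtraction can overflow, but never yields a
spurious zero); for ordered compares, untagging first keeps the subtraction
from overflowing. Sketch (illustrative, 32-bit smis as above):

    if (cond == eq) {
      result = a - b;                 // zero iff the two smis are identical
    } else {
      result = (a >> 1) - (b >> 1);   // untag, then subtract: cannot overflow
    }                                 // sign of result encodes LESS/GREATER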
6685 | 6587 |
6686 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { | 6588 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { |
6687 ASSERT(state_ == CompareIC::HEAP_NUMBERS); | 6589 ASSERT(state_ == CompareIC::HEAP_NUMBERS); |
6688 | 6590 |
6689 Label generic_stub; | 6591 Label generic_stub; |
6690 Label unordered, maybe_undefined1, maybe_undefined2; | 6592 Label unordered, maybe_undefined1, maybe_undefined2; |
6691 Label miss; | 6593 Label miss; |
6692 __ and_(r2, r1, Operand(r0)); | 6594 __ land(r2, r1, r0); |
6693 __ JumpIfSmi(r2, &generic_stub); | 6595 __ JumpIfSmi(r2, &generic_stub, Label::kNear); |
6694 | 6596 |
6695 __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE); | 6597 __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE, eq); |
6696 __ b(ne, &maybe_undefined1); | 6598 __ b(ne, &maybe_undefined1, Label::kNear); |
6697 __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE); | 6599 __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE, eq); |
6698 __ b(ne, &maybe_undefined2); | 6600 __ b(ne, &maybe_undefined2, Label::kNear); |
6699 | 6601 |
6700 // Inlining the double comparison and falling back to the general compare | 6602 // Inlining the double comparison and falling back to the general compare |
6701 // stub if NaN is involved or VFP3 is unsupported. | 6603 // stub if NaN is involved or FPU is unsupported. |
6702 if (CpuFeatures::IsSupported(VFP2)) { | 6604 if (CpuFeatures::IsSupported(FPU)) { |
6703 CpuFeatures::Scope scope(VFP2); | 6605 // Load left and right operand |
| 6606 __ sub(r2, r1, Operand(kHeapObjectTag - HeapNumber::kValueOffset)); |
| 6607 __ dldr(dr0, MemOperand(r2, 0), r2); |
| 6608 __ sub(r2, r0, Operand(kHeapObjectTag - HeapNumber::kValueOffset)); |
| 6609 __ dldr(dr2, MemOperand(r2, 0), r2); |
6704 | 6610 |
6705 // Load left and right operand | 6611 Label unordered; |
6706 __ sub(r2, r1, Operand(kHeapObjectTag)); | 6612 __ dcmpeq(dr0, dr0); |
6707 __ vldr(d0, r2, HeapNumber::kValueOffset); | 6613 __ bf_near(&unordered); |
6708 __ sub(r2, r0, Operand(kHeapObjectTag)); | 6614 __ dcmpeq(dr2, dr2); |
6709 __ vldr(d1, r2, HeapNumber::kValueOffset); | 6615 __ bf_near(&unordered); |
6710 | 6616 |
6711 // Compare operands | 6617 // Test for eq, lt and gt |
6712 __ VFPCompareAndSetFlags(d0, d1); | 6618 Label equal, greater; |
| 6619 __ dcmpeq(dr0, dr2); |
| 6620 __ bt_near(&equal); |
| 6621 __ dcmpgt(dr0, dr2); |
| 6622 __ bt_near(&greater); |
6713 | 6623 |
6714 // Don't base result on status bits when a NaN is involved. | 6624 __ mov(r0, Operand(LESS)); |
6715 __ b(vs, &unordered); | 6625 __ rts(); |
6716 | 6626 |
6717 // Return a result of -1, 0, or 1, based on status bits. | 6627 __ bind(&equal); |
6718 __ mov(r0, Operand(EQUAL), LeaveCC, eq); | 6628 __ mov(r0, Operand(EQUAL)); |
6719 __ mov(r0, Operand(LESS), LeaveCC, lt); | 6629 __ rts(); |
6720 __ mov(r0, Operand(GREATER), LeaveCC, gt); | 6630 |
6721 __ Ret(); | 6631 __ bind(&greater); |
| 6632 __ mov(r0, Operand(GREATER)); |
| 6633 __ rts(); |
| 6634 |
| 6635 __ bind(&unordered); |
6722 } | 6636 } |
6723 | 6637 |
6724 __ bind(&unordered); | |
6725 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0); | 6638 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0); |
6726 __ bind(&generic_stub); | 6639 __ bind(&generic_stub); |
6727 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | 6640 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); |
6728 | 6641 |
6729 __ bind(&maybe_undefined1); | 6642 __ bind(&maybe_undefined1); |
6730 if (Token::IsOrderedRelationalCompareOp(op_)) { | 6643 if (Token::IsOrderedRelationalCompareOp(op_)) { |
6731 __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); | 6644 __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); |
6732 __ b(ne, &miss); | 6645 __ b(ne, &miss); |
6733 __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE); | 6646 __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE, eq); |
6734 __ b(ne, &maybe_undefined2); | 6647 __ b(ne, &maybe_undefined2); |
6735 __ jmp(&unordered); | 6648 __ jmp(&unordered); |
6736 } | 6649 } |
6737 | 6650 |
6738 __ bind(&maybe_undefined2); | 6651 __ bind(&maybe_undefined2); |
6739 if (Token::IsOrderedRelationalCompareOp(op_)) { | 6652 if (Token::IsOrderedRelationalCompareOp(op_)) { |
6740 __ CompareRoot(r1, Heap::kUndefinedValueRootIndex); | 6653 __ CompareRoot(r1, Heap::kUndefinedValueRootIndex); |
6741 __ b(eq, &unordered); | 6654 __ b(eq, &unordered); |
6742 } | 6655 } |
6743 | 6656 |
6744 __ bind(&miss); | 6657 __ bind(&miss); |
6745 GenerateMiss(masm); | 6658 GenerateMiss(masm); |
6746 } | 6659 } |
6747 | 6660 |
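The SH4 FPU sequence in GenerateHeapNumbers builds a NaN-aware three-way
compare from dcmpeq/dcmpgt, using the dcmpeq(x, x) self-compare to detect NaN
(it is false only for NaN). Equivalent C++ (illustrative; the unordered case
does not return a value in the stub, it falls through to the generic
CompareStub):

    int CompareHeapNumbers(double x, double y) {
      if (x != x || y != y) return UNORDERED;  // a NaN is involved
      if (x == y) return EQUAL;
      return x > y ? GREATER : LESS;
    }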
6748 | 6661 |
6749 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { | 6662 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { |
6750 ASSERT(state_ == CompareIC::SYMBOLS); | 6663 ASSERT(state_ == CompareIC::SYMBOLS); |
6751 Label miss; | 6664 Label miss; |
6752 | 6665 |
6753 // Registers containing left and right operands respectively. | 6666 // Registers containing left and right operands respectively. |
6754 Register left = r1; | 6667 Register left = r1; |
6755 Register right = r0; | 6668 Register right = r0; |
6756 Register tmp1 = r2; | 6669 Register tmp1 = r2; |
6757 Register tmp2 = r3; | 6670 Register tmp2 = r3; |
6758 | 6671 |
6759 // Check that both operands are heap objects. | 6672 // Check that both operands are heap objects. |
6760 __ JumpIfEitherSmi(left, right, &miss); | 6673 __ JumpIfEitherSmi(left, right, &miss, Label::kNear); |
6761 | 6674 |
6762 // Check that both operands are symbols. | 6675 // Check that both operands are symbols. |
6763 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 6676 __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); |
6764 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 6677 __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); |
6765 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 6678 __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); |
6766 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 6679 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); |
6767 STATIC_ASSERT(kSymbolTag != 0); | 6680 STATIC_ASSERT(kSymbolTag != 0); |
6768 __ and_(tmp1, tmp1, Operand(tmp2)); | 6681 __ land(tmp1, tmp1, tmp2); |
6769 __ tst(tmp1, Operand(kIsSymbolMask)); | 6682 __ tst(tmp1, Operand(kIsSymbolMask)); |
6770 __ b(eq, &miss); | 6683 __ b(eq, &miss, Label::kNear); |
6771 | 6684 |
6772 // Symbols are compared by identity. | 6685 // Symbols are compared by identity. |
6773 __ cmp(left, right); | 6686 __ cmp(left, right); |
6774 // Make sure r0 is non-zero. At this point input operands are | 6687 // Make sure r0 is non-zero. At this point input operands are |
6775 // guaranteed to be non-zero. | 6688 // guaranteed to be non-zero. |
6776 ASSERT(right.is(r0)); | 6689 ASSERT(right.is(r0)); |
6777 STATIC_ASSERT(EQUAL == 0); | 6690 STATIC_ASSERT(EQUAL == 0); |
6778 STATIC_ASSERT(kSmiTag == 0); | 6691 STATIC_ASSERT(kSmiTag == 0); |
6779 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); | 6692 __ mov(r0, Operand(Smi::FromInt(EQUAL)), eq); |
6780 __ Ret(); | 6693 __ Ret(); |
6781 | 6694 |
6782 __ bind(&miss); | 6695 __ bind(&miss); |
6783 GenerateMiss(masm); | 6696 GenerateMiss(masm); |
6784 } | 6697 } |
6785 | 6698 |
6786 | 6699 |
6787 void ICCompareStub::GenerateStrings(MacroAssembler* masm) { | 6700 void ICCompareStub::GenerateStrings(MacroAssembler* masm) { |
6788 ASSERT(state_ == CompareIC::STRINGS); | 6701 ASSERT(state_ == CompareIC::STRINGS); |
6789 Label miss; | 6702 Label miss; |
(...skipping 19 matching lines...) |
6809 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 6722 __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); |
6810 STATIC_ASSERT(kNotStringTag != 0); | 6723 STATIC_ASSERT(kNotStringTag != 0); |
6811 __ orr(tmp3, tmp1, tmp2); | 6724 __ orr(tmp3, tmp1, tmp2); |
6812 __ tst(tmp3, Operand(kIsNotStringMask)); | 6725 __ tst(tmp3, Operand(kIsNotStringMask)); |
6813 __ b(ne, &miss); | 6726 __ b(ne, &miss); |
6814 | 6727 |
6815 // Fast check for identical strings. | 6728 // Fast check for identical strings. |
6816 __ cmp(left, right); | 6729 __ cmp(left, right); |
6817 STATIC_ASSERT(EQUAL == 0); | 6730 STATIC_ASSERT(EQUAL == 0); |
6818 STATIC_ASSERT(kSmiTag == 0); | 6731 STATIC_ASSERT(kSmiTag == 0); |
6819 __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); | 6732 __ mov(r0, Operand(Smi::FromInt(EQUAL)), eq); |
6820 __ Ret(eq); | 6733 __ Ret(eq); |
6821 | 6734 |
6822 // Handle not identical strings. | 6735 // Handle not identical strings. |
6823 | 6736 |
6824 // Check that both strings are symbols. If they are, we're done | 6737 // Check that both strings are symbols. If they are, we're done |
6825 // because we already know they are not identical. | 6738 // because we already know they are not identical. |
6826 if (equality) { | 6739 if (equality) { |
6827 ASSERT(GetCondition() == eq); | 6740 ASSERT(GetCondition() == eq); |
6828 STATIC_ASSERT(kSymbolTag != 0); | 6741 STATIC_ASSERT(kSymbolTag != 0); |
6829 __ and_(tmp3, tmp1, Operand(tmp2)); | 6742 __ land(tmp3, tmp1, tmp2); |
6830 __ tst(tmp3, Operand(kIsSymbolMask)); | 6743 __ tst(tmp3, Operand(kIsSymbolMask)); |
6831 // Make sure r0 is non-zero. At this point input operands are | 6744 // Make sure r0 is non-zero. At this point input operands are |
6832 // guaranteed to be non-zero. | 6745 // guaranteed to be non-zero. |
6833 ASSERT(right.is(r0)); | 6746 ASSERT(right.is(r0)); |
6834 __ Ret(ne); | 6747 __ Ret(ne); |
6835 } | 6748 } |
6836 | 6749 |
6837 // Check that both strings are sequential ASCII. | 6750 // Check that both strings are sequential ASCII. |
6838 Label runtime; | 6751 Label runtime; |
6839 __ JumpIfBothInstanceTypesAreNotSequentialAscii( | 6752 __ JumpIfBothInstanceTypesAreNotSequentialAscii( |
(...skipping 18 matching lines...) |
6858 } | 6771 } |
6859 | 6772 |
6860 __ bind(&miss); | 6773 __ bind(&miss); |
6861 GenerateMiss(masm); | 6774 GenerateMiss(masm); |
6862 } | 6775 } |
6863 | 6776 |
6864 | 6777 |
6865 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { | 6778 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { |
6866 ASSERT(state_ == CompareIC::OBJECTS); | 6779 ASSERT(state_ == CompareIC::OBJECTS); |
6867 Label miss; | 6780 Label miss; |
6868 __ and_(r2, r1, Operand(r0)); | 6781 __ land(r2, r1, r0); |
6869 __ JumpIfSmi(r2, &miss); | 6782 __ JumpIfSmi(r2, &miss, Label::kNear); |
6870 | 6783 |
6871 __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE); | 6784 __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE, eq); |
6872 __ b(ne, &miss); | 6785 __ b(ne, &miss, Label::kNear); |
6873 __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE); | 6786 __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE, eq); |
6874 __ b(ne, &miss); | 6787 __ b(ne, &miss, Label::kNear); |
6875 | 6788 |
6876 ASSERT(GetCondition() == eq); | 6789 ASSERT(GetCondition() == eq); |
6877 __ sub(r0, r0, Operand(r1)); | 6790 __ sub(r0, r0, r1); |
6878 __ Ret(); | 6791 __ Ret(); |
6879 | 6792 |
6880 __ bind(&miss); | 6793 __ bind(&miss); |
6881 GenerateMiss(masm); | 6794 GenerateMiss(masm); |
6882 } | 6795 } |
6883 | 6796 |
6884 | 6797 |
6885 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { | 6798 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { |
6886 Label miss; | 6799 Label miss; |
6887 __ and_(r2, r1, Operand(r0)); | 6800 __ land(r2, r1, r0); |
6888 __ JumpIfSmi(r2, &miss); | 6801 __ JumpIfSmi(r2, &miss); |
6889 __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); | 6802 __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); |
6890 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); | 6803 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); |
6891 __ cmp(r2, Operand(known_map_)); | 6804 __ cmp(r2, Operand(known_map_)); |
6892 __ b(ne, &miss); | 6805 __ b(ne, &miss); |
6893 __ cmp(r3, Operand(known_map_)); | 6806 __ cmp(r3, Operand(known_map_)); |
6894 __ b(ne, &miss); | 6807 __ b(ne, &miss); |
6895 | 6808 |
6896 __ sub(r0, r0, Operand(r1)); | 6809 __ sub(r0, r0, r1); |
6897 __ Ret(); | 6810 __ Ret(); |
6898 | 6811 |
6899 __ bind(&miss); | 6812 __ bind(&miss); |
6900 GenerateMiss(masm); | 6813 GenerateMiss(masm); |
6901 } | 6814 } |
6902 | 6815 |
6903 | 6816 |
6904 | |
6905 void ICCompareStub::GenerateMiss(MacroAssembler* masm) { | 6817 void ICCompareStub::GenerateMiss(MacroAssembler* masm) { |
6906 { | 6818 { |
6907 // Call the runtime system in a fresh internal frame. | 6819 // Call the runtime system in a fresh internal frame. |
6908 ExternalReference miss = | 6820 ExternalReference miss = |
6909 ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); | 6821 ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate()); |
6910 | 6822 |
6911 FrameScope scope(masm, StackFrame::INTERNAL); | 6823 FrameScope scope(masm, StackFrame::INTERNAL); |
6912 __ Push(r1, r0); | 6824 __ Push(r1, r0); |
6913 __ push(lr); | 6825 __ push(pr); |
6914 __ Push(r1, r0); | 6826 __ Push(r1, r0); |
6915 __ mov(ip, Operand(Smi::FromInt(op_))); | 6827 __ mov(ip, Operand(Smi::FromInt(op_))); |
6916 __ push(ip); | 6828 __ push(ip); |
6917 __ CallExternalReference(miss, 3); | 6829 __ CallExternalReference(miss, 3); |
6918 // Compute the entry point of the rewritten stub. | 6830 // Compute the entry point of the rewritten stub. |
6919 __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); | 6831 __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); |
6920 // Restore registers. | 6832 // Restore registers. |
6921 __ pop(lr); | 6833 __ pop(pr); |
6922 __ pop(r0); | 6834 __ pop(r0); |
6923 __ pop(r1); | 6835 __ pop(r1); |
6924 } | 6836 } |
6925 | 6837 |
6926 __ Jump(r2); | 6838 __ jmp(r2); |
6927 } | 6839 } |
6928 | 6840 |
6929 | 6841 |
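GenerateMiss above receives the rewritten stub's Code object back in r0 and jumps to its first instruction at r0 + Code::kHeaderSize - kHeapObjectTag. A toy version of that untagging arithmetic (the header size here is illustrative, not the real V8 constant):

    #include <cassert>
    #include <cstdint>

    const intptr_t kHeapObjectTag = 1;    // heap pointers carry tag bit 1
    const intptr_t kCodeHeaderSize = 32;  // hypothetical Code header size

    // Mirrors: add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)).
    intptr_t EntryPoint(intptr_t tagged_code) {
      return tagged_code + kCodeHeaderSize - kHeapObjectTag;
    }

    int main() {
      intptr_t raw = 0x10000;  // untagged start of the Code object
      assert(EntryPoint(raw + kHeapObjectTag) == raw + kCodeHeaderSize);
      return 0;
    }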
6930 void DirectCEntryStub::Generate(MacroAssembler* masm) { | 6842 void DirectCEntryStub::Generate(MacroAssembler* masm) { |
6931 __ ldr(pc, MemOperand(sp, 0)); | 6843 __ ldr(scratch_, MemOperand(sp, 0), scratch_); |
| 6844 __ jmp(scratch_); |
6932 } | 6845 } |
6933 | 6846 |
6934 | 6847 |
6935 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, | 6848 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, |
6936 ExternalReference function) { | 6849 ExternalReference function, |
6937 __ mov(r2, Operand(function)); | 6850 Register scratch1, |
6938 GenerateCall(masm, r2); | 6851 Register scratch2) { |
| 6852 __ mov(scratch1, Operand(function)); |
| 6853 GenerateCall(masm, scratch1, scratch2); |
6939 } | 6854 } |
6940 | 6855 |
6941 | 6856 |
6942 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, | 6857 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, |
6943 Register target) { | 6858 Register target, |
6944 __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()), | 6859 Register scratch1) { |
6945 RelocInfo::CODE_TARGET)); | 6860 ASSERT(!target.is(scratch_)); |
6946 | 6861 ASSERT(!target.is(scratch1)); |
6947 // Prevent literal pool emission during calculation of return address. | 6862 ASSERT(!scratch1.is(scratch_)); |
6948 Assembler::BlockConstPoolScope block_const_pool(masm); | 6863 // Get pr (pointing to DirectCEntryStub::Generate) into scratch1; |
6949 | 6864 // pr cannot be used directly, as it is clobbered by addpc later. |
| 6865 __ mov(scratch1, Operand(reinterpret_cast<intptr_t>(GetCode().location()), |
| 6866 RelocInfo::CODE_TARGET)); |
| 6867 int return_address_offset = 4 * Assembler::kInstrSize; |
| 6868 Label start; |
6950 // Push return address (accessible to GC through exit frame pc). | 6869 // Push return address (accessible to GC through exit frame pc). |
6951 // Note that using pc with str is deprecated. | 6870 __ addpc(scratch_, return_address_offset, pr); |
6952 Label start; | |
6953 __ bind(&start); | 6871 __ bind(&start); |
6954 __ add(ip, pc, Operand(Assembler::kInstrSize)); | 6872 // Restore the correct pr (pointing to DirectCEntryStub::Generate). |
6955 __ str(ip, MemOperand(sp, 0)); | 6873 __ mov(pr, scratch1); |
6956 __ Jump(target); // Call the C++ function. | 6874 __ str(scratch_, MemOperand(sp, 0), no_reg); |
6957 ASSERT_EQ(Assembler::kInstrSize + Assembler::kPcLoadDelta, | 6875 __ jmp(target); // Call the C++ function. |
| 6876 ASSERT_EQ(return_address_offset, |
6958 masm->SizeOfCodeGeneratedSince(&start)); | 6877 masm->SizeOfCodeGeneratedSince(&start)); |
6959 } | 6878 } |
6960 | 6879 |
6961 | 6880 |
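GenerateCall above precomputes the return address as addpc plus 4 * kInstrSize and then asserts that exactly that many bytes were emitted between the label and the landing point, so a drifting instruction count fails at stub-generation time instead of corrupting the exit frame. The split of the four instruction slots below (including an assumed delay slot for jmp) is a guess; the checking pattern is the point. A toy emitter:

    #include <cassert>

    // A Label is just a byte position in this stand-in for MacroAssembler.
    struct ToyAssembler {
      int pc = 0;
      void Emit(int n, int instr_size) { pc += n * instr_size; }
      int Bind() const { return pc; }
      int SizeSince(int label) const { return pc - label; }
    };

    int main() {
      const int kInstrSize = 2;  // SH4 instructions are 2 bytes wide
      const int return_address_offset = 4 * kInstrSize;
      ToyAssembler masm;
      int start = masm.Bind();
      masm.Emit(1, kInstrSize);  // mov pr, scratch1
      masm.Emit(1, kInstrSize);  // str scratch_, [sp]
      masm.Emit(2, kInstrSize);  // jmp target (+ assumed delay slot)
      assert(masm.SizeSince(start) == return_address_offset);
      return 0;
    }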
6962 void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, | 6881 void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, |
6963 Label* miss, | 6882 Label* miss, |
6964 Label* done, | 6883 Label* done, |
6965 Register receiver, | 6884 Register receiver, |
6966 Register properties, | 6885 Register properties, |
6967 Handle<String> name, | 6886 Handle<String> name, |
6968 Register scratch0) { | 6887 Register scratch0) { |
| 6888 ASSERT(!scratch0.is(ip)); |
6969 // If names of slots in range from 1 to kProbes - 1 for the hash value are | 6889 // If names of slots in range from 1 to kProbes - 1 for the hash value are |
6970 // not equal to the name and kProbes-th slot is not used (its name is the | 6890 // not equal to the name and kProbes-th slot is not used (its name is the |
6971 // undefined value), it guarantees the hash table doesn't contain the | 6891 // undefined value), it guarantees the hash table doesn't contain the |
6972 // property. It's true even if some slots represent deleted properties | 6892 // property. It's true even if some slots represent deleted properties |
6973 // (their names are the hole value). | 6893 // (their names are the hole value). |
6974 for (int i = 0; i < kInlinedProbes; i++) { | 6894 for (int i = 0; i < kInlinedProbes; i++) { |
6975 // scratch0 points to properties hash. | 6895 // scratch0 points to properties hash. |
6976 // Compute the masked index: (hash + i + i * i) & mask. | 6896 // Compute the masked index: (hash + i + i * i) & mask. |
6977 Register index = scratch0; | 6897 Register index = scratch0; |
6978 // Capacity is smi 2^n. | 6898 // Capacity is smi 2^n. |
6979 __ ldr(index, FieldMemOperand(properties, kCapacityOffset)); | 6899 __ ldr(index, FieldMemOperand(properties, kCapacityOffset)); |
6980 __ sub(index, index, Operand(1)); | 6900 __ sub(index, index, Operand(1)); |
6981 __ and_(index, index, Operand( | 6901 __ land(index, index, Operand( |
6982 Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i)))); | 6902 Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i)))); |
6983 | 6903 |
6984 // Scale the index by multiplying by the entry size. | 6904 // Scale the index by multiplying by the entry size. |
6985 ASSERT(StringDictionary::kEntrySize == 3); | 6905 ASSERT(StringDictionary::kEntrySize == 3); |
6986 __ add(index, index, Operand(index, LSL, 1)); // index *= 3. | 6906 __ lsl(ip, index, Operand(1)); |
| 6907 __ add(index, index, ip); // index *= 3. |
6987 | 6908 |
6988 Register entity_name = scratch0; | 6909 Register entity_name = scratch0; |
6989 // Having undefined at this place means the name is not contained. | 6910 // Having undefined at this place means the name is not contained. |
6990 ASSERT_EQ(kSmiTagSize, 1); | 6911 ASSERT_EQ(kSmiTagSize, 1); |
6991 Register tmp = properties; | 6912 Register tmp = properties; |
6992 __ add(tmp, properties, Operand(index, LSL, 1)); | 6913 // Use entity_name as scratch (defined just below). |
| 6914 __ lsl(entity_name, index, Operand(1)); |
| 6915 __ add(tmp, properties, entity_name); |
6993 __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); | 6916 __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); |
6994 | 6917 |
6995 ASSERT(!tmp.is(entity_name)); | 6918 ASSERT(!tmp.is(entity_name)); |
6996 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); | 6919 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); |
6997 __ cmp(entity_name, tmp); | 6920 __ cmp(entity_name, tmp); |
6998 __ b(eq, done); | 6921 __ b(eq, done); |
6999 | 6922 |
7000 if (i != kInlinedProbes - 1) { | 6923 if (i != kInlinedProbes - 1) { |
7001 // Load the hole ready for use below: | 6924 // Load the hole ready for use below: |
7002 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); | 6925 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); |
(...skipping 15 matching lines...)
7018 | 6941 |
7019 __ bind(&the_hole); | 6942 __ bind(&the_hole); |
7020 | 6943 |
7021 // Restore the properties. | 6944 // Restore the properties. |
7022 __ ldr(properties, | 6945 __ ldr(properties, |
7023 FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 6946 FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
7024 } | 6947 } |
7025 } | 6948 } |
7026 | 6949 |
7027 const int spill_mask = | 6950 const int spill_mask = |
7028 (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() | | 6951 (r6.bit() | r5.bit() | r4.bit() | r3.bit() | |
7029 r2.bit() | r1.bit() | r0.bit()); | 6952 r2.bit() | r1.bit() | r0.bit()); |
7030 | 6953 |
7031 __ stm(db_w, sp, spill_mask); | 6954 __ push(pr); |
| 6955 __ pushm(spill_mask); |
7032 __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 6956 __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
7033 __ mov(r1, Operand(Handle<String>(name))); | 6957 __ mov(r1, Operand(Handle<String>(name))); |
7034 StringDictionaryLookupStub stub(NEGATIVE_LOOKUP); | 6958 StringDictionaryLookupStub stub(NEGATIVE_LOOKUP); |
7035 __ CallStub(&stub); | 6959 __ CallStub(&stub); |
7036 __ cmp(r0, Operand(0)); | 6960 __ cmp(r0, Operand(0)); |
7037 __ ldm(ia_w, sp, spill_mask); | 6961 __ popm(spill_mask); |
| 6962 __ pop(pr); |
7038 | 6963 |
7039 __ b(eq, done); | 6964 __ b(eq, done); |
7040 __ b(ne, miss); | 6965 __ b(ne, miss); |
7041 } | 6966 } |
7042 | 6967 |
7043 | 6968 |
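The inlined probes above implement quadratic probing over a power-of-two table: slot(i) = (hash + i + i*i) & mask, after which the slot index is tripled (StringDictionary::kEntrySize == 3) with the lsl/add pair, since SH4 has no multiply-by-3 addressing form. The same arithmetic, standalone:

    #include <cstdint>
    #include <cstdio>

    uint32_t ScaledProbe(uint32_t hash, uint32_t capacity, uint32_t i) {
      uint32_t mask = capacity - 1;                // capacity is 2^n
      uint32_t index = (hash + i + i * i) & mask;  // quadratic probe
      return index + (index << 1);                 // index *= 3 via lsl + add
    }

    int main() {
      for (uint32_t i = 0; i < 4; i++)
        std::printf("probe %u -> entry index %u\n", (unsigned)i,
                    (unsigned)ScaledProbe(0x1234u, 64, i));
      return 0;
    }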
7044 // Probe the string dictionary in the |elements| register. Jump to the | 6969 // Probe the string dictionary in the |elements| register. Jump to the |
7045 // |done| label if a property with the given name is found. Jump to | 6970 // |done| label if a property with the given name is found. Jump to |
7046 // the |miss| label otherwise. | 6971 // the |miss| label otherwise. |
7047 // If lookup was successful |scratch2| will be equal to elements + 4 * index. | 6972 // If lookup was successful |scratch2| will be equal to elements + 4 * index. |
7048 void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, | 6973 void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, |
7049 Label* miss, | 6974 Label* miss, |
7050 Label* done, | 6975 Label* done, |
7051 Register elements, | 6976 Register elements, |
7052 Register name, | 6977 Register name, |
7053 Register scratch1, | 6978 Register scratch1, |
7054 Register scratch2) { | 6979 Register scratch2) { |
7055 ASSERT(!elements.is(scratch1)); | 6980 ASSERT(!elements.is(scratch1)); |
7056 ASSERT(!elements.is(scratch2)); | 6981 ASSERT(!elements.is(scratch2)); |
7057 ASSERT(!name.is(scratch1)); | 6982 ASSERT(!name.is(scratch1)); |
7058 ASSERT(!name.is(scratch2)); | 6983 ASSERT(!name.is(scratch2)); |
7059 | 6984 |
7060 __ AssertString(name); | 6985 // Assert that name contains a string. |
| 6986 if (FLAG_debug_code) __ AbortIfNotString(name); |
7061 | 6987 |
7062 // Compute the capacity mask. | 6988 // Compute the capacity mask. |
7063 __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset)); | 6989 __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset)); |
7064 __ mov(scratch1, Operand(scratch1, ASR, kSmiTagSize)); // convert smi to int | 6990 __ asr(scratch1, scratch1, Operand(kSmiTagSize)); // convert smi to int |
7065 __ sub(scratch1, scratch1, Operand(1)); | 6991 __ sub(scratch1, scratch1, Operand(1)); |
7066 | 6992 |
7067 // Generate an unrolled loop that performs a few probes before | 6993 // Generate an unrolled loop that performs a few probes before |
7068 // giving up. Measurements done on Gmail indicate that 2 probes | 6994 // giving up. Measurements done on Gmail indicate that 2 probes |
7069 // cover ~93% of loads from dictionaries. | 6995 // cover ~93% of loads from dictionaries. |
7070 for (int i = 0; i < kInlinedProbes; i++) { | 6996 for (int i = 0; i < kInlinedProbes; i++) { |
7071 // Compute the masked index: (hash + i + i * i) & mask. | 6997 // Compute the masked index: (hash + i + i * i) & mask. |
7072 __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset)); | 6998 __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset)); |
7073 if (i > 0) { | 6999 if (i > 0) { |
7074 // Add the probe offset (i + i * i) left shifted to avoid right shifting | 7000 // Add the probe offset (i + i * i) left shifted to avoid right shifting |
7075 // the hash in a separate instruction. The value hash + i + i * i is right | 7001 // the hash in a separate instruction. The value hash + i + i * i is right |
7076 // shifted in the following and instruction. | 7002 // shifted by the following lsr instruction. |
7077 ASSERT(StringDictionary::GetProbeOffset(i) < | 7003 ASSERT(StringDictionary::GetProbeOffset(i) < |
7078 1 << (32 - String::kHashFieldOffset)); | 7004 1 << (32 - String::kHashFieldOffset)); |
7079 __ add(scratch2, scratch2, Operand( | 7005 __ add(scratch2, scratch2, Operand( |
7080 StringDictionary::GetProbeOffset(i) << String::kHashShift)); | 7006 StringDictionary::GetProbeOffset(i) << String::kHashShift)); |
7081 } | 7007 } |
7082 __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift)); | 7008 __ lsr(scratch2, scratch2, Operand(String::kHashShift)); |
| 7009 __ land(scratch2, scratch1, scratch2); |
7083 | 7010 |
7084 // Scale the index by multiplying by the element size. | 7011 // Scale the index by multiplying by the element size. |
7085 ASSERT(StringDictionary::kEntrySize == 3); | 7012 ASSERT(StringDictionary::kEntrySize == 3); |
7086 // scratch2 = scratch2 * 3. | 7013 // scratch2 = scratch2 * 3. |
7087 __ add(scratch2, scratch2, Operand(scratch2, LSL, 1)); | 7014 __ lsl(ip, scratch2, Operand(1)); |
| 7015 __ add(scratch2, scratch2, ip); |
7088 | 7016 |
7089 // Check if the key is identical to the name. | 7017 // Check if the key is identical to the name. |
7090 __ add(scratch2, elements, Operand(scratch2, LSL, 2)); | 7018 __ lsl(scratch2, scratch2, Operand(2)); |
| 7019 __ add(scratch2, elements, scratch2); |
7091 __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset)); | 7020 __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset)); |
7092 __ cmp(name, Operand(ip)); | 7021 __ cmp(name, ip); |
7093 __ b(eq, done); | 7022 __ b(eq, done); |
7094 } | 7023 } |
7095 | 7024 |
7096 const int spill_mask = | 7025 const int spill_mask = |
7097 (lr.bit() | r6.bit() | r5.bit() | r4.bit() | | 7026 (r6.bit() | r5.bit() | r4.bit() | |
7098 r3.bit() | r2.bit() | r1.bit() | r0.bit()) & | 7027 r3.bit() | r2.bit() | r1.bit() | r0.bit()) & |
7099 ~(scratch1.bit() | scratch2.bit()); | 7028 ~(scratch1.bit() | scratch2.bit()); |
7100 | 7029 |
7101 __ stm(db_w, sp, spill_mask); | 7030 __ push(pr); |
| 7031 __ pushm(spill_mask); |
7102 if (name.is(r0)) { | 7032 if (name.is(r0)) { |
7103 ASSERT(!elements.is(r1)); | 7033 ASSERT(!elements.is(r1)); |
7104 __ Move(r1, name); | 7034 __ Move(r1, name); |
7105 __ Move(r0, elements); | 7035 __ Move(r0, elements); |
7106 } else { | 7036 } else { |
7107 __ Move(r0, elements); | 7037 __ Move(r0, elements); |
7108 __ Move(r1, name); | 7038 __ Move(r1, name); |
7109 } | 7039 } |
7110 StringDictionaryLookupStub stub(POSITIVE_LOOKUP); | 7040 StringDictionaryLookupStub stub(POSITIVE_LOOKUP); |
7111 __ CallStub(&stub); | 7041 __ CallStub(&stub); |
7112 __ cmp(r0, Operand(0)); | 7042 __ cmp(r0, Operand(0)); |
7113 __ mov(scratch2, Operand(r2)); | 7043 __ mov(scratch2, r2); |
7114 __ ldm(ia_w, sp, spill_mask); | 7044 __ popm(spill_mask); |
| 7045 __ pop(pr); |
7115 | 7046 |
7116 __ b(ne, done); | 7047 __ b(ne, done); |
7117 __ b(eq, miss); | 7048 __ b(eq, miss); |
7118 } | 7049 } |
7119 | 7050 |
7120 | 7051 |
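The spill_mask above is plain bit arithmetic: each register contributes one bit, and the two scratch registers are removed from the mask so that scratch2, loaded from r2 just before popm, still holds the found-entry address after the restore. In miniature (register codes hypothetical):

    #include <cassert>
    #include <cstdint>

    uint32_t Bit(int code) { return 1u << code; }

    int main() {
      uint32_t spill = Bit(6) | Bit(5) | Bit(4) | Bit(3) |
                       Bit(2) | Bit(1) | Bit(0);
      int scratch1 = 4, scratch2 = 5;  // assumed codes
      uint32_t mask = spill & ~(Bit(scratch1) | Bit(scratch2));
      assert((mask & Bit(scratch2)) == 0);  // scratch2 survives the popm
      assert((mask & Bit(0)) != 0);         // r0 is saved and restored
      return 0;
    }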
7121 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { | 7052 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { |
7122 // This stub overrides SometimesSetsUpAFrame() to return false. That means | 7053 // This stub overrides SometimesSetsUpAFrame() to return false. That means |
7123 // we cannot call anything that could cause a GC from this stub. | 7054 // we cannot call anything that could cause a GC from this stub. |
7124 // Registers: | 7055 // Registers: |
(...skipping 10 matching lines...)
7135 Register key = r1; | 7066 Register key = r1; |
7136 Register index = r2; | 7067 Register index = r2; |
7137 Register mask = r3; | 7068 Register mask = r3; |
7138 Register hash = r4; | 7069 Register hash = r4; |
7139 Register undefined = r5; | 7070 Register undefined = r5; |
7140 Register entry_key = r6; | 7071 Register entry_key = r6; |
7141 | 7072 |
7142 Label in_dictionary, maybe_in_dictionary, not_in_dictionary; | 7073 Label in_dictionary, maybe_in_dictionary, not_in_dictionary; |
7143 | 7074 |
7144 __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset)); | 7075 __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset)); |
7145 __ mov(mask, Operand(mask, ASR, kSmiTagSize)); | 7076 __ asr(mask, mask, Operand(kSmiTagSize)); |
7146 __ sub(mask, mask, Operand(1)); | 7077 __ sub(mask, mask, Operand(1)); |
7147 | 7078 |
7148 __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset)); | 7079 __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset)); |
7149 | 7080 |
7150 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); | 7081 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); |
7151 | 7082 |
7152 for (int i = kInlinedProbes; i < kTotalProbes; i++) { | 7083 for (int i = kInlinedProbes; i < kTotalProbes; i++) { |
7153 // Compute the masked index: (hash + i + i * i) & mask. | 7084 // Compute the masked index: (hash + i + i * i) & mask. |
7154 // Capacity is smi 2^n. | 7085 // Capacity is smi 2^n. |
7155 if (i > 0) { | 7086 if (i > 0) { |
7156 // Add the probe offset (i + i * i) left shifted to avoid right shifting | 7087 // Add the probe offset (i + i * i) left shifted to avoid right shifting |
7157 // the hash in a separate instruction. The value hash + i + i * i is right | 7088 // the hash in a separate instruction. The value hash + i + i * i is right |
7158 // shifted in the following and instruction. | 7089 // shifted by the following lsr instruction. |
7159 ASSERT(StringDictionary::GetProbeOffset(i) < | 7090 ASSERT(StringDictionary::GetProbeOffset(i) < |
7160 1 << (32 - String::kHashFieldOffset)); | 7091 1 << (32 - String::kHashFieldOffset)); |
7161 __ add(index, hash, Operand( | 7092 __ add(index, hash, Operand( |
7162 StringDictionary::GetProbeOffset(i) << String::kHashShift)); | 7093 StringDictionary::GetProbeOffset(i) << String::kHashShift)); |
7163 } else { | 7094 } else { |
7164 __ mov(index, Operand(hash)); | 7095 __ mov(index, hash); |
7165 } | 7096 } |
7166 __ and_(index, mask, Operand(index, LSR, String::kHashShift)); | 7097 __ lsr(index, index, Operand(String::kHashShift)); |
| 7098 __ land(index, mask, index); |
7167 | 7099 |
7168 // Scale the index by multiplying by the entry size. | 7100 // Scale the index by multiplying by the entry size. |
7169 ASSERT(StringDictionary::kEntrySize == 3); | 7101 ASSERT(StringDictionary::kEntrySize == 3); |
7170 __ add(index, index, Operand(index, LSL, 1)); // index *= 3. | 7102 __ lsl(ip, index, Operand(1)); |
| 7103 __ add(index, index, ip); // index *= 3. |
7171 | 7104 |
7172 ASSERT_EQ(kSmiTagSize, 1); | 7105 ASSERT_EQ(kSmiTagSize, 1); |
7173 __ add(index, dictionary, Operand(index, LSL, 2)); | 7106 __ lsl(index, index, Operand(2)); |
| 7107 __ add(index, dictionary, index); |
7174 __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset)); | 7108 __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset)); |
7175 | 7109 |
7176 // Having undefined at this place means the name is not contained. | 7110 // Having undefined at this place means the name is not contained. |
7177 __ cmp(entry_key, Operand(undefined)); | 7111 __ cmp(entry_key, undefined); |
7178 __ b(eq, ¬_in_dictionary); | 7112 __ b(eq, ¬_in_dictionary); |
7179 | 7113 |
7180 // Stop if found the property. | 7114 // Stop if found the property. |
7181 __ cmp(entry_key, Operand(key)); | 7115 __ cmp(entry_key, key); |
7182 __ b(eq, &in_dictionary); | 7116 __ b(eq, &in_dictionary); |
7183 | 7117 |
7184 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) { | 7118 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) { |
7185 // Check if the entry name is not a symbol. | 7119 // Check if the entry name is not a symbol. |
7186 __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); | 7120 __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); |
7187 __ ldrb(entry_key, | 7121 __ ldrb(entry_key, |
7188 FieldMemOperand(entry_key, Map::kInstanceTypeOffset)); | 7122 FieldMemOperand(entry_key, Map::kInstanceTypeOffset)); |
7189 __ tst(entry_key, Operand(kIsSymbolMask)); | 7123 __ tst(entry_key, Operand(kIsSymbolMask)); |
7190 __ b(eq, &maybe_in_dictionary); | 7124 __ b(eq, &maybe_in_dictionary); |
7191 } | 7125 } |
7192 } | 7126 } |
7193 | 7127 |
7194 __ bind(&maybe_in_dictionary); | 7128 __ bind(&maybe_in_dictionary); |
7195 // If we are doing negative lookup then probing failure should be | 7129 // If we are doing negative lookup then probing failure should be |
7196 // treated as a lookup success. For positive lookup probing failure | 7130 // treated as a lookup success. For positive lookup probing failure |
7197 // should be treated as lookup failure. | 7131 // should be treated as lookup failure. |
7198 if (mode_ == POSITIVE_LOOKUP) { | 7132 if (mode_ == POSITIVE_LOOKUP) { |
7199 __ mov(result, Operand::Zero()); | 7133 __ mov(result, Operand::Zero()); |
7200 __ Ret(); | 7134 __ Ret(); |
7201 } | 7135 } |
7202 | 7136 |
7203 __ bind(&in_dictionary); | 7137 __ bind(&in_dictionary); |
7204 __ mov(result, Operand(1)); | 7138 __ mov(result, Operand(1)); |
7205 __ Ret(); | 7139 __ Ret(); |
7206 | 7140 |
7207 __ bind(¬_in_dictionary); | 7141 __ bind(¬_in_dictionary); |
7208 __ mov(result, Operand::Zero()); | 7142 __ mov(result, Operand(0)); |
7209 __ Ret(); | 7143 __ Ret(); |
7210 } | 7144 } |
7211 | 7145 |
7212 | 7146 |
7213 struct AheadOfTimeWriteBarrierStubList { | 7147 struct AheadOfTimeWriteBarrierStubList { |
7214 Register object, value, address; | 7148 Register object, value, address; |
7215 RememberedSetAction action; | 7149 RememberedSetAction action; |
7216 }; | 7150 }; |
7217 | 7151 |
7218 #define REG(Name) { kRegister_ ## Name ## _Code } | 7152 #define REG(Name) { kRegister_ ## Name ## _Code } |
(...skipping 71 matching lines...)
7290 entry->value, | 7224 entry->value, |
7291 entry->address, | 7225 entry->address, |
7292 entry->action, | 7226 entry->action, |
7293 kDontSaveFPRegs); | 7227 kDontSaveFPRegs); |
7294 stub.GetCode()->set_is_pregenerated(true); | 7228 stub.GetCode()->set_is_pregenerated(true); |
7295 } | 7229 } |
7296 } | 7230 } |
7297 | 7231 |
7298 | 7232 |
7299 bool CodeStub::CanUseFPRegisters() { | 7233 bool CodeStub::CanUseFPRegisters() { |
7300 return CpuFeatures::IsSupported(VFP2); | 7234 return CpuFeatures::IsSupported(FPU); |
7301 } | 7235 } |
7302 | 7236 |
7303 | 7237 |
7304 // Takes the input in 3 registers: address_ value_ and object_. A pointer to | 7238 // Takes the input in 3 registers: address_ value_ and object_. A pointer to |
7305 // the value has just been written into the object, now this stub makes sure | 7239 // the value has just been written into the object, now this stub makes sure |
7306 // we keep the GC informed. The word in the object where the value has been | 7240 // we keep the GC informed. The word in the object where the value has been |
7307 // written is in the address register. | 7241 // written is in the address register. |
7308 void RecordWriteStub::Generate(MacroAssembler* masm) { | 7242 void RecordWriteStub::Generate(MacroAssembler* masm) { |
7309 Label skip_to_incremental_noncompacting; | 7243 Label skip_to_incremental_noncompacting; |
7310 Label skip_to_incremental_compacting; | 7244 Label skip_to_incremental_compacting; |
7311 | 7245 |
7312 // The first two instructions are generated with labels so as to get the | 7246 // The first two instructions are generated with labels so as to get the |
7313 // offset fixed up correctly by the bind(Label*) call. We patch it back and | 7247 // offset fixed up correctly by the bind(Label*) call. We patch it back and |
7314 // forth between a compare instructions (a nop in this position) and the | 7248 // forth between a compare instructions (a nop in this position) and the |
7315 // real branch when we start and stop incremental heap marking. | 7249 // real branch when we start and stop incremental heap marking. |
7316 // See RecordWriteStub::Patch for details. | 7250 // See RecordWriteStub::Patch for details. |
7317 { | 7251 { |
7318 // Block literal pool emission, as the position of these two instructions | 7252 // The position of these two instructions is assumed by the patching |
7319 // is assumed by the patching code. | 7253 // code. |
7320 Assembler::BlockConstPoolScope block_const_pool(masm); | |
7321 __ b(&skip_to_incremental_noncompacting); | 7254 __ b(&skip_to_incremental_noncompacting); |
7322 __ b(&skip_to_incremental_compacting); | 7255 __ b(&skip_to_incremental_compacting); |
7323 } | 7256 } |
7324 | 7257 |
7325 if (remembered_set_action_ == EMIT_REMEMBERED_SET) { | 7258 if (remembered_set_action_ == EMIT_REMEMBERED_SET) { |
7326 __ RememberedSetHelper(object_, | 7259 __ RememberedSetHelper(object_, |
7327 address_, | 7260 address_, |
7328 value_, | 7261 value_, |
7329 save_fp_regs_mode_, | 7262 save_fp_regs_mode_, |
7330 MacroAssembler::kReturnAtEnd); | 7263 MacroAssembler::kReturnAtEnd); |
7331 } | 7264 } |
7332 __ Ret(); | 7265 __ Ret(); |
7333 | 7266 |
7334 __ bind(&skip_to_incremental_noncompacting); | 7267 __ bind(&skip_to_incremental_noncompacting); |
7335 GenerateIncremental(masm, INCREMENTAL); | 7268 GenerateIncremental(masm, INCREMENTAL); |
7336 | 7269 |
7337 __ bind(&skip_to_incremental_compacting); | 7270 __ bind(&skip_to_incremental_compacting); |
7338 GenerateIncremental(masm, INCREMENTAL_COMPACTION); | 7271 GenerateIncremental(masm, INCREMENTAL_COMPACTION); |
7339 | 7272 |
7340 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. | 7273 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. |
7341 // Will be checked in IncrementalMarking::ActivateGeneratedStub. | 7274 // Will be checked in IncrementalMarking::ActivateGeneratedStub. |
7342 ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12)); | 7275 // TODO(STM): check this soon. |
7343 ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12)); | 7276 // ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12)); |
7344 PatchBranchIntoNop(masm, 0); | 7277 // ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12)); |
7345 PatchBranchIntoNop(masm, Assembler::kInstrSize); | 7278 // PatchBranchIntoNop(masm, 0); |
| 7279 // PatchBranchIntoNop(masm, Assembler::kInstrSize); |
7346 } | 7280 } |
7347 | 7281 |
7348 | 7282 |
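The assertions and PatchBranchIntoNop calls are still disabled on the SH4 side (the TODO above), but the scheme they support on ARM is: emit the two branches up front, patch both into nops so a freshly generated stub runs in STORE_BUFFER_ONLY mode, and let the GC patch the appropriate nop back into a branch when incremental marking starts. A toy model of that two-slot mode switch:

    #include <cassert>

    enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

    struct ToyStub {
      // Slot 0 skips to the non-compacting handler, slot 1 to the
      // compacting one; after generation both are rewritten to nops.
      bool is_branch[2] = {false, false};
      void SetMode(Mode m) {
        is_branch[0] = (m == INCREMENTAL);
        is_branch[1] = (m == INCREMENTAL_COMPACTION);
      }
    };

    int main() {
      ToyStub stub;  // STORE_BUFFER_ONLY: falls through both nops
      assert(!stub.is_branch[0] && !stub.is_branch[1]);
      stub.SetMode(INCREMENTAL);  // marking starts: slot 0 becomes a branch
      assert(stub.is_branch[0] && !stub.is_branch[1]);
      return 0;
    }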
7349 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { | 7283 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { |
7350 regs_.Save(masm); | 7284 regs_.Save(masm); |
7351 | 7285 |
7352 if (remembered_set_action_ == EMIT_REMEMBERED_SET) { | 7286 if (remembered_set_action_ == EMIT_REMEMBERED_SET) { |
7353 Label dont_need_remembered_set; | 7287 Label dont_need_remembered_set; |
7354 | 7288 |
7355 __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0)); | 7289 __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0)); |
(...skipping 24 matching lines...) Expand all Loading... |
7380 | 7314 |
7381 CheckNeedsToInformIncrementalMarker( | 7315 CheckNeedsToInformIncrementalMarker( |
7382 masm, kReturnOnNoNeedToInformIncrementalMarker, mode); | 7316 masm, kReturnOnNoNeedToInformIncrementalMarker, mode); |
7383 InformIncrementalMarker(masm, mode); | 7317 InformIncrementalMarker(masm, mode); |
7384 regs_.Restore(masm); | 7318 regs_.Restore(masm); |
7385 __ Ret(); | 7319 __ Ret(); |
7386 } | 7320 } |
7387 | 7321 |
7388 | 7322 |
7389 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { | 7323 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { |
7390 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); | 7324 __ UNIMPLEMENTED_BREAK(); |
7391 int argument_count = 3; | |
7392 __ PrepareCallCFunction(argument_count, regs_.scratch0()); | |
7393 Register address = | |
7394 r0.is(regs_.address()) ? regs_.scratch0() : regs_.address(); | |
7395 ASSERT(!address.is(regs_.object())); | |
7396 ASSERT(!address.is(r0)); | |
7397 __ Move(address, regs_.address()); | |
7398 __ Move(r0, regs_.object()); | |
7399 if (mode == INCREMENTAL_COMPACTION) { | |
7400 __ Move(r1, address); | |
7401 } else { | |
7402 ASSERT(mode == INCREMENTAL); | |
7403 __ ldr(r1, MemOperand(address, 0)); | |
7404 } | |
7405 __ mov(r2, Operand(ExternalReference::isolate_address())); | |
7406 | |
7407 AllowExternalCallThatCantCauseGC scope(masm); | |
7408 if (mode == INCREMENTAL_COMPACTION) { | |
7409 __ CallCFunction( | |
7410 ExternalReference::incremental_evacuation_record_write_function( | |
7411 masm->isolate()), | |
7412 argument_count); | |
7413 } else { | |
7414 ASSERT(mode == INCREMENTAL); | |
7415 __ CallCFunction( | |
7416 ExternalReference::incremental_marking_record_write_function( | |
7417 masm->isolate()), | |
7418 argument_count); | |
7419 } | |
7420 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_); | |
7421 } | 7325 } |
7422 | 7326 |
7423 | 7327 |
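The counter update in CheckNeedsToInformIncrementalMarker below shows the port's standard flag translation: ARM decrements with SetCC and branches on "mi", while SH4, lacking a flags-setting sub, decrements, compares with cmpge against zero, and branches with bf when the T bit is false. The same logic in C terms:

    #include <cassert>

    bool NeedsIncrementalMarker(int& counter) {
      counter -= 1;            // sub
      return !(counter >= 0);  // cmpge ...; bf taken when it went negative
    }

    int main() {
      int counter = 1;
      assert(!NeedsIncrementalMarker(counter));  // 1 -> 0: still >= 0
      assert(NeedsIncrementalMarker(counter));   // 0 -> -1: inform marker
      return 0;
    }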
7424 void RecordWriteStub::CheckNeedsToInformIncrementalMarker( | 7328 void RecordWriteStub::CheckNeedsToInformIncrementalMarker( |
7425 MacroAssembler* masm, | 7329 MacroAssembler* masm, |
7426 OnNoNeedToInformIncrementalMarker on_no_need, | 7330 OnNoNeedToInformIncrementalMarker on_no_need, |
7427 Mode mode) { | 7331 Mode mode) { |
7428 Label on_black; | 7332 Label on_black; |
7429 Label need_incremental; | 7333 Label need_incremental; |
7430 Label need_incremental_pop_scratch; | 7334 Label need_incremental_pop_scratch; |
7431 | 7335 |
7432 __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask)); | 7336 __ land(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask)); |
7433 __ ldr(regs_.scratch1(), | 7337 __ ldr(regs_.scratch1(), |
7434 MemOperand(regs_.scratch0(), | 7338 MemOperand(regs_.scratch0(), |
7435 MemoryChunk::kWriteBarrierCounterOffset)); | 7339 MemoryChunk::kWriteBarrierCounterOffset)); |
7436 __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC); | 7340 __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1)); |
| 7341 __ cmpge(regs_.scratch1(), Operand(0)); |
7437 __ str(regs_.scratch1(), | 7342 __ str(regs_.scratch1(), |
7438 MemOperand(regs_.scratch0(), | 7343 MemOperand(regs_.scratch0(), |
7439 MemoryChunk::kWriteBarrierCounterOffset)); | 7344 MemoryChunk::kWriteBarrierCounterOffset)); |
7440 __ b(mi, &need_incremental); | 7345 __ bf(&need_incremental); |
7441 | 7346 |
7442 // Let's look at the color of the object: If it is not black we don't have | 7347 // Let's look at the color of the object: If it is not black we don't have |
7443 // to inform the incremental marker. | 7348 // to inform the incremental marker. |
7444 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); | 7349 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); |
7445 | 7350 |
7446 regs_.Restore(masm); | 7351 regs_.Restore(masm); |
7447 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { | 7352 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { |
7448 __ RememberedSetHelper(object_, | 7353 __ RememberedSetHelper(object_, |
7449 address_, | 7354 address_, |
7450 value_, | 7355 value_, |
(...skipping 82 matching lines...)
7533 // call. | 7438 // call. |
7534 __ Push(r1, r3, r0); | 7439 __ Push(r1, r3, r0); |
7535 __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | 7440 __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
7536 __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset)); | 7441 __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset)); |
7537 __ Push(r5, r4); | 7442 __ Push(r5, r4); |
7538 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); | 7443 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); |
7539 | 7444 |
7540 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. | 7445 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. |
7541 __ bind(&fast_elements); | 7446 __ bind(&fast_elements); |
7542 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); | 7447 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); |
7543 __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); | 7448 __ lsl(r6, r3, Operand(kPointerSizeLog2 - kSmiTagSize)); |
| 7449 __ add(r6, r5, r6); |
7544 __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 7450 __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
7545 __ str(r0, MemOperand(r6, 0)); | 7451 __ str(r0, MemOperand(r6, 0)); |
7546 // Update the write barrier for the array store. | 7452 // Update the write barrier for the array store. |
7547 __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs, | 7453 __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs, |
7548 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); | 7454 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); |
7549 __ Ret(); | 7455 __ Ret(); |
7550 | 7456 |
7551 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS, | 7457 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS, |
7552 // and value is Smi. | 7458 // and value is Smi. |
7553 __ bind(&smi_element); | 7459 __ bind(&smi_element); |
7554 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); | 7460 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); |
7555 __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); | 7461 __ lsl(r6, r3, Operand(kPointerSizeLog2 - kSmiTagSize)); |
| 7462 __ add(r6, r5, r6); |
7556 __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize)); | 7463 __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize)); |
7557 __ Ret(); | 7464 __ Ret(); |
7558 | 7465 |
7559 // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS. | 7466 // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS. |
7560 __ bind(&double_elements); | 7467 __ bind(&double_elements); |
7561 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); | 7468 __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); |
7562 __ StoreNumberToDoubleElements(r0, r3, r1, | 7469 __ StoreNumberToDoubleElements(r0, r3, r1, |
7563 // Overwrites all regs after this. | 7470 // Overwrites all regs after this. |
7564 r5, r6, r7, r9, r2, | 7471 r5, r6, r7, r9, r2, |
7565 &slow_elements); | 7472 &slow_elements); |
7566 __ Ret(); | 7473 __ Ret(); |
7567 } | 7474 } |
7568 | 7475 |
7569 | 7476 |
7570 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { | 7477 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { |
7571 if (entry_hook_ != NULL) { | 7478 if (entry_hook_ != NULL) { |
7572 PredictableCodeSizeScope predictable(masm); | |
7573 ProfileEntryHookStub stub; | 7479 ProfileEntryHookStub stub; |
7574 __ push(lr); | 7480 __ push(pr); |
7575 __ CallStub(&stub); | 7481 __ CallStub(&stub); |
7576 __ pop(lr); | 7482 __ pop(pr); |
7577 } | 7483 } |
7578 } | 7484 } |
7579 | 7485 |
7580 | 7486 |
7581 void ProfileEntryHookStub::Generate(MacroAssembler* masm) { | 7487 void ProfileEntryHookStub::Generate(MacroAssembler* masm) { |
7582 // The entry hook is a "push lr" instruction, followed by a call. | 7488 // The entry hook is a "push lr" instruction, followed by a call. |
7583 const int32_t kReturnAddressDistanceFromFunctionStart = | 7489 const int32_t kReturnAddressDistanceFromFunctionStart = |
7584 3 * Assembler::kInstrSize; | 7490 Assembler::kCallTargetAddressOffset + Assembler::kInstrSize; |
7585 | 7491 |
7586 // Save live volatile registers. | 7492 // Save live volatile registers. |
7587 __ Push(lr, r5, r1); | 7493 __ Push(pr, r5, r1); |
7588 const int32_t kNumSavedRegs = 3; | 7494 const int32_t kNumSavedRegs = 3; |
7589 | 7495 |
7590 // Compute the function's address for the first argument. | 7496 // Compute the function's address for the first argument. |
7591 __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart)); | 7497 __ sub(r0, pr, Operand(kReturnAddressDistanceFromFunctionStart)); |
7592 | 7498 |
7593 // The caller's return address is above the saved temporaries. | 7499 // The caller's return address is above the saved temporaries. |
7594 // Grab that for the second argument to the hook. | 7500 // Grab that for the second argument to the hook. |
7595 __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize)); | 7501 __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize)); |
7596 | 7502 |
7597 // Align the stack if necessary. | 7503 // Align the stack if necessary. |
7598 int frame_alignment = masm->ActivationFrameAlignment(); | 7504 int frame_alignment = OS::ActivationFrameAlignment(); |
7599 if (frame_alignment > kPointerSize) { | 7505 if (frame_alignment > kPointerSize) { |
7600 __ mov(r5, sp); | 7506 __ mov(r5, sp); |
7601 ASSERT(IsPowerOf2(frame_alignment)); | 7507 ASSERT(IsPowerOf2(frame_alignment)); |
7602 __ and_(sp, sp, Operand(-frame_alignment)); | 7508 __ land(sp, sp, Operand(-frame_alignment)); |
7603 } | 7509 } |
7604 | 7510 |
7605 #if defined(V8_HOST_ARCH_ARM) | |
7606 __ mov(ip, Operand(reinterpret_cast<int32_t>(&entry_hook_))); | 7511 __ mov(ip, Operand(reinterpret_cast<int32_t>(&entry_hook_))); |
7607 __ ldr(ip, MemOperand(ip)); | 7512 __ ldr(ip, MemOperand(ip)); |
7608 #else | 7513 |
7609 // Under the simulator we need to indirect the entry hook through a | 7514 __ jsr(ip); |
7610 // trampoline function at a known address. | |
7611 Address trampoline_address = reinterpret_cast<Address>( | |
7612 reinterpret_cast<intptr_t>(EntryHookTrampoline)); | |
7613 ApiFunction dispatcher(trampoline_address); | |
7614 __ mov(ip, Operand(ExternalReference(&dispatcher, | |
7615 ExternalReference::BUILTIN_CALL, | |
7616 masm->isolate()))); | |
7617 #endif | |
7618 __ Call(ip); | |
7619 | 7515 |
7620 // Restore the stack pointer if needed. | 7516 // Restore the stack pointer if needed. |
7621 if (frame_alignment > kPointerSize) { | 7517 if (frame_alignment > kPointerSize) { |
7622 __ mov(sp, r5); | 7518 __ mov(sp, r5); |
7623 } | 7519 } |
7624 | 7520 |
7625 __ Pop(lr, r5, r1); | 7521 __ Pop(pr, r5, r1); |
7626 __ Ret(); | 7522 __ Ret(); |
7627 } | 7523 } |
7628 | 7524 |
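The land(sp, sp, Operand(-frame_alignment)) above depends on the alignment being a power of two (hence the IsPowerOf2 assert): ANDing with -A clears the low bits, rounding sp down to a multiple of A, and the copy saved in r5 restores the original value afterwards. Worked out:

    #include <cassert>
    #include <cstdint>

    uint32_t AlignDown(uint32_t sp, uint32_t a) {
      return sp & ~(a - 1);  // same bits as sp & -a when a is 2^n
    }

    int main() {
      assert(AlignDown(0x7ffc6, 8) == 0x7ffc0);  // rounded down
      assert(AlignDown(0x7ffc0, 8) == 0x7ffc0);  // already aligned
      return 0;
    }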
7629 #undef __ | 7525 #undef __ |
7630 | 7526 |
7631 } } // namespace v8::internal | 7527 } } // namespace v8::internal |
7632 | 7528 |
7633 #endif // V8_TARGET_ARCH_ARM | 7529 #endif // V8_TARGET_ARCH_SH4 |