OLD | NEW |
---|---|
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 104 matching lines...)
115 __ LoadRoot(r2, Heap::kTheHoleValueRootIndex); | 115 __ LoadRoot(r2, Heap::kTheHoleValueRootIndex); |
116 __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); | 116 __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); |
117 __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset)); | 117 __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset)); |
118 __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset)); | 118 __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset)); |
119 __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset)); | 119 __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset)); |
120 __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset)); | 120 __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset)); |
121 __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset)); | 121 __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset)); |
122 __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset)); | 122 __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset)); |
123 __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset)); | 123 __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset)); |
124 | 124 |
125 | |
126 // Initialize the code pointer in the function to be the one | 125 // Initialize the code pointer in the function to be the one |
127 // found in the shared function info object. | 126 // found in the shared function info object. |
128 __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset)); | 127 __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset)); |
129 __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); | 128 __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); |
130 __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset)); | 129 __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset)); |
131 | 130 |
132 // Return result. The argument function info has been popped already. | 131 // Return result. The argument function info has been popped already. |
133 __ Ret(); | 132 __ Ret(); |
134 | 133 |
135 // Create a new closure through the slower runtime call. | 134 // Create a new closure through the slower runtime call. |
(...skipping 14 matching lines...)
150 r0, | 149 r0, |
151 r1, | 150 r1, |
152 r2, | 151 r2, |
153 &gc, | 152 &gc, |
154 TAG_OBJECT); | 153 TAG_OBJECT); |
155 | 154 |
156 // Load the function from the stack. | 155 // Load the function from the stack. |
157 __ ldr(r3, MemOperand(sp, 0)); | 156 __ ldr(r3, MemOperand(sp, 0)); |
158 | 157 |
159 // Set up the object header. | 158 // Set up the object header. |
160 __ LoadRoot(r2, Heap::kFunctionContextMapRootIndex); | 159 __ LoadRoot(r1, Heap::kFunctionContextMapRootIndex); |
161 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); | |
162 __ mov(r2, Operand(Smi::FromInt(length))); | 160 __ mov(r2, Operand(Smi::FromInt(length))); |
163 __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset)); | 161 __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset)); |
162 __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); | |
164 | 163 |
165 // Set up the fixed slots. | 164 // Set up the fixed slots, copy the global object from the previous context. |
165 __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); | |
166 __ mov(r1, Operand(Smi::FromInt(0))); | 166 __ mov(r1, Operand(Smi::FromInt(0))); |
167 __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX))); | 167 __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX))); |
168 __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX))); | 168 __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX))); |
169 __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX))); | 169 __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX))); |
170 | 170 __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX))); |
171 // Copy the global object from the previous context. | |
172 __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); | |
173 __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX))); | |
174 | 171 |
175 // Initialize the rest of the slots to undefined. | 172 // Initialize the rest of the slots to undefined. |
176 __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); | 173 __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); |
177 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { | 174 for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) { |
178 __ str(r1, MemOperand(r0, Context::SlotOffset(i))); | 175 __ str(r1, MemOperand(r0, Context::SlotOffset(i))); |
179 } | 176 } |
180 | 177 |
181 // Remove the on-stack argument and return. | 178 // Remove the on-stack argument and return. |
182 __ mov(cp, r0); | 179 __ mov(cp, r0); |
183 __ pop(); | 180 __ pop(); |
(...skipping 38 matching lines...)
222 if (FLAG_debug_code) { | 219 if (FLAG_debug_code) { |
223 const char* message = "Expected 0 as a Smi sentinel"; | 220 const char* message = "Expected 0 as a Smi sentinel"; |
224 __ cmp(r3, Operand::Zero()); | 221 __ cmp(r3, Operand::Zero()); |
225 __ Assert(eq, message); | 222 __ Assert(eq, message); |
226 } | 223 } |
227 __ ldr(r3, GlobalObjectOperand()); | 224 __ ldr(r3, GlobalObjectOperand()); |
228 __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset)); | 225 __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset)); |
229 __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX)); | 226 __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX)); |
230 __ bind(&after_sentinel); | 227 __ bind(&after_sentinel); |
231 | 228 |
232 // Set up the fixed slots. | 229 // Set up the fixed slots, copy the global object from the previous context. |
230 __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX)); | |
233 __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX)); | 231 __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX)); |
234 __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX)); | 232 __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX)); |
235 __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX)); | 233 __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX)); |
236 | 234 __ str(r2, ContextOperand(r0, Context::GLOBAL_INDEX)); |
237 // Copy the global object from the previous context. | |
238 __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX)); | |
239 __ str(r1, ContextOperand(r0, Context::GLOBAL_INDEX)); | |
240 | 235 |
241 // Initialize the rest of the slots to the hole value. | 236 // Initialize the rest of the slots to the hole value. |
242 __ LoadRoot(r1, Heap::kTheHoleValueRootIndex); | 237 __ LoadRoot(r1, Heap::kTheHoleValueRootIndex); |
243 for (int i = 0; i < slots_; i++) { | 238 for (int i = 0; i < slots_; i++) { |
244 __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS)); | 239 __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS)); |
245 } | 240 } |
246 | 241 |
247 // Remove the on-stack argument and return. | 242 // Remove the on-stack argument and return. |
248 __ mov(cp, r0); | 243 __ mov(cp, r0); |
249 __ add(sp, sp, Operand(2 * kPointerSize)); | 244 __ add(sp, sp, Operand(2 * kPointerSize)); |
(...skipping 69 matching lines...)
319 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 314 __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
320 __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); | 315 __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); |
321 __ CompareRoot(r3, Heap::kUndefinedValueRootIndex); | 316 __ CompareRoot(r3, Heap::kUndefinedValueRootIndex); |
322 __ b(eq, &slow_case); | 317 __ b(eq, &slow_case); |
323 | 318 |
324 FastCloneShallowArrayStub::Mode mode = mode_; | 319 FastCloneShallowArrayStub::Mode mode = mode_; |
325 if (mode == CLONE_ANY_ELEMENTS) { | 320 if (mode == CLONE_ANY_ELEMENTS) { |
326 Label double_elements, check_fast_elements; | 321 Label double_elements, check_fast_elements; |
327 __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset)); | 322 __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset)); |
328 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); | 323 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); |
329 __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex); | 324 __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex); |
330 __ cmp(r0, ip); | |
331 __ b(ne, &check_fast_elements); | 325 __ b(ne, &check_fast_elements); |
332 GenerateFastCloneShallowArrayCommon(masm, 0, | 326 GenerateFastCloneShallowArrayCommon(masm, 0, |
333 COPY_ON_WRITE_ELEMENTS, &slow_case); | 327 COPY_ON_WRITE_ELEMENTS, &slow_case); |
334 // Return and remove the on-stack parameters. | 328 // Return and remove the on-stack parameters. |
335 __ add(sp, sp, Operand(3 * kPointerSize)); | 329 __ add(sp, sp, Operand(3 * kPointerSize)); |
336 __ Ret(); | 330 __ Ret(); |
337 | 331 |
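Several hunks in this patch replace a LoadRoot-into-ip followed by a cmp with a single CompareRoot call. As a rough sketch of what that helper can amount to (assuming, as the open-coded sequence did, that it is free to clobber the ip scratch register):

    // Sketch only: a plausible CompareRoot, emitting the same two instructions
    // the call sites used to open-code.
    void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
      ASSERT(!obj.is(ip));   // ip is clobbered by the root load
      LoadRoot(ip, index);   // fetch the root value into the scratch register
      cmp(obj, ip);          // leave the flags for the branch that follows
    }

If implemented this way the emitted code is unchanged; the gain is that each call site shrinks from two assembler statements to one.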
338 __ bind(&check_fast_elements); | 332 __ bind(&check_fast_elements); |
339 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); | 333 __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex); |
340 __ cmp(r0, ip); | |
341 __ b(ne, &double_elements); | 334 __ b(ne, &double_elements); |
342 GenerateFastCloneShallowArrayCommon(masm, length_, | 335 GenerateFastCloneShallowArrayCommon(masm, length_, |
343 CLONE_ELEMENTS, &slow_case); | 336 CLONE_ELEMENTS, &slow_case); |
344 // Return and remove the on-stack parameters. | 337 // Return and remove the on-stack parameters. |
345 __ add(sp, sp, Operand(3 * kPointerSize)); | 338 __ add(sp, sp, Operand(3 * kPointerSize)); |
346 __ Ret(); | 339 __ Ret(); |
347 | 340 |
348 __ bind(&double_elements); | 341 __ bind(&double_elements); |
349 mode = CLONE_DOUBLE_ELEMENTS; | 342 mode = CLONE_DOUBLE_ELEMENTS; |
350 // Fall through to generate the code to handle double elements. | 343 // Fall through to generate the code to handle double elements. |
(...skipping 232 matching lines...)
583 Register scratch2, | 576 Register scratch2, |
584 Label* not_number) { | 577 Label* not_number) { |
585 if (FLAG_debug_code) { | 578 if (FLAG_debug_code) { |
586 __ AbortIfNotRootValue(heap_number_map, | 579 __ AbortIfNotRootValue(heap_number_map, |
587 Heap::kHeapNumberMapRootIndex, | 580 Heap::kHeapNumberMapRootIndex, |
588 "HeapNumberMap register clobbered."); | 581 "HeapNumberMap register clobbered."); |
589 } | 582 } |
590 | 583 |
591 Label is_smi, done; | 584 Label is_smi, done; |
592 | 585 |
593 __ JumpIfSmi(object, &is_smi); | 586 // Smi-check |
587 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); | |
ulan 2012/01/27 13:19:11: Can we introduce UntagAndJumpIfSmi for these tree
588 __ mov(scratch1, Operand(object, ASR, kSmiTagSize), SetCC); | |
589 __ b(cc, &is_smi); // Shifter carry is not set for a smi. | |
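The review comment above asks for an UntagAndJumpIfSmi helper to replace this recurring three-instruction pattern. A minimal sketch of such a MacroAssembler method (hypothetical here, since the patch still open-codes the sequence), relying on the same kSmiTag == 0 / kSmiTagSize == 1 layout the STATIC_ASSERT guards:

    // Sketch only: untag src into dst and branch to smi_case if src was a smi.
    // ASR #1 drops the tag and shifts the tag bit into the carry flag, so a
    // clear carry means the value was a smi.
    void MacroAssembler::UntagAndJumpIfSmi(Register dst,
                                           Register src,
                                           Label* smi_case) {
      STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
      mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
      b(cc, smi_case);  // Shifter carry is not set for a smi.
    }

Each of the sites flagged in this review could then collapse to a single call in the style of __ UntagAndJumpIfSmi(scratch1, object, &is_smi);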
590 // Heap number check | |
594 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); | 591 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); |
595 | 592 |
596 // Handle loading a double from a heap number. | 593 // Handle loading a double from a heap number. |
597 if (CpuFeatures::IsSupported(VFP3) && | 594 if (CpuFeatures::IsSupported(VFP3) && |
598 destination == kVFPRegisters) { | 595 destination == kVFPRegisters) { |
599 CpuFeatures::Scope scope(VFP3); | 596 CpuFeatures::Scope scope(VFP3); |
600 // Load the double from tagged HeapNumber to double register. | 597 // Load the double from tagged HeapNumber to double register. |
601 __ sub(scratch1, object, Operand(kHeapObjectTag)); | 598 __ sub(scratch1, object, Operand(kHeapObjectTag)); |
602 __ vldr(dst, scratch1, HeapNumber::kValueOffset); | 599 __ vldr(dst, scratch1, HeapNumber::kValueOffset); |
603 } else { | 600 } else { |
604 ASSERT(destination == kCoreRegisters); | 601 ASSERT(destination == kCoreRegisters); |
605 // Load the double from heap number to dst1 and dst2 in double format. | 602 // Load the double from heap number to dst1 and dst2 in double format. |
606 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); | 603 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); |
607 } | 604 } |
608 __ jmp(&done); | 605 __ jmp(&done); |
609 | 606 |
610 // Handle loading a double from a smi. | 607 // Handle loading a double from a smi. |
611 __ bind(&is_smi); | 608 __ bind(&is_smi); |
612 if (CpuFeatures::IsSupported(VFP3)) { | 609 if (CpuFeatures::IsSupported(VFP3)) { |
613 CpuFeatures::Scope scope(VFP3); | 610 CpuFeatures::Scope scope(VFP3); |
614 // Convert smi to double using VFP instructions. | 611 // Convert smi to double using VFP instructions. |
615 __ SmiUntag(scratch1, object); | |
616 __ vmov(dst.high(), scratch1); | 612 __ vmov(dst.high(), scratch1); |
617 __ vcvt_f64_s32(dst, dst.high()); | 613 __ vcvt_f64_s32(dst, dst.high()); |
618 if (destination == kCoreRegisters) { | 614 if (destination == kCoreRegisters) { |
619 // Load the converted smi to dst1 and dst2 in double format. | 615 // Load the converted smi to dst1 and dst2 in double format. |
620 __ vmov(dst1, dst2, dst); | 616 __ vmov(dst1, dst2, dst); |
621 } | 617 } |
622 } else { | 618 } else { |
623 ASSERT(destination == kCoreRegisters); | 619 ASSERT(destination == kCoreRegisters); |
624 // Write smi to dst1 and dst2 double format. | 620 // Write smi to dst1 and dst2 double format. |
625 __ mov(scratch1, Operand(object)); | 621 __ mov(scratch1, Operand(object)); |
(...skipping 14 matching lines...)
640 Register scratch1, | 636 Register scratch1, |
641 Register scratch2, | 637 Register scratch2, |
642 Register scratch3, | 638 Register scratch3, |
643 DwVfpRegister double_scratch, | 639 DwVfpRegister double_scratch, |
644 Label* not_number) { | 640 Label* not_number) { |
645 if (FLAG_debug_code) { | 641 if (FLAG_debug_code) { |
646 __ AbortIfNotRootValue(heap_number_map, | 642 __ AbortIfNotRootValue(heap_number_map, |
647 Heap::kHeapNumberMapRootIndex, | 643 Heap::kHeapNumberMapRootIndex, |
648 "HeapNumberMap register clobbered."); | 644 "HeapNumberMap register clobbered."); |
649 } | 645 } |
650 Label is_smi; | |
651 Label done; | 646 Label done; |
652 Label not_in_int32_range; | 647 Label not_in_int32_range; |
653 | 648 |
654 __ JumpIfSmi(object, &is_smi); | 649 // Smi-check |
650 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); | |
ulan 2012/01/27 13:19:11: UntagAndJumpIfSmi
651 __ mov(dst, Operand(object, ASR, kSmiTagSize), SetCC); | |
652 __ b(cc, &done); // Shifter carry is not set for a smi. | |
655 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset)); | 653 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset)); |
656 __ cmp(scratch1, heap_number_map); | 654 __ cmp(scratch1, heap_number_map); |
657 __ b(ne, not_number); | 655 __ b(ne, not_number); |
658 __ ConvertToInt32(object, | 656 __ ConvertToInt32(object, |
659 dst, | 657 dst, |
660 scratch1, | 658 scratch1, |
661 scratch2, | 659 scratch2, |
662 double_scratch, | 660 double_scratch, |
663 ¬_in_int32_range); | 661 ¬_in_int32_range); |
664 __ jmp(&done); | 662 __ jmp(&done); |
665 | 663 |
666 __ bind(¬_in_int32_range); | 664 __ bind(¬_in_int32_range); |
667 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); | 665 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); |
668 __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset)); | 666 __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset)); |
669 | 667 |
670 __ EmitOutOfInt32RangeTruncate(dst, | 668 __ EmitOutOfInt32RangeTruncate(dst, |
671 scratch1, | 669 scratch1, |
672 scratch2, | 670 scratch2, |
673 scratch3); | 671 scratch3); |
674 __ jmp(&done); | 672 __ jmp(&done); |
675 | 673 |
676 __ bind(&is_smi); | |
677 __ SmiUntag(dst, object); | |
678 __ bind(&done); | 674 __ bind(&done); |
679 } | 675 } |
680 | 676 |
681 | 677 |
682 void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, | 678 void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, |
683 Register int_scratch, | 679 Register int_scratch, |
684 Destination destination, | 680 Destination destination, |
685 DwVfpRegister double_dst, | 681 DwVfpRegister double_dst, |
686 Register dst1, | 682 Register dst1, |
687 Register dst2, | 683 Register dst2, |
(...skipping 152 matching lines...)
840 DwVfpRegister double_scratch, | 836 DwVfpRegister double_scratch, |
841 Label* not_int32) { | 837 Label* not_int32) { |
842 ASSERT(!dst.is(object)); | 838 ASSERT(!dst.is(object)); |
843 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); | 839 ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); |
844 ASSERT(!scratch1.is(scratch2) && | 840 ASSERT(!scratch1.is(scratch2) && |
845 !scratch1.is(scratch3) && | 841 !scratch1.is(scratch3) && |
846 !scratch2.is(scratch3)); | 842 !scratch2.is(scratch3)); |
847 | 843 |
848 Label done; | 844 Label done; |
849 | 845 |
850 // Untag the object into the destination register. | 846 // If the object is a smi, return it in destination register, untagged. |
ulan 2012/01/27 13:19:11: UntagAndJumpIfSmi
851 __ SmiUntag(dst, object); | 847 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); |
852 // Just return if the object is a smi. | 848 __ mov(dst, Operand(object, ASR, kSmiTagSize), SetCC); |
853 __ JumpIfSmi(object, &done); | 849 __ b(cc, &done); // Shifter carry is not set for a smi. |
854 | 850 |
855 if (FLAG_debug_code) { | 851 if (FLAG_debug_code) { |
856 __ AbortIfNotRootValue(heap_number_map, | 852 __ AbortIfNotRootValue(heap_number_map, |
857 Heap::kHeapNumberMapRootIndex, | 853 Heap::kHeapNumberMapRootIndex, |
858 "HeapNumberMap register clobbered."); | 854 "HeapNumberMap register clobbered."); |
859 } | 855 } |
860 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); | 856 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); |
861 | 857 |
862 // Object is a heap number. | 858 // Object is a heap number. |
863 // Convert the floating point value to a 32-bit integer. | 859 // Convert the floating point value to a 32-bit integer. |
(...skipping 2439 matching lines...)
3303 CHECK_EQ(2 * kIntSize, elem_out - elem_start); | 3299 CHECK_EQ(2 * kIntSize, elem_out - elem_start); |
3304 } | 3300 } |
3305 #endif | 3301 #endif |
3306 | 3302 |
3307 // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. | 3303 // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. |
3308 __ add(r1, r1, Operand(r1, LSL, 1)); | 3304 __ add(r1, r1, Operand(r1, LSL, 1)); |
3309 __ add(cache_entry, cache_entry, Operand(r1, LSL, 2)); | 3305 __ add(cache_entry, cache_entry, Operand(r1, LSL, 2)); |
3310 // Check if cache matches: Double value is stored in uint32_t[2] array. | 3306 // Check if cache matches: Double value is stored in uint32_t[2] array. |
3311 __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); | 3307 __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); |
3312 __ cmp(r2, r4); | 3308 __ cmp(r2, r4); |
3313 __ b(ne, &calculate); | 3309 __ cmp(r3, r5, eq); |
3314 __ cmp(r3, r5); | |
3315 __ b(ne, &calculate); | 3310 __ b(ne, &calculate); |
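The rewritten cache check saves one branch by predicating the second compare on eq: if the first key words differ, the second cmp is skipped and the ne flags from the first comparison drive the single branch. A small C++ sketch of the resulting control flow (the function name and register-named parameters are purely illustrative):

    #include <cstdint>

    // Sketch: one branch now covers a mismatch in either word of the 2-word key.
    bool CacheKeyMatches(uint32_t r2, uint32_t r3, uint32_t r4, uint32_t r5) {
      // "cmp r3, r5, eq" only executes when r2 == r4, i.e. a short-circuit &&.
      return r2 == r4 && r3 == r5;
    }
    // b(ne, &calculate) fires exactly when CacheKeyMatches(...) is false.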
3316 // Cache hit. Load result, cleanup and return. | 3311 // Cache hit. Load result, cleanup and return. |
3317 Counters* counters = masm->isolate()->counters(); | 3312 Counters* counters = masm->isolate()->counters(); |
3318 __ IncrementCounter( | 3313 __ IncrementCounter( |
3319 counters->transcendental_cache_hit(), 1, scratch0, scratch1); | 3314 counters->transcendental_cache_hit(), 1, scratch0, scratch1); |
3320 if (tagged) { | 3315 if (tagged) { |
3321 // Pop input value from stack and load result into r0. | 3316 // Pop input value from stack and load result into r0. |
3322 __ pop(); | 3317 __ pop(); |
3323 __ mov(r0, Operand(r6)); | 3318 __ mov(r0, Operand(r6)); |
3324 } else { | 3319 } else { |
(...skipping 136 matching lines...)
3461 const Register heapnumbermap = r5; | 3456 const Register heapnumbermap = r5; |
3462 const Register heapnumber = r0; | 3457 const Register heapnumber = r0; |
3463 const DoubleRegister double_base = d1; | 3458 const DoubleRegister double_base = d1; |
3464 const DoubleRegister double_exponent = d2; | 3459 const DoubleRegister double_exponent = d2; |
3465 const DoubleRegister double_result = d3; | 3460 const DoubleRegister double_result = d3; |
3466 const DoubleRegister double_scratch = d0; | 3461 const DoubleRegister double_scratch = d0; |
3467 const SwVfpRegister single_scratch = s0; | 3462 const SwVfpRegister single_scratch = s0; |
3468 const Register scratch = r9; | 3463 const Register scratch = r9; |
3469 const Register scratch2 = r7; | 3464 const Register scratch2 = r7; |
3470 | 3465 |
3471 Label call_runtime, done, exponent_not_smi, int_exponent; | 3466 Label call_runtime, done, int_exponent; |
3472 if (exponent_type_ == ON_STACK) { | 3467 if (exponent_type_ == ON_STACK) { |
3473 Label base_is_smi, unpack_exponent; | 3468 Label base_is_smi, unpack_exponent; |
3474 // The exponent and base are supplied as arguments on the stack. | 3469 // The exponent and base are supplied as arguments on the stack. |
3475 // This can only happen if the stub is called from non-optimized code. | 3470 // This can only happen if the stub is called from non-optimized code. |
3476 // Load input parameters from stack to double registers. | 3471 // Load input parameters from stack to double registers. |
3477 __ ldr(base, MemOperand(sp, 1 * kPointerSize)); | 3472 __ ldr(base, MemOperand(sp, 1 * kPointerSize)); |
3478 __ ldr(exponent, MemOperand(sp, 0 * kPointerSize)); | 3473 __ ldr(exponent, MemOperand(sp, 0 * kPointerSize)); |
3479 | 3474 |
3480 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex); | 3475 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex); |
3481 | 3476 |
3482 __ JumpIfSmi(base, &base_is_smi); | 3477 __ mov(scratch, Operand(base, ASR, kSmiTagSize), SetCC); |
ulan 2012/01/27 13:19:11: UntagAndJumpIfSmi
3478 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); | |
3479 __ b(cc, &base_is_smi); // Shifter carry is not set for a smi. | |
3480 | |
3483 __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset)); | 3481 __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset)); |
3484 __ cmp(scratch, heapnumbermap); | 3482 __ cmp(scratch, heapnumbermap); |
3485 __ b(ne, &call_runtime); | 3483 __ b(ne, &call_runtime); |
3486 | 3484 |
3487 __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset)); | 3485 __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset)); |
3488 __ jmp(&unpack_exponent); | 3486 __ jmp(&unpack_exponent); |
3489 | 3487 |
3490 __ bind(&base_is_smi); | 3488 __ bind(&base_is_smi); |
3491 __ SmiUntag(base); | 3489 __ vmov(single_scratch, scratch); |
3492 __ vmov(single_scratch, base); | |
3493 __ vcvt_f64_s32(double_base, single_scratch); | 3490 __ vcvt_f64_s32(double_base, single_scratch); |
3494 __ bind(&unpack_exponent); | 3491 __ bind(&unpack_exponent); |
3495 | 3492 |
3496 __ JumpIfNotSmi(exponent, &exponent_not_smi); | 3493 __ mov(scratch, Operand(exponent, ASR, kSmiTagSize), SetCC); |
ulan 2012/01/27 13:19:11: UntagAndJumpIfSmi
3497 __ SmiUntag(exponent); | 3494 __ b(cc, &int_exponent); |
3498 __ jmp(&int_exponent); | |
3499 | 3495 |
3500 __ bind(&exponent_not_smi); | |
3501 __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); | 3496 __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); |
3502 __ cmp(scratch, heapnumbermap); | 3497 __ cmp(scratch, heapnumbermap); |
3503 __ b(ne, &call_runtime); | 3498 __ b(ne, &call_runtime); |
3504 __ vldr(double_exponent, | 3499 __ vldr(double_exponent, |
3505 FieldMemOperand(exponent, HeapNumber::kValueOffset)); | 3500 FieldMemOperand(exponent, HeapNumber::kValueOffset)); |
3506 } else if (exponent_type_ == TAGGED) { | 3501 } else if (exponent_type_ == TAGGED) { |
3507 // Base is already in double_base. | 3502 // Base is already in double_base. |
3508 __ JumpIfNotSmi(exponent, &exponent_not_smi); | 3503 __ mov(scratch, Operand(exponent, ASR, kSmiTagSize), SetCC); |
ulan 2012/01/27 13:19:11: UntagAndJumpIfSmi
3509 __ SmiUntag(exponent); | 3504 __ b(cc, &int_exponent); |
3510 __ jmp(&int_exponent); | |
3511 | 3505 |
3512 __ bind(&exponent_not_smi); | |
3513 __ vldr(double_exponent, | 3506 __ vldr(double_exponent, |
3514 FieldMemOperand(exponent, HeapNumber::kValueOffset)); | 3507 FieldMemOperand(exponent, HeapNumber::kValueOffset)); |
3515 } | 3508 } |
3516 | 3509 |
3517 if (exponent_type_ != INTEGER) { | 3510 if (exponent_type_ != INTEGER) { |
3518 Label int_exponent_convert; | 3511 Label int_exponent_convert; |
3519 // Detect integer exponents stored as double. | 3512 // Detect integer exponents stored as double. |
3520 __ vcvt_u32_f64(single_scratch, double_exponent); | 3513 __ vcvt_u32_f64(single_scratch, double_exponent); |
3521 // We do not check for NaN or Infinity here because comparing numbers on | 3514 // We do not check for NaN or Infinity here because comparing numbers on |
3522 // ARM correctly distinguishes NaNs. We end up calling the built-in. | 3515 // ARM correctly distinguishes NaNs. We end up calling the built-in. |
(...skipping 52 matching lines...)
3575 __ CallCFunction( | 3568 __ CallCFunction( |
3576 ExternalReference::power_double_double_function(masm->isolate()), | 3569 ExternalReference::power_double_double_function(masm->isolate()), |
3577 0, 2); | 3570 0, 2); |
3578 } | 3571 } |
3579 __ pop(lr); | 3572 __ pop(lr); |
3580 __ GetCFunctionDoubleResult(double_result); | 3573 __ GetCFunctionDoubleResult(double_result); |
3581 __ jmp(&done); | 3574 __ jmp(&done); |
3582 | 3575 |
3583 __ bind(&int_exponent_convert); | 3576 __ bind(&int_exponent_convert); |
3584 __ vcvt_u32_f64(single_scratch, double_exponent); | 3577 __ vcvt_u32_f64(single_scratch, double_exponent); |
3585 __ vmov(exponent, single_scratch); | 3578 __ vmov(scratch, single_scratch); |
3586 } | 3579 } |
3587 | 3580 |
3588 // Calculate power with integer exponent. | 3581 // Calculate power with integer exponent. |
3589 __ bind(&int_exponent); | 3582 __ bind(&int_exponent); |
3590 | 3583 // Exponent has been stored into scratch as untagged integer. |
3591 __ mov(scratch, exponent); // Back up exponent. | 3584 __ mov(exponent, scratch); // Back up exponent. |
3592 __ vmov(double_scratch, double_base); // Back up base. | 3585 __ vmov(double_scratch, double_base); // Back up base. |
3593 __ vmov(double_result, 1.0); | 3586 __ vmov(double_result, 1.0); |
3594 | 3587 |
3595 // Get absolute value of exponent. | 3588 // Get absolute value of exponent. |
3596 __ cmp(scratch, Operand(0)); | 3589 __ cmp(scratch, Operand(0)); |
3597 __ mov(scratch2, Operand(0), LeaveCC, mi); | 3590 __ mov(scratch2, Operand(0), LeaveCC, mi); |
3598 __ sub(scratch, scratch2, scratch, LeaveCC, mi); | 3591 __ sub(scratch, scratch2, scratch, LeaveCC, mi); |
3599 | 3592 |
3600 Label while_true; | 3593 Label while_true; |
3601 __ bind(&while_true); | 3594 __ bind(&while_true); |
(...skipping 489 matching lines...)
4091 } | 4084 } |
4092 | 4085 |
4093 // Check that the left hand is a JS object and load map. | 4086 // Check that the left hand is a JS object and load map. |
4094 __ JumpIfSmi(object, ¬_js_object); | 4087 __ JumpIfSmi(object, ¬_js_object); |
4095 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object); | 4088 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object); |
4096 | 4089 |
4097 // If there is a call site cache don't look in the global cache, but do the | 4090 // If there is a call site cache don't look in the global cache, but do the |
4098 // real lookup and update the call site cache. | 4091 // real lookup and update the call site cache. |
4099 if (!HasCallSiteInlineCheck()) { | 4092 if (!HasCallSiteInlineCheck()) { |
4100 Label miss; | 4093 Label miss; |
4101 __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex); | 4094 __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex); |
4102 __ cmp(function, ip); | |
4103 __ b(ne, &miss); | 4095 __ b(ne, &miss); |
4104 __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex); | 4096 __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex); |
4105 __ cmp(map, ip); | |
4106 __ b(ne, &miss); | 4097 __ b(ne, &miss); |
4107 __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); | 4098 __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); |
4108 __ Ret(HasArgsInRegisters() ? 0 : 2); | 4099 __ Ret(HasArgsInRegisters() ? 0 : 2); |
4109 | 4100 |
4110 __ bind(&miss); | 4101 __ bind(&miss); |
4111 } | 4102 } |
4112 | 4103 |
4113 // Get the prototype of the function. | 4104 // Get the prototype of the function. |
4114 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true); | 4105 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true); |
4115 | 4106 |
(...skipping 604 matching lines...)
4720 // regexp_data: RegExp data (FixedArray) | 4711 // regexp_data: RegExp data (FixedArray) |
4721 // Check that the fourth object is a JSArray object. | 4712 // Check that the fourth object is a JSArray object. |
4722 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); | 4713 __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); |
4723 __ JumpIfSmi(r0, &runtime); | 4714 __ JumpIfSmi(r0, &runtime); |
4724 __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE); | 4715 __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE); |
4725 __ b(ne, &runtime); | 4716 __ b(ne, &runtime); |
4726 // Check that the JSArray is in fast case. | 4717 // Check that the JSArray is in fast case. |
4727 __ ldr(last_match_info_elements, | 4718 __ ldr(last_match_info_elements, |
4728 FieldMemOperand(r0, JSArray::kElementsOffset)); | 4719 FieldMemOperand(r0, JSArray::kElementsOffset)); |
4729 __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); | 4720 __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); |
4730 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); | 4721 __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex); |
4731 __ cmp(r0, ip); | |
4732 __ b(ne, &runtime); | 4722 __ b(ne, &runtime); |
4733 // Check that the last match info has space for the capture registers and the | 4723 // Check that the last match info has space for the capture registers and the |
4734 // additional information. | 4724 // additional information. |
4735 __ ldr(r0, | 4725 __ ldr(r0, |
4736 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); | 4726 FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); |
4737 __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead)); | 4727 __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead)); |
4738 __ cmp(r2, Operand(r0, ASR, kSmiTagSize)); | 4728 __ cmp(r2, Operand(r0, ASR, kSmiTagSize)); |
4739 __ b(gt, &runtime); | 4729 __ b(gt, &runtime); |
4740 | 4730 |
4741 // Reset offset for possibly sliced string. | 4731 // Reset offset for possibly sliced string. |
(...skipping 333 matching lines...)
5075 __ add(r3, r0, Operand(JSRegExpResult::kSize)); | 5065 __ add(r3, r0, Operand(JSRegExpResult::kSize)); |
5076 __ mov(r4, Operand(factory->empty_fixed_array())); | 5066 __ mov(r4, Operand(factory->empty_fixed_array())); |
5077 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset)); | 5067 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset)); |
5078 __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset)); | 5068 __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset)); |
5079 __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX)); | 5069 __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX)); |
5080 __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset)); | 5070 __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset)); |
5081 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); | 5071 __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); |
5082 | 5072 |
5083 // Set input, index and length fields from arguments. | 5073 // Set input, index and length fields from arguments. |
5084 __ ldr(r1, MemOperand(sp, kPointerSize * 0)); | 5074 __ ldr(r1, MemOperand(sp, kPointerSize * 0)); |
5075 __ ldr(r2, MemOperand(sp, kPointerSize * 1)); | |
5076 __ ldr(r6, MemOperand(sp, kPointerSize * 2)); | |
5085 __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset)); | 5077 __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset)); |
5086 __ ldr(r1, MemOperand(sp, kPointerSize * 1)); | 5078 __ str(r2, FieldMemOperand(r0, JSRegExpResult::kIndexOffset)); |
5087 __ str(r1, FieldMemOperand(r0, JSRegExpResult::kIndexOffset)); | 5079 __ str(r6, FieldMemOperand(r0, JSArray::kLengthOffset)); |
5088 __ ldr(r1, MemOperand(sp, kPointerSize * 2)); | |
5089 __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset)); | |
5090 | 5080 |
5091 // Fill out the elements FixedArray. | 5081 // Fill out the elements FixedArray. |
5092 // r0: JSArray, tagged. | 5082 // r0: JSArray, tagged. |
5093 // r3: FixedArray, tagged. | 5083 // r3: FixedArray, tagged. |
5094 // r5: Number of elements in array, untagged. | 5084 // r5: Number of elements in array, untagged. |
5095 | 5085 |
5096 // Set map. | 5086 // Set map. |
5097 __ mov(r2, Operand(factory->fixed_array_map())); | 5087 __ mov(r2, Operand(factory->fixed_array_map())); |
5098 __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); | 5088 __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); |
5099 // Set FixedArray length. | 5089 // Set FixedArray length. |
(...skipping 263 matching lines...)
5363 __ tst(code_, | 5353 __ tst(code_, |
5364 Operand(kSmiTagMask | | 5354 Operand(kSmiTagMask | |
5365 ((~String::kMaxAsciiCharCode) << kSmiTagSize))); | 5355 ((~String::kMaxAsciiCharCode) << kSmiTagSize))); |
5366 __ b(ne, &slow_case_); | 5356 __ b(ne, &slow_case_); |
5367 | 5357 |
5368 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); | 5358 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); |
5369 // At this point code register contains smi tagged ASCII char code. | 5359 // At this point code register contains smi tagged ASCII char code. |
5370 STATIC_ASSERT(kSmiTag == 0); | 5360 STATIC_ASSERT(kSmiTag == 0); |
5371 __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize)); | 5361 __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize)); |
5372 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); | 5362 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); |
5373 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 5363 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex); |
5374 __ cmp(result_, Operand(ip)); | |
5375 __ b(eq, &slow_case_); | 5364 __ b(eq, &slow_case_); |
5376 __ bind(&exit_); | 5365 __ bind(&exit_); |
5377 } | 5366 } |
5378 | 5367 |
5379 | 5368 |
5380 void StringCharFromCodeGenerator::GenerateSlow( | 5369 void StringCharFromCodeGenerator::GenerateSlow( |
5381 MacroAssembler* masm, | 5370 MacroAssembler* masm, |
5382 const RuntimeCallHelper& call_helper) { | 5371 const RuntimeCallHelper& call_helper) { |
5383 __ Abort("Unexpected fallthrough to CharFromCode slow case"); | 5372 __ Abort("Unexpected fallthrough to CharFromCode slow case"); |
5384 | 5373 |
(...skipping 407 matching lines...)
5792 __ Ldrd(r2, r3, MemOperand(sp, kToOffset)); | 5781 __ Ldrd(r2, r3, MemOperand(sp, kToOffset)); |
5793 STATIC_ASSERT(kFromOffset == kToOffset + 4); | 5782 STATIC_ASSERT(kFromOffset == kToOffset + 4); |
5794 STATIC_ASSERT(kSmiTag == 0); | 5783 STATIC_ASSERT(kSmiTag == 0); |
5795 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); | 5784 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); |
5796 | 5785 |
5797 // I.e., arithmetic shift right by one un-smi-tags. | 5786 // I.e., arithmetic shift right by one un-smi-tags. |
5798 __ mov(r2, Operand(r2, ASR, 1), SetCC); | 5787 __ mov(r2, Operand(r2, ASR, 1), SetCC); |
5799 __ mov(r3, Operand(r3, ASR, 1), SetCC, cc); | 5788 __ mov(r3, Operand(r3, ASR, 1), SetCC, cc); |
5800 // If either to or from had the smi tag bit set, then carry is set now. | 5789 // If either to or from had the smi tag bit set, then carry is set now. |
5801 __ b(cs, &runtime); // Either "from" or "to" is not a smi. | 5790 __ b(cs, &runtime); // Either "from" or "to" is not a smi. |
5802 __ b(mi, &runtime); // From is negative. | 5791 // We want to bailout to runtime here if From is negative. In that case, the |
5803 | 5792 // next instruction is not executed and we fall through to bailing out to |
5793 // runtime. pl is the opposite of mi. | |
5804 // Both r2 and r3 are untagged integers. | 5794 // Both r2 and r3 are untagged integers. |
5805 __ sub(r2, r2, Operand(r3), SetCC); | 5795 __ sub(r2, r2, Operand(r3), SetCC, pl); |
5806 __ b(mi, &runtime); // Fail if from > to. | 5796 __ b(mi, &runtime); // Fail if from > to. |
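The rewritten bounds check folds the "from is negative" bailout into the subtraction by predicating the sub on pl: a negative from leaves the sub unexecuted, the mi flags set while untagging are still live, and the one b(mi, &runtime) now covers both failure cases. A C++ sketch of the combined check (r2 holds to, r3 holds from, both already untagged; the helper name is illustrative only):

    #include <cstdint>

    // Sketch: the single mi branch rejects both a negative 'from' and from > to.
    bool SubstringRangeOk(int32_t to, int32_t from) {
      if (from < 0) return false;  // sub skipped (pl not met); earlier mi flags persist
      return to - from >= 0;       // sub r2, r2, r3, SetCC, pl; mi when from > to
    }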
5807 | 5797 |
5808 // Make sure first argument is a string. | 5798 // Make sure first argument is a string. |
5809 __ ldr(r0, MemOperand(sp, kStringOffset)); | 5799 __ ldr(r0, MemOperand(sp, kStringOffset)); |
5810 STATIC_ASSERT(kSmiTag == 0); | 5800 STATIC_ASSERT(kSmiTag == 0); |
5811 __ JumpIfSmi(r0, &runtime); | 5801 __ JumpIfSmi(r0, &runtime); |
5812 Condition is_string = masm->IsObjectStringType(r0, r1); | 5802 Condition is_string = masm->IsObjectStringType(r0, r1); |
5813 __ b(NegateCondition(is_string), &runtime); | 5803 __ b(NegateCondition(is_string), &runtime); |
5814 | 5804 |
5815 // Short-cut for the case of trivial substring. | 5805 // Short-cut for the case of trivial substring. |
(...skipping 52 matching lines...)
5868 __ CompareRoot(r5, Heap::kEmptyStringRootIndex); | 5858 __ CompareRoot(r5, Heap::kEmptyStringRootIndex); |
5869 __ b(ne, &runtime); | 5859 __ b(ne, &runtime); |
5870 __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset)); | 5860 __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset)); |
5871 // Update instance type. | 5861 // Update instance type. |
5872 __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset)); | 5862 __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset)); |
5873 __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset)); | 5863 __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset)); |
5874 __ jmp(&underlying_unpacked); | 5864 __ jmp(&underlying_unpacked); |
5875 | 5865 |
5876 __ bind(&sliced_string); | 5866 __ bind(&sliced_string); |
5877 // Sliced string. Fetch parent and correct start index by offset. | 5867 // Sliced string. Fetch parent and correct start index by offset. |
5878 __ ldr(r5, FieldMemOperand(r0, SlicedString::kOffsetOffset)); | 5868 __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset)); |
5879 __ add(r3, r3, Operand(r5, ASR, 1)); | |
5880 __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); | 5869 __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); |
5870 __ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index. | |
5881 // Update instance type. | 5871 // Update instance type. |
5882 __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset)); | 5872 __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset)); |
5883 __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset)); | 5873 __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset)); |
5884 __ jmp(&underlying_unpacked); | 5874 __ jmp(&underlying_unpacked); |
5885 | 5875 |
5886 __ bind(&seq_or_external_string); | 5876 __ bind(&seq_or_external_string); |
5887 // Sequential or external string. Just move string to the expected register. | 5877 // Sequential or external string. Just move string to the expected register. |
5888 __ mov(r5, r0); | 5878 __ mov(r5, r0); |
5889 | 5879 |
5890 __ bind(&underlying_unpacked); | 5880 __ bind(&underlying_unpacked); |
(...skipping 1423 matching lines...)
7314 __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r10, | 7304 __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r10, |
7315 &slow_elements); | 7305 &slow_elements); |
7316 __ Ret(); | 7306 __ Ret(); |
7317 } | 7307 } |
7318 | 7308 |
7319 #undef __ | 7309 #undef __ |
7320 | 7310 |
7321 } } // namespace v8::internal | 7311 } } // namespace v8::internal |
7322 | 7312 |
7323 #endif // V8_TARGET_ARCH_ARM | 7313 #endif // V8_TARGET_ARCH_ARM |