OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 38 matching lines...) |
49 root_array_available_(true) { | 49 root_array_available_(true) { |
50 if (isolate() != NULL) { | 50 if (isolate() != NULL) { |
51 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), | 51 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(), |
52 isolate()); | 52 isolate()); |
53 } | 53 } |
54 } | 54 } |
55 | 55 |
56 | 56 |
57 static const int kInvalidRootRegisterDelta = -1; | 57 static const int kInvalidRootRegisterDelta = -1; |
58 | 58 |
| 59 #define __k |
| 60 #define __q |
| 61 #define __n |
59 | 62 |
60 intptr_t MacroAssembler::RootRegisterDelta(ExternalReference other) { | 63 int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) { |
61 if (predictable_code_size() && | 64 if (predictable_code_size() && |
62 (other.address() < reinterpret_cast<Address>(isolate()) || | 65 (other.address() < reinterpret_cast<Address>(isolate()) || |
63 other.address() >= reinterpret_cast<Address>(isolate() + 1))) { | 66 other.address() >= reinterpret_cast<Address>(isolate() + 1))) { |
64 return kInvalidRootRegisterDelta; | 67 return kInvalidRootRegisterDelta; |
65 } | 68 } |
66 Address roots_register_value = kRootRegisterBias + | 69 Address roots_register_value = kRootRegisterBias + |
67 reinterpret_cast<Address>(isolate()->heap()->roots_array_start()); | 70 reinterpret_cast<Address>(isolate()->heap()->roots_array_start()); |
| 71 #ifndef V8_TARGET_ARCH_X32 |
68 intptr_t delta = other.address() - roots_register_value; | 72 intptr_t delta = other.address() - roots_register_value; |
| 73 #else |
| 74 uint64_t o = reinterpret_cast<uint32_t>(other.address()); |
| 75 uint64_t r = reinterpret_cast<uint32_t>(roots_register_value); |
| 76 int64_t delta = o - r; |
| 77 #endif |
69 return delta; | 78 return delta; |
70 } | 79 } |
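Note on the X32 branch above: with 32-bit pointers a plain intptr_t subtraction happens modulo 2^32, so an address with the high bit set would read as negative and the delta could be off by 2^32. Zero-extending both operands to 64 bits first gives the exact signed difference. A minimal standalone sketch of that arithmetic (not the V8 sources; names are mine):

    #include <cassert>
    #include <cstdint>

    // Exact delta between two 32-bit addresses, as the X32 branch computes it.
    int64_t Delta(uint32_t other, uint32_t root) {
      uint64_t o = other;  // zero-extend, mirroring the reinterpret_casts
      uint64_t r = root;
      return static_cast<int64_t>(o - r);  // wraps mod 2^64, then reads signed
    }

    int main() {
      assert(Delta(0x80000010u, 0x80000000u) == 16);  // high-bit address is fine
      assert(Delta(0u, 1u) == -1);                    // negative deltas survive
      return 0;
    }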
71 | 80 |
72 | 81 |
73 Operand MacroAssembler::ExternalOperand(ExternalReference target, | 82 Operand MacroAssembler::ExternalOperand(ExternalReference target, |
74 Register scratch) { | 83 Register scratch) { |
75 if (root_array_available_ && !Serializer::enabled()) { | 84 if (root_array_available_ && !Serializer::enabled()) { |
76 intptr_t delta = RootRegisterDelta(target); | 85 int64_t delta = RootRegisterDelta(target); |
77 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { | 86 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { |
78 Serializer::TooLateToEnableNow(); | 87 Serializer::TooLateToEnableNow(); |
79 return Operand(kRootRegister, static_cast<int32_t>(delta)); | 88 return Operand(kRootRegister, static_cast<int32_t>(delta)); |
80 } | 89 } |
81 } | 90 } |
82 movq(scratch, target); | 91 movq(scratch, target); |
83 return Operand(scratch, 0); | 92 return Operand(scratch, 0); |
84 } | 93 } |
85 | 94 |
86 | 95 |
87 void MacroAssembler::Load(Register destination, ExternalReference source) { | 96 void MacroAssembler::Load(Register destination, ExternalReference source) { |
88 if (root_array_available_ && !Serializer::enabled()) { | 97 if (root_array_available_ && !Serializer::enabled()) { |
89 intptr_t delta = RootRegisterDelta(source); | 98 int64_t delta = RootRegisterDelta(source); |
90 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { | 99 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { |
91 Serializer::TooLateToEnableNow(); | 100 Serializer::TooLateToEnableNow(); |
92 movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta))); | 101 movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta))); |
93 return; | 102 return; |
94 } | 103 } |
95 } | 104 } |
96 // Safe code. | 105 // Safe code. |
97 if (destination.is(rax)) { | 106 if (destination.is(rax)) { |
98 load_rax(source); | 107 load_rax(source); |
99 } else { | 108 } else { |
100 movq(kScratchRegister, source); | 109 movq(kScratchRegister, source); |
101 movq(destination, Operand(kScratchRegister, 0)); | 110 movq(destination, Operand(kScratchRegister, 0)); |
102 } | 111 } |
103 } | 112 } |
104 | 113 |
105 | 114 |
106 void MacroAssembler::Store(ExternalReference destination, Register source) { | 115 void MacroAssembler::Store(ExternalReference destination, Register source) { |
107 if (root_array_available_ && !Serializer::enabled()) { | 116 if (root_array_available_ && !Serializer::enabled()) { |
108 intptr_t delta = RootRegisterDelta(destination); | 117 int64_t delta = RootRegisterDelta(destination); |
109 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { | 118 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { |
110 Serializer::TooLateToEnableNow(); | 119 Serializer::TooLateToEnableNow(); |
111 movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source); | 120 movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source); |
112 return; | 121 return; |
113 } | 122 } |
114 } | 123 } |
115 // Safe code. | 124 // Safe code. |
116 if (source.is(rax)) { | 125 if (source.is(rax)) { |
117 store_rax(destination); | 126 store_rax(destination); |
118 } else { | 127 } else { |
119 movq(kScratchRegister, destination); | 128 movq(kScratchRegister, destination); |
120 movq(Operand(kScratchRegister, 0), source); | 129 movq(Operand(kScratchRegister, 0), source); |
121 } | 130 } |
122 } | 131 } |
123 | 132 |
124 | 133 |
125 void MacroAssembler::LoadAddress(Register destination, | 134 void MacroAssembler::LoadAddress(Register destination, |
126 ExternalReference source) { | 135 ExternalReference source) { |
127 if (root_array_available_ && !Serializer::enabled()) { | 136 if (root_array_available_ && !Serializer::enabled()) { |
128 intptr_t delta = RootRegisterDelta(source); | 137 int64_t delta = RootRegisterDelta(source); |
129 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { | 138 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { |
130 Serializer::TooLateToEnableNow(); | 139 Serializer::TooLateToEnableNow(); |
131 lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta))); | 140 lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta))); |
132 return; | 141 return; |
133 } | 142 } |
134 } | 143 } |
135 // Safe code. | 144 // Safe code. |
136 movq(destination, source); | 145 movq(destination, source); |
137 } | 146 } |
138 | 147 |
139 | 148 |
140 int MacroAssembler::LoadAddressSize(ExternalReference source) { | 149 int MacroAssembler::LoadAddressSize(ExternalReference source) { |
141 if (root_array_available_ && !Serializer::enabled()) { | 150 if (root_array_available_ && !Serializer::enabled()) { |
142 // This calculation depends on the internals of LoadAddress. | 151 // This calculation depends on the internals of LoadAddress. |
143 // Its correctness is ensured by the asserts in the Call | 152 // Its correctness is ensured by the asserts in the Call |
144 // instruction below. | 153 // instruction below. |
145 intptr_t delta = RootRegisterDelta(source); | 154 int64_t delta = RootRegisterDelta(source); |
146 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { | 155 if (delta != kInvalidRootRegisterDelta && is_int32(delta)) { |
147 Serializer::TooLateToEnableNow(); | 156 Serializer::TooLateToEnableNow(); |
148 // Operand is lea(scratch, Operand(kRootRegister, delta)); | 157 // Operand is lea(scratch, Operand(kRootRegister, delta)); |
149 // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7. | 158 // Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7. |
150 int size = 4; | 159 int size = 4; |
151 if (!is_int8(static_cast<int32_t>(delta))) { | 160 if (!is_int8(static_cast<int32_t>(delta))) { |
152 size += 3; // Need full four-byte displacement in lea. | 161 size += 3; // Need full four-byte displacement in lea. |
153 } | 162 } |
154 return size; | 163 return size; |
155 } | 164 } |
156 } | 165 } |
| 166 #ifndef V8_TARGET_ARCH_X32 |
157 // Size of movq(destination, src); | 167 // Size of movq(destination, src); |
158 return 10; | 168 return 10; |
| 169 #else |
| 170 // Size of movl(destination, src); |
| 171 return 6; |
| 172 #endif |
159 } | 173 } |
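The size bookkeeping here is encoding arithmetic: a REX.W lea off the root register is REX + 0x8D + ModRM plus a 1- or 4-byte displacement (the 4-or-7 in the comment), and the imm64 movq fallback is 10 bytes; the 6-byte movl figure on X32 I take from the patch as-is. A sketch of the lea half (helper names mine):

    #include <cstdint>

    bool IsInt8(int64_t v) { return v >= -128 && v <= 127; }

    // Mirrors the 4-or-7 accounting in LoadAddressSize.
    int LeaOffRootRegisterSize(int64_t delta) {
      return IsInt8(delta) ? 4   // REX.W 8D ModRM disp8
                           : 7;  // REX.W 8D ModRM disp32
    }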
160 | 174 |
161 | 175 |
162 void MacroAssembler::PushAddress(ExternalReference source) { | 176 void MacroAssembler::PushAddress(ExternalReference source) { |
163 int64_t address = reinterpret_cast<int64_t>(source.address()); | 177 int64_t address = reinterpret_cast<int64_t>(source.address()); |
164 if (is_int32(address) && !Serializer::enabled()) { | 178 if (is_int32(address) && !Serializer::enabled()) { |
165 if (emit_debug_code()) { | 179 if (emit_debug_code()) { |
166 movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE64); | 180 movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE64); |
167 } | 181 } |
168 push(Immediate(static_cast<int32_t>(address))); | 182 push(Immediate(static_cast<int32_t>(address))); |
(...skipping 109 matching lines...) |
278 movq(kScratchRegister, ExternalReference::new_space_mask(isolate())); | 292 movq(kScratchRegister, ExternalReference::new_space_mask(isolate())); |
279 and_(scratch, kScratchRegister); | 293 and_(scratch, kScratchRegister); |
280 } else { | 294 } else { |
281 movq(scratch, ExternalReference::new_space_mask(isolate())); | 295 movq(scratch, ExternalReference::new_space_mask(isolate())); |
282 and_(scratch, object); | 296 and_(scratch, object); |
283 } | 297 } |
284 movq(kScratchRegister, ExternalReference::new_space_start(isolate())); | 298 movq(kScratchRegister, ExternalReference::new_space_start(isolate())); |
285 cmpq(scratch, kScratchRegister); | 299 cmpq(scratch, kScratchRegister); |
286 j(cc, branch, distance); | 300 j(cc, branch, distance); |
287 } else { | 301 } else { |
| 302 #ifndef V8_TARGET_ARCH_X32 |
288 ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask()))); | 303 ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask()))); |
| 304 #endif |
289 intptr_t new_space_start = | 305 intptr_t new_space_start = |
290 reinterpret_cast<intptr_t>(HEAP->NewSpaceStart()); | 306 reinterpret_cast<intptr_t>(HEAP->NewSpaceStart()); |
291 movq(kScratchRegister, -new_space_start, RelocInfo::NONE64); | 307 __n movq(kScratchRegister, -new_space_start, RelocInfo::NONE64); |
292 if (scratch.is(object)) { | 308 if (scratch.is(object)) { |
293 addq(scratch, kScratchRegister); | 309 addq(scratch, kScratchRegister); |
294 } else { | 310 } else { |
295 lea(scratch, Operand(object, kScratchRegister, times_1, 0)); | 311 lea(scratch, Operand(object, kScratchRegister, times_1, 0)); |
296 } | 312 } |
297 and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask()))); | 313 and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask()))); |
298 j(cc, branch, distance); | 314 j(cc, branch, distance); |
299 } | 315 } |
300 } | 316 } |
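Both halves of this function test the same membership predicate, assuming the new-space mask keeps the high (alignment) bits of the reservation, i.e. mask == ~(new_space_size - 1), with start aligned to it. A sketch of the equivalence (names mine):

    #include <cstdint>

    // Path 1: mask the object, compare against the space's start.
    bool InNewSpaceMaskThenCompare(uintptr_t obj, uintptr_t start, uintptr_t mask) {
      return (obj & mask) == start;
    }

    // Path 2: add -start (the movq of -new_space_start), then mask;
    // the and_ sets ZF exactly when the object lies inside the space.
    bool InNewSpaceOffsetThenMask(uintptr_t obj, uintptr_t start, uintptr_t mask) {
      return ((obj - start) & mask) == 0;
    }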
301 | 317 |
(...skipping 216 matching lines...) |
518 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag; | 534 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag; |
519 // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag. | 535 // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag. |
520 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi()); | 536 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi()); |
521 #ifdef DEBUG | 537 #ifdef DEBUG |
522 if (msg != NULL) { | 538 if (msg != NULL) { |
523 RecordComment("Abort message: "); | 539 RecordComment("Abort message: "); |
524 RecordComment(msg); | 540 RecordComment(msg); |
525 } | 541 } |
526 #endif | 542 #endif |
527 push(rax); | 543 push(rax); |
528 movq(kScratchRegister, p0, RelocInfo::NONE64); | 544 __n movq(kScratchRegister, p0, RelocInfo::NONE64); |
529 push(kScratchRegister); | 545 push(kScratchRegister); |
| 546 #ifndef V8_TARGET_ARCH_X32 |
530 movq(kScratchRegister, | 547 movq(kScratchRegister, |
531 reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))), | 548 reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))), |
532 RelocInfo::NONE64); | 549 RelocInfo::NONE64); |
| 550 #else |
| 551 movl(kScratchRegister, |
| 552 reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))), |
| 553 RelocInfo::NONE32); |
| 554 #endif |
533 push(kScratchRegister); | 555 push(kScratchRegister); |
534 | 556 |
535 if (!has_frame_) { | 557 if (!has_frame_) { |
536 // We don't actually want to generate a pile of code for this, so just | 558 // We don't actually want to generate a pile of code for this, so just |
537 // claim there is a stack frame, without generating one. | 559 // claim there is a stack frame, without generating one. |
538 FrameScope scope(this, StackFrame::NONE); | 560 FrameScope scope(this, StackFrame::NONE); |
539 CallRuntime(Runtime::kAbort, 2); | 561 CallRuntime(Runtime::kAbort, 2); |
540 } else { | 562 } else { |
541 CallRuntime(Runtime::kAbort, 2); | 563 CallRuntime(Runtime::kAbort, 2); |
542 } | 564 } |
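How Abort smuggles the pointer-sized value p1 through GC-safe stack slots: p0 carries everything but the tag bits with a valid smi tag, and the second push carries the small correction p1 - p0 as a real smi, so the runtime can rebuild p1 = p0 + correction. A standalone sketch (constants assumed to match the 1-bit tag with kSmiTag == 0):

    #include <cassert>
    #include <cstdint>

    const intptr_t kSmiTagMask = 1;
    const intptr_t kSmiTag = 0;

    int main() {
      intptr_t p1 = 0x12345677;                     // value to report
      intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;  // validly tagged
      intptr_t correction = p1 - p0;                // 0 or 1, fits any smi
      assert(p0 + correction == p1);                // runtime reconstruction
      return 0;
    }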
(...skipping 207 matching lines...) |
750 cmpb(Operand(rax, 0), Immediate(0)); | 772 cmpb(Operand(rax, 0), Immediate(0)); |
751 j(zero, &profiler_disabled); | 773 j(zero, &profiler_disabled); |
752 | 774 |
753 // Third parameter is the address of the actual getter function. | 775 // Third parameter is the address of the actual getter function. |
754 movq(thunk_last_arg, function_address, RelocInfo::EXTERNAL_REFERENCE); | 776 movq(thunk_last_arg, function_address, RelocInfo::EXTERNAL_REFERENCE); |
755 movq(rax, thunk_address, RelocInfo::EXTERNAL_REFERENCE); | 777 movq(rax, thunk_address, RelocInfo::EXTERNAL_REFERENCE); |
756 jmp(&end_profiler_check); | 778 jmp(&end_profiler_check); |
757 | 779 |
758 bind(&profiler_disabled); | 780 bind(&profiler_disabled); |
759 // Call the api function! | 781 // Call the api function! |
| 782 #ifndef V8_TARGET_ARCH_X32 |
760 movq(rax, reinterpret_cast<int64_t>(function_address), | 783 movq(rax, reinterpret_cast<int64_t>(function_address), |
761 RelocInfo::EXTERNAL_REFERENCE); | 784 RelocInfo::EXTERNAL_REFERENCE); |
| 785 #else |
| 786 movl(rax, reinterpret_cast<uint32_t>(function_address), |
| 787 RelocInfo::EXTERNAL_REFERENCE); |
| 788 #endif |
762 | 789 |
763 bind(&end_profiler_check); | 790 bind(&end_profiler_check); |
764 | 791 |
765 // Call the api function! | 792 // Call the api function! |
766 call(rax); | 793 call(rax); |
767 | 794 |
768 if (FLAG_log_timer_events) { | 795 if (FLAG_log_timer_events) { |
769 FrameScope frame(this, StackFrame::MANUAL); | 796 FrameScope frame(this, StackFrame::MANUAL); |
770 PushSafepointRegisters(); | 797 PushSafepointRegisters(); |
771 PrepareCallCFunction(1); | 798 PrepareCallCFunction(1); |
(...skipping 13 matching lines...) |
785 #endif | 812 #endif |
786 // Check if the result handle holds 0. | 813 // Check if the result handle holds 0. |
787 testq(rax, rax); | 814 testq(rax, rax); |
788 j(zero, &empty_result); | 815 j(zero, &empty_result); |
789 // It was non-zero. Dereference to get the result value. | 816 // It was non-zero. Dereference to get the result value. |
790 movq(rax, Operand(rax, 0)); | 817 movq(rax, Operand(rax, 0)); |
791 jmp(&prologue); | 818 jmp(&prologue); |
792 bind(&empty_result); | 819 bind(&empty_result); |
793 } | 820 } |
794 // Load the value from ReturnValue | 821 // Load the value from ReturnValue |
| 822 #ifndef V8_TARGET_ARCH_X32 |
795 movq(rax, Operand(rbp, return_value_offset * kPointerSize)); | 823 movq(rax, Operand(rbp, return_value_offset * kPointerSize)); |
| 824 #else |
| 825 movl(rax, |
| 826 Operand(rbp, 2 * kHWRegSize + (return_value_offset - 2) * kPointerSize)); |
| 827 #endif |
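My reading of the X32 offset above: the two slots nearest rbp (the saved frame pointer and the return address) are written by the 8-byte hardware push/call, while the V8-managed slots above them are 4-byte pointers, hence the split between kHWRegSize and kPointerSize. A sketch of that arithmetic (assuming kHWRegSize is the patch's 8-byte hardware-slot constant):

    const int kPointerSize = 4;  // X32 heap pointer / managed slot
    const int kHWRegSize = 8;    // slot written by hardware push/call

    // Byte offset from rbp for a value `slots` pointer-slots up,
    // where the first two slots are hardware-sized.
    int FrameOffsetX32(int slots) {
      return 2 * kHWRegSize + (slots - 2) * kPointerSize;
    }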
796 bind(&prologue); | 828 bind(&prologue); |
797 | 829 |
798 // No more valid handles (the result handle was the last one). Restore | 830 // No more valid handles (the result handle was the last one). Restore |
799 // previous handle scope. | 831 // previous handle scope. |
800 subl(Operand(base_reg, kLevelOffset), Immediate(1)); | 832 subl(Operand(base_reg, kLevelOffset), Immediate(1)); |
801 movq(Operand(base_reg, kNextOffset), prev_next_address_reg); | 833 movq(Operand(base_reg, kNextOffset), prev_next_address_reg); |
802 cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset)); | 834 cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset)); |
803 j(not_equal, &delete_allocated_handles); | 835 j(not_equal, &delete_allocated_handles); |
804 bind(&leave_exit_frame); | 836 bind(&leave_exit_frame); |
805 | 837 |
(...skipping 113 matching lines...) |
919 void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, | 951 void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, |
920 Register exclusion1, | 952 Register exclusion1, |
921 Register exclusion2, | 953 Register exclusion2, |
922 Register exclusion3) { | 954 Register exclusion3) { |
923 // We don't allow a GC during a store buffer overflow so there is no need to | 955 // We don't allow a GC during a store buffer overflow so there is no need to |
924 // store the registers in any particular way, but we do have to store and | 956 // store the registers in any particular way, but we do have to store and |
925 // restore them. | 957 // restore them. |
926 for (int i = 0; i < kNumberOfSavedRegs; i++) { | 958 for (int i = 0; i < kNumberOfSavedRegs; i++) { |
927 Register reg = saved_regs[i]; | 959 Register reg = saved_regs[i]; |
928 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) { | 960 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) { |
929 push(reg); | 961 __k push(reg); |
930 } | 962 } |
931 } | 963 } |
932 // R12 to r15 are callee save on all platforms. | 964 // R12 to r15 are callee save on all platforms. |
933 if (fp_mode == kSaveFPRegs) { | 965 if (fp_mode == kSaveFPRegs) { |
934 subq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters)); | 966 subq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters)); |
935 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { | 967 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { |
936 XMMRegister reg = XMMRegister::from_code(i); | 968 XMMRegister reg = XMMRegister::from_code(i); |
937 movsd(Operand(rsp, i * kDoubleSize), reg); | 969 movsd(Operand(rsp, i * kDoubleSize), reg); |
938 } | 970 } |
939 } | 971 } |
940 } | 972 } |
941 | 973 |
942 | 974 |
943 void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, | 975 void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, |
944 Register exclusion1, | 976 Register exclusion1, |
945 Register exclusion2, | 977 Register exclusion2, |
946 Register exclusion3) { | 978 Register exclusion3) { |
947 if (fp_mode == kSaveFPRegs) { | 979 if (fp_mode == kSaveFPRegs) { |
948 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { | 980 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { |
949 XMMRegister reg = XMMRegister::from_code(i); | 981 XMMRegister reg = XMMRegister::from_code(i); |
950 movsd(reg, Operand(rsp, i * kDoubleSize)); | 982 movsd(reg, Operand(rsp, i * kDoubleSize)); |
951 } | 983 } |
952 addq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters)); | 984 addq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters)); |
953 } | 985 } |
954 for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) { | 986 for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) { |
955 Register reg = saved_regs[i]; | 987 Register reg = saved_regs[i]; |
956 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) { | 988 if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) { |
957 pop(reg); | 989 __k pop(reg); |
958 } | 990 } |
959 } | 991 } |
960 } | 992 } |
961 | 993 |
962 | 994 |
963 void MacroAssembler::Set(Register dst, int64_t x) { | 995 void MacroAssembler::Set(Register dst, int64_t x) { |
964 if (x == 0) { | 996 if (x == 0) { |
965 xorl(dst, dst); | 997 xorl(dst, dst); |
966 } else if (is_uint32(x)) { | 998 } else if (is_uint32(x)) { |
967 movl(dst, Immediate(static_cast<uint32_t>(x))); | 999 movl(dst, Immediate(static_cast<uint32_t>(x))); |
968 } else if (is_int32(x)) { | 1000 } else if (is_int32(x)) { |
969 movq(dst, Immediate(static_cast<int32_t>(x))); | 1001 __k movq(dst, Immediate(static_cast<int32_t>(x))); |
970 } else { | 1002 } else { |
971 movq(dst, x, RelocInfo::NONE64); | 1003 __k movq(dst, x, RelocInfo::NONE64); |
972 } | 1004 } |
973 } | 1005 } |
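Set() tiers the immediate width: xorl for zero, a 5-byte movl when the value zero-extends (32-bit writes clear the upper half), the sign-extending REX.W C7 form for other int32 values, and the full 10-byte imm64 movq only as a last resort. A sketch of the selection (enum names mine):

    #include <cstdint>

    enum Encoding { kXorl, kMovlImm32, kMovqImm32SignExtended, kMovqImm64 };

    Encoding PickSetEncoding(int64_t x) {
      if (x == 0) return kXorl;                              // no immediate at all
      if (x == static_cast<uint32_t>(x)) return kMovlImm32;  // zero-extends
      if (x == static_cast<int32_t>(x)) return kMovqImm32SignExtended;
      return kMovqImm64;                                     // 10 bytes
    }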
974 | 1006 |
| 1007 |
| 1008 #ifndef V8_TARGET_ARCH_X32 |
975 void MacroAssembler::Set(const Operand& dst, int64_t x) { | 1009 void MacroAssembler::Set(const Operand& dst, int64_t x) { |
976 if (is_int32(x)) { | 1010 if (is_int32(x)) { |
977 movq(dst, Immediate(static_cast<int32_t>(x))); | 1011 movq(dst, Immediate(static_cast<int32_t>(x))); |
978 } else { | 1012 } else { |
979 Set(kScratchRegister, x); | 1013 Set(kScratchRegister, x); |
980 movq(dst, kScratchRegister); | 1014 movq(dst, kScratchRegister); |
981 } | 1015 } |
982 } | 1016 } |
| 1017 #else |
| 1018 void MacroAssembler::Set(const Operand& dst, int32_t x) { |
| 1019 movl(dst, Immediate(x)); |
| 1020 } |
| 1021 #endif |
983 | 1022 |
984 | 1023 |
985 bool MacroAssembler::IsUnsafeInt(const int x) { | 1024 bool MacroAssembler::IsUnsafeInt(const int x) { |
986 static const int kMaxBits = 17; | 1025 static const int kMaxBits = 17; |
987 return !is_intn(x, kMaxBits); | 1026 return !is_intn(x, kMaxBits); |
988 } | 1027 } |
989 | 1028 |
990 | 1029 |
| 1030 #ifndef V8_TARGET_ARCH_X32 |
991 void MacroAssembler::SafeMove(Register dst, Smi* src) { | 1031 void MacroAssembler::SafeMove(Register dst, Smi* src) { |
992 ASSERT(!dst.is(kScratchRegister)); | 1032 ASSERT(!dst.is(kScratchRegister)); |
993 ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi. | 1033 ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi. |
994 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { | 1034 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { |
995 Move(dst, Smi::FromInt(src->value() ^ jit_cookie())); | 1035 Move(dst, Smi::FromInt(src->value() ^ jit_cookie())); |
996 Move(kScratchRegister, Smi::FromInt(jit_cookie())); | 1036 Move(kScratchRegister, Smi::FromInt(jit_cookie())); |
997 xor_(dst, kScratchRegister); | 1037 xor_(dst, kScratchRegister); |
998 } else { | 1038 } else { |
999 Move(dst, src); | 1039 Move(dst, src); |
1000 } | 1040 } |
1001 } | 1041 } |
| 1042 #else |
| 1043 void MacroAssembler::SafeMove(Register dst, Smi* src) { |
| 1044 ASSERT(!dst.is(kScratchRegister)); |
| 1045 ASSERT(kSmiValueSize == 31); // JIT cookie can be converted to Smi. |
| 1046 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { |
| 1047 movl(dst, Immediate(reinterpret_cast<int32_t>(src) ^ jit_cookie())); |
| 1048 xorl(dst, Immediate(jit_cookie())); |
| 1049 } else { |
| 1050 Move(dst, src); |
| 1051 } |
| 1052 } |
| 1053 #endif |
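Both SafeMove variants apply V8's JIT-cookie hardening: a script-controlled immediate never appears verbatim in generated code; it is emitted XORed with a secret cookie and undone at run time, which defeats attempts to hide attacker-chosen bytes inside constants. A toy illustration (cookie value invented):

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t untrusted = 0x41414141;        // attacker-influenced constant
      int32_t cookie = 0x5A5A1234;           // per-process secret (made up)
      int32_t emitted = untrusted ^ cookie;  // what the instruction stream holds
      assert((emitted ^ cookie) == untrusted);  // the runtime xor undoes it
      return 0;
    }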
1002 | 1054 |
1003 | 1055 |
| 1056 #ifndef V8_TARGET_ARCH_X32 |
1004 void MacroAssembler::SafePush(Smi* src) { | 1057 void MacroAssembler::SafePush(Smi* src) { |
1005 ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi. | 1058 ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi. |
1006 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { | 1059 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { |
1007 Push(Smi::FromInt(src->value() ^ jit_cookie())); | 1060 Push(Smi::FromInt(src->value() ^ jit_cookie())); |
1008 Move(kScratchRegister, Smi::FromInt(jit_cookie())); | 1061 Move(kScratchRegister, Smi::FromInt(jit_cookie())); |
1009 xor_(Operand(rsp, 0), kScratchRegister); | 1062 xor_(Operand(rsp, 0), kScratchRegister); |
1010 } else { | 1063 } else { |
1011 Push(src); | 1064 Push(src); |
1012 } | 1065 } |
1013 } | 1066 } |
| 1067 #else |
| 1068 void MacroAssembler::SafePush(Smi* src) { |
| 1069 ASSERT(kSmiValueSize == 31); // JIT cookie can be converted to Smi. |
| 1070 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { |
| 1071 Push(Immediate(reinterpret_cast<int32_t>(src) ^ jit_cookie())); |
| 1072 xorl(Operand(rsp, 0), Immediate(jit_cookie())); |
| 1073 } else { |
| 1074 Push(src); |
| 1075 } |
| 1076 } |
| 1077 #endif |
1014 | 1078 |
1015 | 1079 |
1016 // ---------------------------------------------------------------------------- | 1080 // ---------------------------------------------------------------------------- |
1017 // Smi tagging, untagging and tag detection. | 1081 // Smi tagging, untagging and tag detection. |
1018 | 1082 |
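A note before the helpers: the two builds tag smis differently. On x64 a smi keeps a 32-bit payload in the upper half of the word (kSmiShift == 32, kSmiValueSize == 32); the X32 port uses the ia32 layout, a 31-bit payload shifted left by one (kSmiShift == 1, kSmiValueSize == 31, per the asserts above). A worked example:

    #include <cassert>
    #include <cstdint>

    int64_t TagX64(int32_t v) { return static_cast<int64_t>(v) << 32; }
    int32_t TagX32(int32_t v) { return v << 1; }  // v must fit in 31 bits

    int main() {
      assert(TagX64(5) == 0x0000000500000000LL);  // payload in the high word
      assert(TagX32(5) == 10);                    // low bit is the (zero) tag
      return 0;
    }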
1019 Register MacroAssembler::GetSmiConstant(Smi* source) { | 1083 Register MacroAssembler::GetSmiConstant(Smi* source) { |
1020 int value = source->value(); | 1084 int value = source->value(); |
1021 if (value == 0) { | 1085 if (value == 0) { |
1022 xorl(kScratchRegister, kScratchRegister); | 1086 xorl(kScratchRegister, kScratchRegister); |
1023 return kScratchRegister; | 1087 return kScratchRegister; |
1024 } | 1088 } |
1025 if (value == 1) { | 1089 if (value == 1) { |
1026 return kSmiConstantRegister; | 1090 return kSmiConstantRegister; |
1027 } | 1091 } |
1028 LoadSmiConstant(kScratchRegister, source); | 1092 LoadSmiConstant(kScratchRegister, source); |
1029 return kScratchRegister; | 1093 return kScratchRegister; |
1030 } | 1094 } |
1031 | 1095 |
1032 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) { | 1096 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) { |
| 1097 #ifndef V8_TARGET_ARCH_X32 |
1033 if (emit_debug_code()) { | 1098 if (emit_debug_code()) { |
1034 movq(dst, | 1099 movq(dst, |
1035 reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)), | 1100 reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)), |
1036 RelocInfo::NONE64); | 1101 RelocInfo::NONE64); |
1037 cmpq(dst, kSmiConstantRegister); | 1102 cmpq(dst, kSmiConstantRegister); |
1038 if (allow_stub_calls()) { | 1103 if (allow_stub_calls()) { |
1039 Assert(equal, "Uninitialized kSmiConstantRegister"); | 1104 Assert(equal, "Uninitialized kSmiConstantRegister"); |
1040 } else { | 1105 } else { |
1041 Label ok; | 1106 Label ok; |
1042 j(equal, &ok, Label::kNear); | 1107 j(equal, &ok, Label::kNear); |
1043 int3(); | 1108 int3(); |
1044 bind(&ok); | 1109 bind(&ok); |
1045 } | 1110 } |
1046 } | 1111 } |
| 1112 #else |
| 1113 // Disable check for Uninitialized kSmiConstantRegister for X32. |
| 1114 #endif |
1047 int value = source->value(); | 1115 int value = source->value(); |
1048 if (value == 0) { | 1116 if (value == 0) { |
1049 xorl(dst, dst); | 1117 xorl(dst, dst); |
1050 return; | 1118 return; |
1051 } | 1119 } |
1052 bool negative = value < 0; | 1120 bool negative = value < 0; |
1053 unsigned int uvalue = negative ? -value : value; | 1121 unsigned int uvalue = negative ? -value : value; |
1054 | 1122 |
1055 switch (uvalue) { | 1123 switch (uvalue) { |
1056 case 9: | 1124 case 9: |
1057 lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0)); | 1125 lea(dst, |
| 1126 Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0)); |
1058 break; | 1127 break; |
1059 case 8: | 1128 case 8: |
1060 xorl(dst, dst); | 1129 xorl(dst, dst); |
1061 lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0)); | 1130 lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0)); |
1062 break; | 1131 break; |
1063 case 4: | 1132 case 4: |
1064 xorl(dst, dst); | 1133 xorl(dst, dst); |
1065 lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0)); | 1134 lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0)); |
1066 break; | 1135 break; |
1067 case 5: | 1136 case 5: |
1068 lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0)); | 1137 lea(dst, |
| 1138 Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0)); |
1069 break; | 1139 break; |
1070 case 3: | 1140 case 3: |
1071 lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0)); | 1141 lea(dst, |
| 1142 Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0)); |
1072 break; | 1143 break; |
1073 case 2: | 1144 case 2: |
1074 lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0)); | 1145 lea(dst, |
| 1146 Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0)); |
1075 break; | 1147 break; |
1076 case 1: | 1148 case 1: |
1077 movq(dst, kSmiConstantRegister); | 1149 movq(dst, kSmiConstantRegister); |
1078 break; | 1150 break; |
1079 case 0: | 1151 case 0: |
1080 UNREACHABLE(); | 1152 UNREACHABLE(); |
1081 return; | 1153 return; |
1082 default: | 1154 default: |
1083 movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE64); | 1155 __k movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE64); |
1084 return; | 1156 return; |
1085 } | 1157 } |
1086 if (negative) { | 1158 if (negative) { |
1087 neg(dst); | 1159 neg(dst); |
1088 } | 1160 } |
1089 } | 1161 } |
1090 | 1162 |
1091 | 1163 |
1092 void MacroAssembler::Integer32ToSmi(Register dst, Register src) { | 1164 void MacroAssembler::Integer32ToSmi(Register dst, Register src) { |
1093 STATIC_ASSERT(kSmiTag == 0); | 1165 STATIC_ASSERT(kSmiTag == 0); |
1094 if (!dst.is(src)) { | 1166 if (!dst.is(src)) { |
1095 movl(dst, src); | 1167 movl(dst, src); |
1096 } | 1168 } |
1097 shl(dst, Immediate(kSmiShift)); | 1169 shl(dst, Immediate(kSmiShift)); |
1098 } | 1170 } |
1099 | 1171 |
1100 | 1172 |
1101 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) { | 1173 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) { |
1102 if (emit_debug_code()) { | 1174 if (emit_debug_code()) { |
1103 testb(dst, Immediate(0x01)); | 1175 testb(dst, Immediate(0x01)); |
1104 Label ok; | 1176 Label ok; |
1105 j(zero, &ok, Label::kNear); | 1177 j(zero, &ok, Label::kNear); |
1106 if (allow_stub_calls()) { | 1178 if (allow_stub_calls()) { |
1107 Abort("Integer32ToSmiField writing to non-smi location"); | 1179 Abort("Integer32ToSmiField writing to non-smi location"); |
1108 } else { | 1180 } else { |
1109 int3(); | 1181 int3(); |
1110 } | 1182 } |
1111 bind(&ok); | 1183 bind(&ok); |
1112 } | 1184 } |
| 1185 #ifndef V8_TARGET_ARCH_X32 |
1113 ASSERT(kSmiShift % kBitsPerByte == 0); | 1186 ASSERT(kSmiShift % kBitsPerByte == 0); |
1114 movl(Operand(dst, kSmiShift / kBitsPerByte), src); | 1187 movl(Operand(dst, kSmiShift / kBitsPerByte), src); |
| 1188 #else |
| 1189 Integer32ToSmi(kScratchRegister, src); |
| 1190 movl(dst, kScratchRegister); |
| 1191 #endif |
1115 } | 1192 } |
1116 | 1193 |
1117 | 1194 |
1118 void MacroAssembler::Integer64PlusConstantToSmi(Register dst, | 1195 void MacroAssembler::Integer64PlusConstantToSmi(Register dst, |
1119 Register src, | 1196 Register src, |
1120 int constant) { | 1197 int constant) { |
1121 if (dst.is(src)) { | 1198 if (dst.is(src)) { |
1122 addl(dst, Immediate(constant)); | 1199 addl(dst, Immediate(constant)); |
1123 } else { | 1200 } else { |
1124 leal(dst, Operand(src, constant)); | 1201 leal(dst, Operand(src, constant)); |
1125 } | 1202 } |
1126 shl(dst, Immediate(kSmiShift)); | 1203 shl(dst, Immediate(kSmiShift)); |
1127 } | 1204 } |
1128 | 1205 |
1129 | 1206 |
1130 void MacroAssembler::SmiToInteger32(Register dst, Register src) { | 1207 void MacroAssembler::SmiToInteger32(Register dst, Register src) { |
1131 STATIC_ASSERT(kSmiTag == 0); | 1208 STATIC_ASSERT(kSmiTag == 0); |
1132 if (!dst.is(src)) { | 1209 if (!dst.is(src)) { |
1133 movq(dst, src); | 1210 movq(dst, src); |
1134 } | 1211 } |
| 1212 #ifndef V8_TARGET_ARCH_X32 |
1135 shr(dst, Immediate(kSmiShift)); | 1213 shr(dst, Immediate(kSmiShift)); |
| 1214 #else |
| 1215 sarl(dst, Immediate(kSmiShift)); |
| 1216 #endif |
1136 } | 1217 } |
1137 | 1218 |
1138 | 1219 |
1139 void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) { | 1220 void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) { |
| 1221 #ifndef V8_TARGET_ARCH_X32 |
1140 movl(dst, Operand(src, kSmiShift / kBitsPerByte)); | 1222 movl(dst, Operand(src, kSmiShift / kBitsPerByte)); |
| 1223 #else |
| 1224 movl(dst, src); |
| 1225 sarl(dst, Immediate(kSmiShift)); |
| 1226 #endif |
1141 } | 1227 } |
1142 | 1228 |
1143 | 1229 |
1144 void MacroAssembler::SmiToInteger64(Register dst, Register src) { | 1230 void MacroAssembler::SmiToInteger64(Register dst, Register src) { |
1145 STATIC_ASSERT(kSmiTag == 0); | 1231 STATIC_ASSERT(kSmiTag == 0); |
1146 if (!dst.is(src)) { | 1232 if (!dst.is(src)) { |
1147 movq(dst, src); | 1233 movq(dst, src); |
1148 } | 1234 } |
| 1235 #ifndef V8_TARGET_ARCH_X32 |
1149 sar(dst, Immediate(kSmiShift)); | 1236 sar(dst, Immediate(kSmiShift)); |
| 1237 #else |
| 1238 shl(dst, Immediate(32)); |
| 1239 sar(dst, Immediate(32 + kSmiShift)); |
| 1240 #endif |
1150 } | 1241 } |
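The X32 pair shl(32) / sar(32 + kSmiShift) does sign extension and untagging at once: parking the smi word at the top of the 64-bit register lets a single arithmetic right shift strip the tag and replicate the sign bit. A sketch (arithmetic right shift on negatives assumed, which holds on V8's toolchains):

    #include <cassert>
    #include <cstdint>

    int64_t SmiToInt64X32(uint64_t reg) {  // smi in the low 32 bits
      const int kSmiShift = 1;
      int64_t t = static_cast<int64_t>(reg << 32);  // shl(dst, 32)
      return t >> (32 + kSmiShift);                 // sar(dst, 33)
    }

    int main() {
      assert(SmiToInt64X32(10u) == 5);                         // smi 5
      assert(SmiToInt64X32(static_cast<uint32_t>(-2)) == -1);  // smi -1
      return 0;
    }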
1151 | 1242 |
1152 | 1243 |
1153 void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) { | 1244 void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) { |
| 1245 #ifndef V8_TARGET_ARCH_X32 |
1154 movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte)); | 1246 movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte)); |
| 1247 #else |
| 1248 movl(dst, src); |
| 1249 SmiToInteger64(dst, dst); |
| 1250 #endif |
1155 } | 1251 } |
1156 | 1252 |
1157 | 1253 |
1158 void MacroAssembler::SmiTest(Register src) { | 1254 void MacroAssembler::SmiTest(Register src) { |
1159 testq(src, src); | 1255 testq(src, src); |
1160 } | 1256 } |
1161 | 1257 |
1162 | 1258 |
1163 void MacroAssembler::SmiCompare(Register smi1, Register smi2) { | 1259 void MacroAssembler::SmiCompare(Register smi1, Register smi2) { |
1164 AssertSmi(smi1); | 1260 AssertSmi(smi1); |
(...skipping 28 matching lines...) |
1193 | 1289 |
1194 void MacroAssembler::SmiCompare(const Operand& dst, Register src) { | 1290 void MacroAssembler::SmiCompare(const Operand& dst, Register src) { |
1195 AssertSmi(dst); | 1291 AssertSmi(dst); |
1196 AssertSmi(src); | 1292 AssertSmi(src); |
1197 cmpq(dst, src); | 1293 cmpq(dst, src); |
1198 } | 1294 } |
1199 | 1295 |
1200 | 1296 |
1201 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) { | 1297 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) { |
1202 AssertSmi(dst); | 1298 AssertSmi(dst); |
| 1299 #ifndef V8_TARGET_ARCH_X32 |
1203 cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value())); | 1300 cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value())); |
| 1301 #else |
| 1302 cmpl(dst, Immediate(src)); |
| 1303 #endif |
1204 } | 1304 } |
1205 | 1305 |
1206 | 1306 |
1207 void MacroAssembler::Cmp(const Operand& dst, Smi* src) { | 1307 void MacroAssembler::Cmp(const Operand& dst, Smi* src) { |
1208 // The Operand cannot use the smi register. | 1308 // The Operand cannot use the smi register. |
1209 Register smi_reg = GetSmiConstant(src); | 1309 Register smi_reg = GetSmiConstant(src); |
1210 ASSERT(!dst.AddressUsesRegister(smi_reg)); | 1310 ASSERT(!dst.AddressUsesRegister(smi_reg)); |
1211 cmpq(dst, smi_reg); | 1311 cmpq(dst, smi_reg); |
1212 } | 1312 } |
1213 | 1313 |
1214 | 1314 |
1215 void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) { | 1315 void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) { |
| 1316 #ifndef V8_TARGET_ARCH_X32 |
1216 cmpl(Operand(dst, kSmiShift / kBitsPerByte), src); | 1317 cmpl(Operand(dst, kSmiShift / kBitsPerByte), src); |
| 1318 #else |
| 1319 SmiToInteger32(kScratchRegister, dst); |
| 1320 cmpl(kScratchRegister, src); |
| 1321 #endif |
1217 } | 1322 } |
1218 | 1323 |
1219 | 1324 |
1220 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst, | 1325 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst, |
1221 Register src, | 1326 Register src, |
1222 int power) { | 1327 int power) { |
1223 ASSERT(power >= 0); | 1328 ASSERT(power >= 0); |
1224 ASSERT(power < 64); | 1329 ASSERT(power < 64); |
1225 if (power == 0) { | 1330 if (power == 0) { |
1226 SmiToInteger64(dst, src); | 1331 SmiToInteger64(dst, src); |
(...skipping 62 matching lines...) |
1289 testb(kScratchRegister, Immediate(3)); | 1394 testb(kScratchRegister, Immediate(3)); |
1290 return zero; | 1395 return zero; |
1291 } | 1396 } |
1292 | 1397 |
1293 | 1398 |
1294 Condition MacroAssembler::CheckBothSmi(Register first, Register second) { | 1399 Condition MacroAssembler::CheckBothSmi(Register first, Register second) { |
1295 if (first.is(second)) { | 1400 if (first.is(second)) { |
1296 return CheckSmi(first); | 1401 return CheckSmi(first); |
1297 } | 1402 } |
1298 STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3); | 1403 STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3); |
| 1404 #ifndef V8_TARGET_ARCH_X32 |
1299 leal(kScratchRegister, Operand(first, second, times_1, 0)); | 1405 leal(kScratchRegister, Operand(first, second, times_1, 0)); |
1300 testb(kScratchRegister, Immediate(0x03)); | 1406 testb(kScratchRegister, Immediate(0x03)); |
| 1407 #else |
| 1408 movl(kScratchRegister, first); |
| 1409 orl(kScratchRegister, second); |
| 1410 testb(kScratchRegister, Immediate(kSmiTagMask)); |
| 1411 #endif |
1301 return zero; | 1412 return zero; |
1302 } | 1413 } |
1303 | 1414 |
1304 | 1415 |
1305 Condition MacroAssembler::CheckBothNonNegativeSmi(Register first, | 1416 Condition MacroAssembler::CheckBothNonNegativeSmi(Register first, |
1306 Register second) { | 1417 Register second) { |
1307 if (first.is(second)) { | 1418 if (first.is(second)) { |
1308 return CheckNonNegativeSmi(first); | 1419 return CheckNonNegativeSmi(first); |
1309 } | 1420 } |
1310 movq(kScratchRegister, first); | 1421 movq(kScratchRegister, first); |
(...skipping 24 matching lines...) |
1335 | 1446 |
1336 | 1447 |
1337 Condition MacroAssembler::CheckIsMinSmi(Register src) { | 1448 Condition MacroAssembler::CheckIsMinSmi(Register src) { |
1338 ASSERT(!src.is(kScratchRegister)); | 1449 ASSERT(!src.is(kScratchRegister)); |
1339 // If we overflow by subtracting one, it's the minimal smi value. | 1450 // If we overflow by subtracting one, it's the minimal smi value. |
1340 cmpq(src, kSmiConstantRegister); | 1451 cmpq(src, kSmiConstantRegister); |
1341 return overflow; | 1452 return overflow; |
1342 } | 1453 } |
1343 | 1454 |
1344 | 1455 |
| 1456 #ifndef V8_TARGET_ARCH_X32 |
1345 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) { | 1457 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) { |
1346 // A 32-bit integer value can always be converted to a smi. | 1458 // A 32-bit integer value can always be converted to a smi. |
1347 return always; | 1459 return always; |
1348 } | 1460 } |
1349 | 1461 |
1350 | 1462 |
1351 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) { | 1463 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) { |
1352 // An unsigned 32-bit integer value is valid as long as the high bit | 1464 // An unsigned 32-bit integer value is valid as long as the high bit |
1353 // is not set. | 1465 // is not set. |
1354 testl(src, src); | 1466 testl(src, src); |
1355 return positive; | 1467 return positive; |
1356 } | 1468 } |
| 1469 #else |
| 1470 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) { |
| 1471 cmpl(src, Immediate(0xc0000000)); |
| 1472 return positive; |
| 1473 } |
| 1474 |
| 1475 |
| 1476 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) { |
| 1477 testl(src, Immediate(0xc0000000)); |
| 1478 return zero; |
| 1479 } |
| 1480 #endif |
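The magic constant 0xc0000000 encodes the 31-bit smi window [-2^30, 2^30): cmpl computes src - (-2^30) = src + 2^30 and the sign flag ends up clear exactly inside the window, while testl is zero iff the top two bits are clear, i.e. an unsigned value below 2^30. Equivalent C (names mine):

    #include <cassert>
    #include <cstdint>

    bool Int32FitsSmi31(int32_t v) {   // mirrors cmpl + positive
      return v >= -(1 << 30) && v < (1 << 30);
    }

    bool Uint32FitsSmi31(uint32_t v) {  // mirrors testl + zero
      return (v & 0xc0000000u) == 0;
    }

    int main() {
      assert(Int32FitsSmi31(-(1 << 30)) && !Int32FitsSmi31(1 << 30));
      assert(Uint32FitsSmi31(0x3fffffffu) && !Uint32FitsSmi31(0x40000000u));
      return 0;
    }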
1357 | 1481 |
1358 | 1482 |
1359 void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) { | 1483 void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) { |
1360 if (dst.is(src)) { | 1484 if (dst.is(src)) { |
1361 andl(dst, Immediate(kSmiTagMask)); | 1485 andl(dst, Immediate(kSmiTagMask)); |
1362 } else { | 1486 } else { |
1363 movl(dst, Immediate(kSmiTagMask)); | 1487 movl(dst, Immediate(kSmiTagMask)); |
1364 andl(dst, src); | 1488 andl(dst, src); |
1365 } | 1489 } |
1366 } | 1490 } |
(...skipping 143 matching lines...) |
1510 LoadSmiConstant(dst, constant); | 1634 LoadSmiConstant(dst, constant); |
1511 addq(dst, src); | 1635 addq(dst, src); |
1512 return; | 1636 return; |
1513 } | 1637 } |
1514 } | 1638 } |
1515 } | 1639 } |
1516 | 1640 |
1517 | 1641 |
1518 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) { | 1642 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) { |
1519 if (constant->value() != 0) { | 1643 if (constant->value() != 0) { |
| 1644 #ifndef V8_TARGET_ARCH_X32 |
1520 addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value())); | 1645 addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value())); |
| 1646 #else |
| 1647 addl(dst, Immediate(constant)); |
| 1648 #endif |
1521 } | 1649 } |
1522 } | 1650 } |
1523 | 1651 |
1524 | 1652 |
1525 void MacroAssembler::SmiAddConstant(Register dst, | 1653 void MacroAssembler::SmiAddConstant(Register dst, |
1526 Register src, | 1654 Register src, |
1527 Smi* constant, | 1655 Smi* constant, |
1528 Label* on_not_smi_result, | 1656 Label* on_not_smi_result, |
1529 Label::Distance near_jump) { | 1657 Label::Distance near_jump) { |
1530 if (constant->value() == 0) { | 1658 if (constant->value() == 0) { |
(...skipping 304 matching lines...) |
1835 } | 1963 } |
1836 SmiToInteger32(rax, src1); | 1964 SmiToInteger32(rax, src1); |
1837 // We need to rule out dividing Smi::kMinValue by -1, since that would | 1965 // We need to rule out dividing Smi::kMinValue by -1, since that would |
1838 // overflow in idiv and raise an exception. | 1966 // overflow in idiv and raise an exception. |
1839 // We combine this with negative zero test (negative zero only happens | 1967 // We combine this with negative zero test (negative zero only happens |
1840 // when dividing zero by a negative number). | 1968 // when dividing zero by a negative number). |
1841 | 1969 |
1842 // We overshoot a little and go to slow case if we divide min-value | 1970 // We overshoot a little and go to slow case if we divide min-value |
1843 // by any negative value, not just -1. | 1971 // by any negative value, not just -1. |
1844 Label safe_div; | 1972 Label safe_div; |
| 1973 #ifndef V8_TARGET_ARCH_X32 |
1845 testl(rax, Immediate(0x7fffffff)); | 1974 testl(rax, Immediate(0x7fffffff)); |
| 1975 #else |
| 1976 testl(rax, Immediate(0x3fffffff)); |
| 1977 #endif |
1846 j(not_zero, &safe_div, Label::kNear); | 1978 j(not_zero, &safe_div, Label::kNear); |
1847 testq(src2, src2); | 1979 testq(src2, src2); |
1848 if (src1.is(rax)) { | 1980 if (src1.is(rax)) { |
1849 j(positive, &safe_div, Label::kNear); | 1981 j(positive, &safe_div, Label::kNear); |
1850 movq(src1, kScratchRegister); | 1982 movq(src1, kScratchRegister); |
1851 jmp(on_not_smi_result, near_jump); | 1983 jmp(on_not_smi_result, near_jump); |
1852 } else { | 1984 } else { |
1853 j(negative, on_not_smi_result, near_jump); | 1985 j(negative, on_not_smi_result, near_jump); |
1854 } | 1986 } |
1855 bind(&safe_div); | 1987 bind(&safe_div); |
(...skipping 74 matching lines...) |
1930 j(negative, on_not_smi_result, near_jump); | 2062 j(negative, on_not_smi_result, near_jump); |
1931 bind(&smi_result); | 2063 bind(&smi_result); |
1932 Integer32ToSmi(dst, rdx); | 2064 Integer32ToSmi(dst, rdx); |
1933 } | 2065 } |
1934 | 2066 |
1935 | 2067 |
1936 void MacroAssembler::SmiNot(Register dst, Register src) { | 2068 void MacroAssembler::SmiNot(Register dst, Register src) { |
1937 ASSERT(!dst.is(kScratchRegister)); | 2069 ASSERT(!dst.is(kScratchRegister)); |
1938 ASSERT(!src.is(kScratchRegister)); | 2070 ASSERT(!src.is(kScratchRegister)); |
1939 // Set tag and padding bits before negating, so that they are zero afterwards. | 2071 // Set tag and padding bits before negating, so that they are zero afterwards. |
| 2072 #ifndef V8_TARGET_ARCH_X32 |
1940 movl(kScratchRegister, Immediate(~0)); | 2073 movl(kScratchRegister, Immediate(~0)); |
| 2074 #else |
| 2075 movl(kScratchRegister, Immediate(1)); |
| 2076 #endif |
1941 if (dst.is(src)) { | 2077 if (dst.is(src)) { |
1942 xor_(dst, kScratchRegister); | 2078 xor_(dst, kScratchRegister); |
1943 } else { | 2079 } else { |
1944 lea(dst, Operand(src, kScratchRegister, times_1, 0)); | 2080 lea(dst, Operand(src, kScratchRegister, times_1, 0)); |
1945 } | 2081 } |
1946 not_(dst); | 2082 not_(dst); |
1947 } | 2083 } |
1948 | 2084 |
1949 | 2085 |
1950 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) { | 2086 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) { |
(...skipping 94 matching lines...) |
2045 // Logic right shift interprets its result as an *unsigned* number. | 2181 // Logic right shift interprets its result as an *unsigned* number. |
2046 if (dst.is(src)) { | 2182 if (dst.is(src)) { |
2047 UNIMPLEMENTED(); // Not used. | 2183 UNIMPLEMENTED(); // Not used. |
2048 } else { | 2184 } else { |
2049 movq(dst, src); | 2185 movq(dst, src); |
2050 if (shift_value == 0) { | 2186 if (shift_value == 0) { |
2051 testq(dst, dst); | 2187 testq(dst, dst); |
2052 j(negative, on_not_smi_result, near_jump); | 2188 j(negative, on_not_smi_result, near_jump); |
2053 } | 2189 } |
2054 shr(dst, Immediate(shift_value + kSmiShift)); | 2190 shr(dst, Immediate(shift_value + kSmiShift)); |
| 2191 #ifndef V8_TARGET_ARCH_X32 |
2055 shl(dst, Immediate(kSmiShift)); | 2192 shl(dst, Immediate(kSmiShift)); |
| 2193 #else |
| 2194 testl(dst, Immediate(0xc0000000)); |
| 2195 j(not_zero, on_not_smi_result, near_jump); |
| 2196 shll(dst, Immediate(kSmiShift)); |
| 2197 #endif |
2056 } | 2198 } |
2057 } | 2199 } |
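On x64 a logically-shifted 32-bit payload always re-tags cleanly, but a 31-bit X32 smi can be left with its top two bits set, so the port tests 0xc0000000 before retagging and bails to on_not_smi_result otherwise. A sketch of the constant-shift path just above (names mine; the real code special-cases shift 0 earlier):

    #include <cassert>
    #include <cstdint>

    bool SmiShrConstantX32(uint32_t smi, int shift, uint32_t* out_smi) {
      const int kSmiShift = 1;
      uint32_t r = smi >> (shift + kSmiShift);  // untag + shift in one shr
      if (r & 0xc0000000u) return false;        // too big for a 31-bit smi
      *out_smi = r << kSmiShift;                // re-tag
      return true;
    }

    int main() {
      uint32_t out;
      assert(SmiShrConstantX32(8u << 1, 1, &out) && out == (4u << 1));
      assert(!SmiShrConstantX32(0xFFFFFFFCu /* smi -2 */, 0, &out));
      return 0;
    }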
2058 | 2200 |
2059 | 2201 |
| 2202 #ifndef V8_TARGET_ARCH_X32 |
2060 void MacroAssembler::SmiShiftLeft(Register dst, | 2203 void MacroAssembler::SmiShiftLeft(Register dst, |
2061 Register src1, | 2204 Register src1, |
2062 Register src2) { | 2205 Register src2) { |
2063 ASSERT(!dst.is(rcx)); | 2206 ASSERT(!dst.is(rcx)); |
2064 // Untag shift amount. | 2207 // Untag shift amount. |
2065 if (!dst.is(src1)) { | 2208 if (!dst.is(src1)) { |
2066 movq(dst, src1); | 2209 movq(dst, src1); |
2067 } | 2210 } |
2068 SmiToInteger32(rcx, src2); | 2211 SmiToInteger32(rcx, src2); |
2069 // Shift amount specified by lower 5 bits, not six as the shl opcode. | 2212 // Shift amount specified by lower 5 bits, not six as the shl opcode. |
2070 and_(rcx, Immediate(0x1f)); | 2213 and_(rcx, Immediate(0x1f)); |
2071 shl_cl(dst); | 2214 shl_cl(dst); |
2072 } | 2215 } |
| 2216 #else |
| 2217 void MacroAssembler::SmiShiftLeft(Register dst, |
| 2218 Register src1, |
| 2219 Register src2, |
| 2220 Label* on_not_smi_result) { |
| 2221 ASSERT(!dst.is(kScratchRegister)); |
| 2222 ASSERT(!src1.is(kScratchRegister)); |
| 2223 ASSERT(!src2.is(kScratchRegister)); |
| 2224 ASSERT(!dst.is(rcx)); |
| 2225 Label result_ok; |
| 2226 |
| 2227 if (src1.is(rcx) || src2.is(rcx)) { |
| 2228 movl(kScratchRegister, rcx); |
| 2229 } |
| 2230 // Untag shift amount. |
| 2231 if (!dst.is(src1)) { |
| 2232 movl(dst, src1); |
| 2233 } |
| 2234 SmiToInteger32(dst, dst); |
| 2235 SmiToInteger32(rcx, src2); |
| 2236 // Shift amount specified by lower 5 bits, not six as the shl opcode. |
| 2237 andl(rcx, Immediate(0x1f)); |
| 2238 shll_cl(dst); |
| 2239 cmpl(dst, Immediate(0xc0000000)); |
| 2240 j(not_sign, &result_ok); |
| 2241 if (src1.is(rcx) || src2.is(rcx)) { |
| 2242 if (src1.is(rcx)) { |
| 2243 movl(src1, kScratchRegister); |
| 2244 } else { |
| 2245 movl(src2, kScratchRegister); |
| 2246 } |
| 2247 } |
| 2248 jmp(on_not_smi_result); |
| 2249 bind(&result_ok); |
| 2250 Integer32ToSmi(dst, dst); |
| 2251 } |
| 2252 #endif |
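SmiShiftLeft gains a bailout label on X32 because a left shift can overflow 31 bits: on x64 any 32-bit-wrapped result still fits the 32-bit payload, but a 31-bit X32 smi may not hold it. The cmpl against 0xc0000000 plus j(not_sign) is the same [-2^30, 2^30) interval test used by CheckInteger32ValidSmiValue, applied to the shifted result. A sketch (modulo the 32-bit wrap the hardware shll performs, reproduced below):

    #include <cassert>
    #include <cstdint>

    bool SmiShlX32(int32_t value, int shift, int32_t* out_smi) {
      int32_t shifted = static_cast<int32_t>(
          static_cast<uint32_t>(value) << (shift & 0x1f));  // shll_cl wrap
      if (shifted < -(1 << 30) || shifted >= (1 << 30)) return false;
      *out_smi = shifted << 1;  // re-tag (Integer32ToSmi)
      return true;
    }

    int main() {
      int32_t out;
      assert(SmiShlX32(3, 2, &out) && out == (12 << 1));
      assert(!SmiShlX32(1, 30, &out));  // 2^30 falls outside the smi window
      return 0;
    }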
2073 | 2253 |
2074 | 2254 |
2075 void MacroAssembler::SmiShiftLogicalRight(Register dst, | 2255 void MacroAssembler::SmiShiftLogicalRight(Register dst, |
2076 Register src1, | 2256 Register src1, |
2077 Register src2, | 2257 Register src2, |
2078 Label* on_not_smi_result, | 2258 Label* on_not_smi_result, |
2079 Label::Distance near_jump) { | 2259 Label::Distance near_jump) { |
| 2260 #ifndef V8_TARGET_ARCH_X32 |
2080 ASSERT(!dst.is(kScratchRegister)); | 2261 ASSERT(!dst.is(kScratchRegister)); |
2081 ASSERT(!src1.is(kScratchRegister)); | 2262 ASSERT(!src1.is(kScratchRegister)); |
2082 ASSERT(!src2.is(kScratchRegister)); | 2263 ASSERT(!src2.is(kScratchRegister)); |
2083 ASSERT(!dst.is(rcx)); | 2264 ASSERT(!dst.is(rcx)); |
2084 // dst and src1 can be the same, because the one case that bails out | 2265 // dst and src1 can be the same, because the one case that bails out |
2085 // is a shift by 0, which leaves dst, and therefore src1, unchanged. | 2266 // is a shift by 0, which leaves dst, and therefore src1, unchanged. |
2086 if (src1.is(rcx) || src2.is(rcx)) { | 2267 if (src1.is(rcx) || src2.is(rcx)) { |
2087 movq(kScratchRegister, rcx); | 2268 movq(kScratchRegister, rcx); |
2088 } | 2269 } |
2089 if (!dst.is(src1)) { | 2270 if (!dst.is(src1)) { |
(...skipping 11 matching lines...) Expand all Loading... |
2101 movq(src1, kScratchRegister); | 2282 movq(src1, kScratchRegister); |
2102 } else { | 2283 } else { |
2103 movq(src2, kScratchRegister); | 2284 movq(src2, kScratchRegister); |
2104 } | 2285 } |
2105 jmp(on_not_smi_result, near_jump); | 2286 jmp(on_not_smi_result, near_jump); |
2106 bind(&positive_result); | 2287 bind(&positive_result); |
2107 } else { | 2288 } else { |
2108 // src2 was zero and src1 negative. | 2289 // src2 was zero and src1 negative. |
2109 j(negative, on_not_smi_result, near_jump); | 2290 j(negative, on_not_smi_result, near_jump); |
2110 } | 2291 } |
| 2292 #else |
| 2293 ASSERT(!dst.is(kScratchRegister)); |
| 2294 ASSERT(!src1.is(kScratchRegister)); |
| 2295 ASSERT(!src2.is(kScratchRegister)); |
| 2296 ASSERT(!dst.is(rcx)); |
| 2297 Label result_ok; |
| 2298 |
| 2299 // dst and src1 can be the same, because the one case that bails out |
| 2300 // is a shift by 0, which leaves dst, and therefore src1, unchanged. |
| 2301 if (src1.is(rcx) || src2.is(rcx)) { |
| 2302 movl(kScratchRegister, rcx); |
| 2303 } |
| 2304 if (!dst.is(src1)) { |
| 2305 movq(dst, src1); |
| 2306 } |
| 2307 SmiToInteger32(rcx, src2); |
| 2308 SmiToInteger32(dst, dst); |
| 2309 shrl_cl(dst); |
| 2310 testl(dst, Immediate(0xc0000000)); |
| 2311 j(zero, &result_ok); |
| 2312 if (src1.is(rcx) || src2.is(rcx)) { |
| 2313 if (src1.is(rcx)) { |
| 2314 movl(src1, kScratchRegister); |
| 2315 } else { |
| 2316 movl(src2, kScratchRegister); |
| 2317 } |
| 2318 } |
| 2319 jmp(on_not_smi_result); |
| 2320 bind(&result_ok); |
| 2321 Integer32ToSmi(dst, dst); |
| 2322 #endif |
2111 } | 2323 } |
2112 | 2324 |
2113 | 2325 |
2114 void MacroAssembler::SmiShiftArithmeticRight(Register dst, | 2326 void MacroAssembler::SmiShiftArithmeticRight(Register dst, |
2115 Register src1, | 2327 Register src1, |
2116 Register src2) { | 2328 Register src2) { |
2117 ASSERT(!dst.is(kScratchRegister)); | 2329 ASSERT(!dst.is(kScratchRegister)); |
2118 ASSERT(!src1.is(kScratchRegister)); | 2330 ASSERT(!src1.is(kScratchRegister)); |
2119 ASSERT(!src2.is(kScratchRegister)); | 2331 ASSERT(!src2.is(kScratchRegister)); |
2120 ASSERT(!dst.is(rcx)); | 2332 ASSERT(!dst.is(rcx)); |
2121 if (src1.is(rcx)) { | 2333 if (src1.is(rcx)) { |
2122 movq(kScratchRegister, src1); | 2334 movq(kScratchRegister, src1); |
2123 } else if (src2.is(rcx)) { | 2335 } else if (src2.is(rcx)) { |
2124 movq(kScratchRegister, src2); | 2336 movq(kScratchRegister, src2); |
2125 } | 2337 } |
2126 if (!dst.is(src1)) { | 2338 if (!dst.is(src1)) { |
2127 movq(dst, src1); | 2339 movq(dst, src1); |
2128 } | 2340 } |
2129 SmiToInteger32(rcx, src2); | 2341 SmiToInteger32(rcx, src2); |
| 2342 #ifndef V8_TARGET_ARCH_X32 |
2130 orl(rcx, Immediate(kSmiShift)); | 2343 orl(rcx, Immediate(kSmiShift)); |
| 2344 #else |
| 2345 SmiToInteger32(dst, dst); |
| 2346 #endif |
2131 sar_cl(dst); // Shift 32 + original rcx & 0x1f. | 2347 sar_cl(dst); // Shift 32 + original rcx & 0x1f. |
2132 shl(dst, Immediate(kSmiShift)); | 2348 shl(dst, Immediate(kSmiShift)); |
2133 if (src1.is(rcx)) { | 2349 if (src1.is(rcx)) { |
2134 movq(src1, kScratchRegister); | 2350 movq(src1, kScratchRegister); |
2135 } else if (src2.is(rcx)) { | 2351 } else if (src2.is(rcx)) { |
2136 movq(src2, kScratchRegister); | 2352 movq(src2, kScratchRegister); |
2137 } | 2353 } |
2138 } | 2354 } |
2139 | 2355 |
2140 | 2356 |
(...skipping 28 matching lines...) |
2169 subq(kScratchRegister, Immediate(1)); | 2385 subq(kScratchRegister, Immediate(1)); |
2170 // If src1 is a smi, then scratch register all 1s, else it is all 0s. | 2386 // If src1 is a smi, then scratch register all 1s, else it is all 0s. |
2171 movq(dst, src1); | 2387 movq(dst, src1); |
2172 xor_(dst, src2); | 2388 xor_(dst, src2); |
2173 and_(dst, kScratchRegister); | 2389 and_(dst, kScratchRegister); |
2174 // If src1 is a smi, dst holds src1 ^ src2, else it is zero. | 2390 // If src1 is a smi, dst holds src1 ^ src2, else it is zero. |
2175 xor_(dst, src1); | 2391 xor_(dst, src1); |
2176 // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi. | 2392 // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi. |
2177 } | 2393 } |
2178 | 2394 |
2179 | 2395 #ifndef V8_TARGET_ARCH_X32 |
2180 SmiIndex MacroAssembler::SmiToIndex(Register dst, | 2396 SmiIndex MacroAssembler::SmiToIndex(Register dst, |
2181 Register src, | 2397 Register src, |
2182 int shift) { | 2398 int shift) { |
2183 ASSERT(is_uint6(shift)); | 2399 ASSERT(is_uint6(shift)); |
2184 // There is a possible optimization if shift is in the range 60-63, but that | 2400 // There is a possible optimization if shift is in the range 60-63, but that |
2185 // will (and must) never happen. | 2401 // will (and must) never happen. |
2186 if (!dst.is(src)) { | 2402 if (!dst.is(src)) { |
2187 movq(dst, src); | 2403 movq(dst, src); |
2188 } | 2404 } |
2189 if (shift < kSmiShift) { | 2405 if (shift < kSmiShift) { |
2190 sar(dst, Immediate(kSmiShift - shift)); | 2406 sar(dst, Immediate(kSmiShift - shift)); |
2191 } else { | 2407 } else { |
2192 shl(dst, Immediate(shift - kSmiShift)); | 2408 shl(dst, Immediate(shift - kSmiShift)); |
2193 } | 2409 } |
2194 return SmiIndex(dst, times_1); | 2410 return SmiIndex(dst, times_1); |
2195 } | 2411 } |
| 2412 #else |
| 2413 SmiIndex MacroAssembler::SmiToIndex(Register dst, |
| 2414 Register src, |
| 2415 int shift) { |
| 2416 ASSERT(shift >= times_1 && shift <= times_8); |
| 2417 if (!dst.is(src)) { |
| 2418 movl(dst, src); |
| 2419 } |
| 2420 if (shift == times_1) { |
| 2421 sarl(dst, Immediate(kSmiShift)); |
| 2422 return SmiIndex(dst, times_1); |
| 2423 } |
| 2424 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1)); |
| 2425 } |
| 2426 #endif |
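The X32 SmiToIndex exploits the tag arithmetic: a tagged smi already equals value * 2, so an index wanting value * 2^k can keep the tag and use the next-smaller hardware scale (ScaleFactor k-1); only times_1 needs a real untag. Sketch (k being the times_* exponent):

    #include <cassert>
    #include <cstdint>

    uint32_t ScaledSmiIndexX32(int32_t smi, int k) {  // k in [0, 3]
      if (k == 0) return static_cast<uint32_t>(smi >> 1);  // sarl path
      return static_cast<uint32_t>(smi) << (k - 1);        // reuse the *2 tag
    }

    int main() {
      int32_t smi7 = 7 << 1;
      assert(ScaledSmiIndexX32(smi7, 3) == 7u * 8u);  // times_8 via scale 4
      assert(ScaledSmiIndexX32(smi7, 0) == 7u);
      return 0;
    }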
2196 | 2427 |
| 2428 |
| 2429 #ifndef V8_TARGET_ARCH_X32 |
2197 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst, | 2430 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst, |
2198 Register src, | 2431 Register src, |
2199 int shift) { | 2432 int shift) { |
2200 // Register src holds a positive smi. | 2433 // Register src holds a positive smi. |
2201 ASSERT(is_uint6(shift)); | 2434 ASSERT(is_uint6(shift)); |
2202 if (!dst.is(src)) { | 2435 if (!dst.is(src)) { |
2203 movq(dst, src); | 2436 movq(dst, src); |
2204 } | 2437 } |
2205 neg(dst); | 2438 neg(dst); |
2206 if (shift < kSmiShift) { | 2439 if (shift < kSmiShift) { |
2207 sar(dst, Immediate(kSmiShift - shift)); | 2440 sar(dst, Immediate(kSmiShift - shift)); |
2208 } else { | 2441 } else { |
2209 shl(dst, Immediate(shift - kSmiShift)); | 2442 shl(dst, Immediate(shift - kSmiShift)); |
2210 } | 2443 } |
2211 return SmiIndex(dst, times_1); | 2444 return SmiIndex(dst, times_1); |
2212 } | 2445 } |
| 2446 #else |
| 2447 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst, |
| 2448 Register src, |
| 2449 int shift) { |
| 2450 // Register src holds a positive smi. |
| 2451 ASSERT(shift >= times_1 && shift <= times_8); |
| 2452 if (!dst.is(src)) { |
| 2453 movl(dst, src); |
| 2454 } |
| 2455 neg(dst); |
| 2456 if (shift == times_1) { |
| 2457 sar(dst, Immediate(kSmiShift)); |
| 2458 return SmiIndex(dst, times_1); |
| 2459 } |
| 2460 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1)); |
| 2461 } |
| 2462 #endif |
2213 | 2463 |
2214 | 2464 |
2215 void MacroAssembler::AddSmiField(Register dst, const Operand& src) { | 2465 void MacroAssembler::AddSmiField(Register dst, const Operand& src) { |
| 2466 #ifndef V8_TARGET_ARCH_X32 |
2216 ASSERT_EQ(0, kSmiShift % kBitsPerByte); | 2467 ASSERT_EQ(0, kSmiShift % kBitsPerByte); |
2217 addl(dst, Operand(src, kSmiShift / kBitsPerByte)); | 2468 addl(dst, Operand(src, kSmiShift / kBitsPerByte)); |
| 2469 #else |
| 2470 SmiToInteger32(kScratchRegister, src); |
| 2471 addl(dst, kScratchRegister); |
| 2472 #endif |
2218 } | 2473 } |
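The non-X32 path of AddSmiField leans on the same layout trick: with the payload in the high 32 bits and little-endian stores, the untagged value can be read directly at byte offset kSmiShift / kBitsPerByte == 4, which is what the single addl does. A sketch of that load:

    #include <cstdint>
    #include <cstring>

    // Read the untagged 32-bit payload of an x64 smi field (value << 32).
    int32_t LoadSmiHighHalf(const void* field) {
      int32_t value;
      std::memcpy(&value, static_cast<const char*>(field) + 4, sizeof(value));
      return value;
    }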
2219 | 2474 |
2220 | 2475 |
2221 void MacroAssembler::JumpIfNotString(Register object, | 2476 void MacroAssembler::JumpIfNotString(Register object, |
2222 Register object_map, | 2477 Register object_map, |
2223 Label* not_string, | 2478 Label* not_string, |
2224 Label::Distance near_jump) { | 2479 Label::Distance near_jump) { |
2225 Condition is_smi = CheckSmi(object); | 2480 Condition is_smi = CheckSmi(object); |
2226 j(is_smi, not_string, near_jump); | 2481 j(is_smi, not_string, near_jump); |
2227 CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map); | 2482 CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map); |
(...skipping 216 matching lines...) |
2444 if (dst.is(rax)) { | 2699 if (dst.is(rax)) { |
2445 AllowDeferredHandleDereference embedding_raw_address; | 2700 AllowDeferredHandleDereference embedding_raw_address; |
2446 load_rax(cell.location(), RelocInfo::CELL); | 2701 load_rax(cell.location(), RelocInfo::CELL); |
2447 } else { | 2702 } else { |
2448 movq(dst, cell, RelocInfo::CELL); | 2703 movq(dst, cell, RelocInfo::CELL); |
2449 movq(dst, Operand(dst, 0)); | 2704 movq(dst, Operand(dst, 0)); |
2450 } | 2705 } |
2451 } | 2706 } |
2452 | 2707 |
2453 | 2708 |
| 2709 #ifdef V8_TARGET_ARCH_X32 |
| 2710 void MacroAssembler::Push(Immediate value) { |
| 2711 leal(rsp, Operand(rsp, -4)); |
| 2712 movl(Operand(rsp, 0), value); |
| 2713 } |
| 2714 |
| 2715 |
| 2716 void MacroAssembler::Push_imm32(int32_t imm32) { |
| 2717 leal(rsp, Operand(rsp, -4)); |
| 2718 movl(Operand(rsp, 0), Immediate(imm32)); |
| 2719 } |
| 2720 |
| 2721 |
| 2722 void MacroAssembler::Push(Register src) { |
| 2723 // We use a 64-bit push for rbp in the prologue. |
| 2724 ASSERT(src.code() != rbp.code()); |
| 2725 leal(rsp, Operand(rsp, -4)); |
| 2726 movl(Operand(rsp, 0), src); |
| 2727 } |
| 2728 |
| 2729 |
| 2730 void MacroAssembler::Push(const Operand& src) { |
| 2731 movl(kScratchRegister, src); |
| 2732 leal(rsp, Operand(rsp, -4)); |
| 2733 movl(Operand(rsp, 0), kScratchRegister); |
| 2734 } |
| 2735 |
| 2736 |
| 2737 void MacroAssembler::Pop(Register dst) { |
| 2738 // We use a 64-bit push for rbp in the prologue. |
| 2739 ASSERT(dst.code() != rbp.code()); |
| 2740 movl(dst, Operand(rsp, 0)); |
| 2741 leal(rsp, Operand(rsp, 4)); |
| 2742 } |
| 2743 |
| 2744 |
| 2745 void MacroAssembler::Pop(const Operand& dst) { |
| 2746 Register scratch = kScratchRegister; |
| 2747 bool needs_extra_scratch = dst.AddressUsesRegister(kScratchRegister); |
| 2748 if (needs_extra_scratch) { |
| 2749 scratch = kSmiConstantRegister; |
| 2750 } |
| 2751 movl(scratch, Operand(rsp, 0)); |
| 2752 movl(dst, scratch); |
| 2753 if (needs_extra_scratch) { |
| 2754 // Restore the value of kSmiConstantRegister. |
| 2755 // Ideally this would call InitializeSmiConstantRegister(). |
| 2756 movl(kSmiConstantRegister, |
| 2757 reinterpret_cast<uint32_t>(Smi::FromInt(kSmiConstantRegisterValue)), |
| 2758 RelocInfo::NONE32); |
| 2759 } |
| 2760 leal(rsp, Operand(rsp, 4)); |
| 2761 } |
| 2762 #endif |
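These X32 helpers exist because the hardware push/pop on x64 always moves eight bytes; 4-byte stack slots therefore have to be synthesized from an explicit rsp adjustment (leal) plus a 32-bit move (movl). A model of the pair as a sketch:

    #include <cstdint>

    void Push32(uint32_t*& rsp, uint32_t value) {
      rsp -= 1;      // leal(rsp, Operand(rsp, -4))
      *rsp = value;  // movl(Operand(rsp, 0), value)
    }

    uint32_t Pop32(uint32_t*& rsp) {
      uint32_t value = *rsp;  // movl(dst, Operand(rsp, 0))
      rsp += 1;               // leal(rsp, Operand(rsp, 4))
      return value;
    }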
| 2763 |
| 2764 |
2454 void MacroAssembler::Push(Smi* source) { | 2765 void MacroAssembler::Push(Smi* source) { |
2455 intptr_t smi = reinterpret_cast<intptr_t>(source); | 2766 intptr_t smi = reinterpret_cast<intptr_t>(source); |
2456 if (is_int32(smi)) { | 2767 if (is_int32(smi)) { |
2457 push(Immediate(static_cast<int32_t>(smi))); | 2768 push(Immediate(static_cast<int32_t>(smi))); |
2458 } else { | 2769 } else { |
2459 Register constant = GetSmiConstant(source); | 2770 Register constant = GetSmiConstant(source); |
2460 push(constant); | 2771 push(constant); |
2461 } | 2772 } |
2462 } | 2773 } |
2463 | 2774 |
2464 | 2775 |
2465 void MacroAssembler::Drop(int stack_elements) { | 2776 void MacroAssembler::Drop(int stack_elements) { |
2466 if (stack_elements > 0) { | 2777 if (stack_elements > 0) { |
2467 addq(rsp, Immediate(stack_elements * kPointerSize)); | 2778 addq(rsp, Immediate(stack_elements * kPointerSize)); |
2468 } | 2779 } |
2469 } | 2780 } |
2470 | 2781 |
2471 | 2782 |
2472 void MacroAssembler::Test(const Operand& src, Smi* source) { | 2783 void MacroAssembler::Test(const Operand& src, Smi* source) { |
| 2784 #ifndef V8_TARGET_ARCH_X32 |
2473 testl(Operand(src, kIntSize), Immediate(source->value())); // High half holds the value. | 2785 testl(Operand(src, kIntSize), Immediate(source->value())); // High half holds the value. |
| 2786 #else |
| 2787 testl(src, Immediate(source)); |
| 2788 #endif |
2474 } | 2789 } |
2475 | 2790 |
2476 | 2791 |
2477 void MacroAssembler::TestBit(const Operand& src, int bits) { | 2792 void MacroAssembler::TestBit(const Operand& src, int bits) { |
| 2793 #ifdef V8_TARGET_ARCH_X32 |
| 2794 // Pointer fields in SharedFunctionInfo are smis. |
| 2795 bits += kSmiTagSize + kSmiShiftSize; |
| 2796 #endif |
2478 int byte_offset = bits / kBitsPerByte; | 2797 int byte_offset = bits / kBitsPerByte; |
2479 int bit_in_byte = bits & (kBitsPerByte - 1); | 2798 int bit_in_byte = bits & (kBitsPerByte - 1); |
2480 testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte)); | 2799 testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte)); |
2481 } | 2800 } |
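TestBit narrows the operand to a single byte so the testb encoding stays short no matter how large the bit index gets; the X32 path merely re-bases the index first, because the field it probes is a smi there. A sketch of the byte addressing:

    #include <cstdint>

    bool TestBitInField(const uint8_t* field, int bits) {
      int byte_offset = bits / 8;                      // bits / kBitsPerByte
      int bit_in_byte = bits & 7;                      // bits & (kBitsPerByte - 1)
      return (field[byte_offset] >> bit_in_byte) & 1;  // testb(..., 1 << bit)
    }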
2482 | 2801 |
2483 | 2802 |
| 2803 #ifdef V8_TARGET_ARCH_X32 |
| 2804 void MacroAssembler::Jump(const Operand& src) { |
| 2805 movl(kScratchRegister, src); |
| 2806 jmp(kScratchRegister); |
| 2807 } |
| 2808 #endif |
| 2809 |
| 2810 |
2484 void MacroAssembler::Jump(ExternalReference ext) { | 2811 void MacroAssembler::Jump(ExternalReference ext) { |
2485 LoadAddress(kScratchRegister, ext); | 2812 LoadAddress(kScratchRegister, ext); |
2486 jmp(kScratchRegister); | 2813 jmp(kScratchRegister); |
2487 } | 2814 } |
2488 | 2815 |
2489 | 2816 |
2490 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) { | 2817 void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) { |
2491 movq(kScratchRegister, destination, rmode); | 2818 movq(kScratchRegister, destination, rmode); |
2492 jmp(kScratchRegister); | 2819 jmp(kScratchRegister); |
2493 } | 2820 } |
(...skipping 17 matching lines...) |
2511 int end_position = pc_offset() + CallSize(ext); | 2838 int end_position = pc_offset() + CallSize(ext); |
2512 #endif | 2839 #endif |
2513 LoadAddress(kScratchRegister, ext); | 2840 LoadAddress(kScratchRegister, ext); |
2514 call(kScratchRegister); | 2841 call(kScratchRegister); |
2515 #ifdef DEBUG | 2842 #ifdef DEBUG |
2516 CHECK_EQ(end_position, pc_offset()); | 2843 CHECK_EQ(end_position, pc_offset()); |
2517 #endif | 2844 #endif |
2518 } | 2845 } |
2519 | 2846 |
2520 | 2847 |
| 2848 #ifdef V8_TARGET_ARCH_X32 |
| 2849 void MacroAssembler::Call(const Operand& op) { |
| 2850 movl(kScratchRegister, op); |
| 2851 call(kScratchRegister); |
| 2852 } |
| 2853 #endif |
| 2854 |
| 2855 |
2521 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) { | 2856 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) { |
2522 #ifdef DEBUG | 2857 #ifdef DEBUG |
2523 int end_position = pc_offset() + CallSize(destination, rmode); | 2858 int end_position = pc_offset() + CallSize(destination, rmode); |
2524 #endif | 2859 #endif |
2525 movq(kScratchRegister, destination, rmode); | 2860 movq(kScratchRegister, destination, rmode); |
2526 call(kScratchRegister); | 2861 call(kScratchRegister); |
2527 #ifdef DEBUG | 2862 #ifdef DEBUG |
2528 CHECK_EQ(pc_offset(), end_position); | 2863 CHECK_EQ(pc_offset(), end_position); |
2529 #endif | 2864 #endif |
2530 } | 2865 } |
(...skipping 101 matching lines...) |
2632 | 2967 |
2633 | 2968 |
2634 Operand MacroAssembler::SafepointRegisterSlot(Register reg) { | 2969 Operand MacroAssembler::SafepointRegisterSlot(Register reg) { |
2635 return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); | 2970 return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); |
2636 } | 2971 } |
2637 | 2972 |
2638 | 2973 |
2639 void MacroAssembler::PushTryHandler(StackHandler::Kind kind, | 2974 void MacroAssembler::PushTryHandler(StackHandler::Kind kind, |
2640 int handler_index) { | 2975 int handler_index) { |
2641 // Adjust this code if not the case. | 2976 // Adjust this code if not the case. |
| 2977 #ifndef V8_TARGET_ARCH_X32 |
2642 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); | 2978 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); |
| 2979 #else |
| 2980 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize + kHWRegSize); |
| 2981 #endif |
2643 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | 2982 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); |
2644 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); | 2983 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); |
2645 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); | 2984 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); |
2646 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); | 2985 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); |
2647 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); | 2986 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); |
2648 | 2987 |
2649 // We will build up the handler from the bottom by pushing on the stack. | 2988 // We will build up the handler from the bottom by pushing on the stack. |
2650 // First push the frame pointer and context. | 2989 // First push the frame pointer and context. |
2651 if (kind == StackHandler::JS_ENTRY) { | 2990 if (kind == StackHandler::JS_ENTRY) { |
2652 // The frame pointer does not point to a JS frame so we save NULL for | 2991 // The frame pointer does not point to a JS frame so we save NULL for |
2653 // rbp. We expect the code throwing an exception to check rbp before | 2992 // rbp. We expect the code throwing an exception to check rbp before |
2654 // dereferencing it to restore the context. | 2993 // dereferencing it to restore the context. |
2655 push(Immediate(0)); // NULL frame pointer. | 2994 __k push(Immediate(0)); // NULL frame pointer. |
2656 Push(Smi::FromInt(0)); // No context. | 2995 Push(Smi::FromInt(0)); // No context. |
2657 } else { | 2996 } else { |
2658 push(rbp); | 2997 push(rbp); |
2659 push(rsi); | 2998 push(rsi); |
2660 } | 2999 } |
2661 | 3000 |
2662 // Push the state and the code object. | 3001 // Push the state and the code object. |
2663 unsigned state = | 3002 unsigned state = |
2664 StackHandler::IndexField::encode(handler_index) | | 3003 StackHandler::IndexField::encode(handler_index) | |
2665 StackHandler::KindField::encode(kind); | 3004 StackHandler::KindField::encode(kind); |
(...skipping 25 matching lines...) |
2691 movq(rdx, | 3030 movq(rdx, |
2692 FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize)); | 3031 FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize)); |
2693 SmiToInteger64(rdx, rdx); | 3032 SmiToInteger64(rdx, rdx); |
2694 lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize)); | 3033 lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize)); |
2695 jmp(rdi); | 3034 jmp(rdi); |
2696 } | 3035 } |
2697 | 3036 |
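The STATIC_ASSERT blocks in PushTryHandler above, and in Throw/ThrowUncatchable below, all pin down one handler record. A hedged sketch of that record as a struct; the offsets assume kPointerSize == sizeof(void*), which is exact on x64 and only approximate on X32, where the trailing frame-pointer slot is a full kHWRegSize hardware register (hence kSize == 4 * kPointerSize + kHWRegSize there):

    #include <cstdint>

    struct StackHandlerLayoutSketch {
      void*     next;     // kNextOffset    == 0
      void*     code;     // kCodeOffset    == 1 * kPointerSize
      uintptr_t state;    // kStateOffset   == 2 * kPointerSize
      void*     context;  // kContextOffset == 3 * kPointerSize
      void*     fp;       // kFPOffset      == 4 * kPointerSize
    };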
2698 | 3037 |
2699 void MacroAssembler::Throw(Register value) { | 3038 void MacroAssembler::Throw(Register value) { |
2700 // Adjust this code if not the case. | 3039 // Adjust this code if not the case. |
| 3040 #ifndef V8_TARGET_ARCH_X32 |
2701 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); | 3041 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); |
| 3042 #else |
| 3043 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize + kHWRegSize); |
| 3044 #endif |
2702 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | 3045 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); |
2703 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); | 3046 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); |
2704 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); | 3047 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); |
2705 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); | 3048 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); |
2706 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); | 3049 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); |
2707 | 3050 |
2708 // The exception is expected in rax. | 3051 // The exception is expected in rax. |
2709 if (!value.is(rax)) { | 3052 if (!value.is(rax)) { |
2710 movq(rax, value); | 3053 movq(rax, value); |
2711 } | 3054 } |
(...skipping 19 matching lines...) |
2731 j(zero, &skip, Label::kNear); | 3074 j(zero, &skip, Label::kNear); |
2732 movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi); | 3075 movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi); |
2733 bind(&skip); | 3076 bind(&skip); |
2734 | 3077 |
2735 JumpToHandlerEntry(); | 3078 JumpToHandlerEntry(); |
2736 } | 3079 } |
2737 | 3080 |
2738 | 3081 |
2739 void MacroAssembler::ThrowUncatchable(Register value) { | 3082 void MacroAssembler::ThrowUncatchable(Register value) { |
2740 // Adjust this code if not the case. | 3083 // Adjust this code if not the case. |
| 3084 #ifndef V8_TARGET_ARCH_X32 |
2741 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); | 3085 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); |
| 3086 #else |
| 3087 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize + kHWRegSize); |
| 3088 #endif |
2742 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | 3089 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); |
2743 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); | 3090 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); |
2744 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); | 3091 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); |
2745 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); | 3092 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); |
2746 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); | 3093 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); |
2747 | 3094 |
2748 // The exception is expected in rax. | 3095 // The exception is expected in rax. |
2749 if (!value.is(rax)) { | 3096 if (!value.is(rax)) { |
2750 movq(rax, value); | 3097 movq(rax, value); |
2751 } | 3098 } |
(...skipping 30 matching lines...) |
2782 | 3129 |
2783 void MacroAssembler::Ret() { | 3130 void MacroAssembler::Ret() { |
2784 ret(0); | 3131 ret(0); |
2785 } | 3132 } |
2786 | 3133 |
2787 | 3134 |
2788 void MacroAssembler::Ret(int bytes_dropped, Register scratch) { | 3135 void MacroAssembler::Ret(int bytes_dropped, Register scratch) { |
2789 if (is_uint16(bytes_dropped)) { | 3136 if (is_uint16(bytes_dropped)) { |
2790 ret(bytes_dropped); | 3137 ret(bytes_dropped); |
2791 } else { | 3138 } else { |
2792 pop(scratch); | 3139 __k pop(scratch); |
2793 addq(rsp, Immediate(bytes_dropped)); | 3140 addq(rsp, Immediate(bytes_dropped)); |
2794 push(scratch); | 3141 __k push(scratch); |
2795 ret(0); | 3142 ret(0); |
2796 } | 3143 } |
2797 } | 3144 } |
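Ret(bytes_dropped, scratch) needs the pop/push detour because ret imm16 only encodes a 16-bit byte count. A simulation of the large-drop path, treating the stack as an array of 8-byte slots (an assumption that matches x64, with bytes_dropped taken to be slot-aligned):

    #include <cstdint>

    uint64_t* RetDropping(uint64_t* rsp, int bytes_dropped) {
      uint64_t return_address = *rsp++;  // pop(scratch)
      rsp += bytes_dropped / 8;          // addq(rsp, Immediate(bytes_dropped))
      *--rsp = return_address;           // push(scratch)
      return rsp;                        // ret(0) now consumes return_address
    }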
2798 | 3145 |
2799 | 3146 |
2800 void MacroAssembler::FCmp() { | 3147 void MacroAssembler::FCmp() { |
2801 fucomip(); | 3148 fucomip(); |
2802 fstp(0); | 3149 fstp(0); |
2803 } | 3150 } |
2804 | 3151 |
(...skipping 245 matching lines...) |
3050 if (emit_debug_code()) { | 3397 if (emit_debug_code()) { |
3051 Condition is_smi = CheckSmi(object); | 3398 Condition is_smi = CheckSmi(object); |
3052 Check(is_smi, "Operand is not a smi"); | 3399 Check(is_smi, "Operand is not a smi"); |
3053 } | 3400 } |
3054 } | 3401 } |
3055 | 3402 |
3056 | 3403 |
3057 void MacroAssembler::AssertZeroExtended(Register int32_register) { | 3404 void MacroAssembler::AssertZeroExtended(Register int32_register) { |
3058 if (emit_debug_code()) { | 3405 if (emit_debug_code()) { |
3059 ASSERT(!int32_register.is(kScratchRegister)); | 3406 ASSERT(!int32_register.is(kScratchRegister)); |
3060 movq(kScratchRegister, 0x100000000l, RelocInfo::NONE64); | 3407 __k movq(kScratchRegister, 0x100000000l, RelocInfo::NONE64); |
3061 cmpq(kScratchRegister, int32_register); | 3408 __k cmpq(kScratchRegister, int32_register); |
3062 Check(above_equal, "32 bit value in register is not zero-extended"); | 3409 Check(above_equal, "32 bit value in register is not zero-extended"); |
3063 } | 3410 } |
3064 } | 3411 } |
3065 | 3412 |
3066 | 3413 |
3067 void MacroAssembler::AssertString(Register object) { | 3414 void MacroAssembler::AssertString(Register object) { |
3068 if (emit_debug_code()) { | 3415 if (emit_debug_code()) { |
3069 testb(object, Immediate(kSmiTagMask)); | 3416 testb(object, Immediate(kSmiTagMask)); |
3070 Check(not_equal, "Operand is a smi and not a string"); | 3417 Check(not_equal, "Operand is a smi and not a string"); |
3071 push(object); | 3418 push(object); |
(...skipping 249 matching lines...) |
3321 const ParameterCount& actual, | 3668 const ParameterCount& actual, |
3322 InvokeFlag flag, | 3669 InvokeFlag flag, |
3323 const CallWrapper& call_wrapper, | 3670 const CallWrapper& call_wrapper, |
3324 CallKind call_kind) { | 3671 CallKind call_kind) { |
3325 // You can't call a function without a valid frame. | 3672 // You can't call a function without a valid frame. |
3326 ASSERT(flag == JUMP_FUNCTION || has_frame()); | 3673 ASSERT(flag == JUMP_FUNCTION || has_frame()); |
3327 | 3674 |
3328 ASSERT(function.is(rdi)); | 3675 ASSERT(function.is(rdi)); |
3329 movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); | 3676 movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); |
3330 movq(rsi, FieldOperand(function, JSFunction::kContextOffset)); | 3677 movq(rsi, FieldOperand(function, JSFunction::kContextOffset)); |
| 3678 #ifndef V8_TARGET_ARCH_X32 |
3331 movsxlq(rbx, | 3679 movsxlq(rbx, |
3332 FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset)); | 3680 FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset)); |
| 3681 #else |
| 3682 movl(rbx, |
| 3683 FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset)); |
| 3684 SmiToInteger32(rbx, rbx); |
| 3685 #endif |
3333 // Advances rdx to the end of the Code object header, to the start of | 3686 // Advances rdx to the end of the Code object header, to the start of |
3334 // the executable code. | 3687 // the executable code. |
3335 movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset)); | 3688 movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset)); |
3336 | 3689 |
3337 ParameterCount expected(rbx); | 3690 ParameterCount expected(rbx); |
3338 InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind); | 3691 InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind); |
3339 } | 3692 } |
3340 | 3693 |
3341 | 3694 |
3342 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, | 3695 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, |
(...skipping 117 matching lines...) |
3460 Check(equal, "stack frame types must match"); | 3813 Check(equal, "stack frame types must match"); |
3461 } | 3814 } |
3462 movq(rsp, rbp); | 3815 movq(rsp, rbp); |
3463 pop(rbp); | 3816 pop(rbp); |
3464 } | 3817 } |
3465 | 3818 |
3466 | 3819 |
3467 void MacroAssembler::EnterExitFramePrologue(bool save_rax) { | 3820 void MacroAssembler::EnterExitFramePrologue(bool save_rax) { |
3468 // Set up the frame structure on the stack. | 3821 // Set up the frame structure on the stack. |
3469 // All constants are relative to the frame pointer of the exit frame. | 3822 // All constants are relative to the frame pointer of the exit frame. |
| 3823 #ifndef V8_TARGET_ARCH_X32 |
3470 ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize); | 3824 ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize); |
3471 ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize); | 3825 ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize); |
| 3826 #else |
| 3827 ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kHWRegSize); |
| 3828 ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kHWRegSize); |
| 3829 #endif |
3472 ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); | 3830 ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); |
3473 push(rbp); | 3831 push(rbp); |
3474 movq(rbp, rsp); | 3832 movq(rbp, rsp); |
3475 | 3833 |
3476 // Reserve room for entry stack pointer and push the code object. | 3834 // Reserve room for entry stack pointer and push the code object. |
3477 ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize); | 3835 ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize); |
| 3836 #ifndef V8_TARGET_ARCH_X32 |
3478 push(Immediate(0)); // Saved entry sp, patched before call. | 3837 push(Immediate(0)); // Saved entry sp, patched before call. |
3479 movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT); | 3838 movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT); |
3480 push(kScratchRegister); // Accessed from ExitFrame::code_slot. | 3839 push(kScratchRegister); // Accessed from ExitFrame::code_slot. |
| 3840 #else |
| 3841 movl(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT); |
| 3842 push(kScratchRegister); // Accessed from ExitFrame::code_slot. |
| 3843 #endif |
3481 | 3844 |
3482 // Save the frame pointer and the context in top. | 3845 // Save the frame pointer and the context in top. |
3483 if (save_rax) { | 3846 if (save_rax) { |
3484 movq(r14, rax); // Back up rax in a callee-saved register. | 3847 movq(r14, rax); // Back up rax in a callee-saved register. |
3485 } | 3848 } |
3486 | 3849 |
3487 Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp); | 3850 Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp); |
3488 Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi); | 3851 Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi); |
3489 } | 3852 } |
3490 | 3853 |
3491 | 3854 |
3492 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space, | 3855 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space, |
3493 bool save_doubles) { | 3856 bool save_doubles) { |
3494 #ifdef _WIN64 | 3857 #ifdef _WIN64 |
3495 const int kShadowSpace = 4; | 3858 const int kShadowSpace = 4; |
3496 arg_stack_space += kShadowSpace; | 3859 arg_stack_space += kShadowSpace; |
3497 #endif | 3860 #endif |
3498 // Optionally save all XMM registers. | 3861 // Optionally save all XMM registers. |
3499 if (save_doubles) { | 3862 if (save_doubles) { |
3500 int space = XMMRegister::kMaxNumRegisters * kDoubleSize + | 3863 int space = XMMRegister::kMaxNumRegisters * kDoubleSize + |
3501 arg_stack_space * kPointerSize; | 3864 __q arg_stack_space * kPointerSize; |
3502 subq(rsp, Immediate(space)); | 3865 subq(rsp, Immediate(space)); |
3503 int offset = -2 * kPointerSize; | 3866 int offset = -2 * kPointerSize; |
| 3867 #ifndef V8_TARGET_ARCH_X32 |
3504 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) { | 3868 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) { |
3505 XMMRegister reg = XMMRegister::FromAllocationIndex(i); | 3869 XMMRegister reg = XMMRegister::FromAllocationIndex(i); |
3506 movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg); | 3870 movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg); |
3507 } | 3871 } |
| 3872 #else |
| 3873 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { |
| 3874 XMMRegister reg = XMMRegister::from_code(i); |
| 3875 movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg); |
| 3876 } |
| 3877 #endif |
3508 } else if (arg_stack_space > 0) { | 3878 } else if (arg_stack_space > 0) { |
3509 subq(rsp, Immediate(arg_stack_space * kPointerSize)); | 3879 __q subq(rsp, Immediate(arg_stack_space * kPointerSize)); |
3510 } | 3880 } |
3511 | 3881 |
3512 // Get the required frame alignment for the OS. | 3882 // Get the required frame alignment for the OS. |
3513 const int kFrameAlignment = OS::ActivationFrameAlignment(); | 3883 const int kFrameAlignment = OS::ActivationFrameAlignment(); |
3514 if (kFrameAlignment > 0) { | 3884 if (kFrameAlignment > 0) { |
3515 ASSERT(IsPowerOf2(kFrameAlignment)); | 3885 ASSERT(IsPowerOf2(kFrameAlignment)); |
3516 ASSERT(is_int8(kFrameAlignment)); | 3886 ASSERT(is_int8(kFrameAlignment)); |
3517 and_(rsp, Immediate(-kFrameAlignment)); | 3887 and_(rsp, Immediate(-kFrameAlignment)); |
3518 } | 3888 } |
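The and_ with -kFrameAlignment rounds rsp down to the OS-required boundary; since -kFrameAlignment == ~(kFrameAlignment - 1) for a power of two, the operation is just a mask. As arithmetic, under the power-of-two invariant the ASSERTs check:

    #include <cstdint>

    uint64_t AlignStackDown(uint64_t rsp, uint64_t frame_alignment) {
      return rsp & ~(frame_alignment - 1);  // and_(rsp, Immediate(-kFrameAlignment))
    }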
3519 | 3889 |
(...skipping 18 matching lines...) |
3538 EnterExitFramePrologue(false); | 3908 EnterExitFramePrologue(false); |
3539 EnterExitFrameEpilogue(arg_stack_space, false); | 3909 EnterExitFrameEpilogue(arg_stack_space, false); |
3540 } | 3910 } |
3541 | 3911 |
3542 | 3912 |
3543 void MacroAssembler::LeaveExitFrame(bool save_doubles) { | 3913 void MacroAssembler::LeaveExitFrame(bool save_doubles) { |
3544 // Registers: | 3914 // Registers: |
3545 // r15 : argv | 3915 // r15 : argv |
3546 if (save_doubles) { | 3916 if (save_doubles) { |
3547 int offset = -2 * kPointerSize; | 3917 int offset = -2 * kPointerSize; |
| 3918 #ifndef V8_TARGET_ARCH_X32 |
3548 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) { | 3919 for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) { |
3549 XMMRegister reg = XMMRegister::FromAllocationIndex(i); | 3920 XMMRegister reg = XMMRegister::FromAllocationIndex(i); |
3550 movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize))); | 3921 movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize))); |
3551 } | 3922 } |
| 3923 #else |
| 3924 for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { |
| 3925 XMMRegister reg = XMMRegister::from_code(i); |
| 3926 movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize))); |
| 3927 } |
| 3928 #endif |
3552 } | 3929 } |
3553 // Get the return address from the stack and restore the frame pointer. | 3930 // Get the return address from the stack and restore the frame pointer. |
3554 movq(rcx, Operand(rbp, 1 * kPointerSize)); | 3931 __q movq(rcx, Operand(rbp, 1 * kPointerSize)); |
3555 movq(rbp, Operand(rbp, 0 * kPointerSize)); | 3932 movq(rbp, Operand(rbp, 0 * kPointerSize)); |
3556 | 3933 |
3557 // Drop everything up to and including the arguments and the receiver | 3934 // Drop everything up to and including the arguments and the receiver |
3558 // from the caller stack. | 3935 // from the caller stack. |
3559 lea(rsp, Operand(r15, 1 * kPointerSize)); | 3936 lea(rsp, Operand(r15, 1 * kPointerSize)); |
3560 | 3937 |
3561 // Push the return address to get ready to return. | 3938 // Push the return address to get ready to return. |
3562 push(rcx); | 3939 __k push(rcx); |
3563 | 3940 |
3564 LeaveExitFrameEpilogue(); | 3941 LeaveExitFrameEpilogue(); |
3565 } | 3942 } |
3566 | 3943 |
3567 | 3944 |
3568 void MacroAssembler::LeaveApiExitFrame() { | 3945 void MacroAssembler::LeaveApiExitFrame() { |
3569 movq(rsp, rbp); | 3946 movq(rsp, rbp); |
3570 pop(rbp); | 3947 pop(rbp); |
3571 | 3948 |
3572 LeaveExitFrameEpilogue(); | 3949 LeaveExitFrameEpilogue(); |
(...skipping 270 matching lines...) |
3843 jmp(gc_required); | 4220 jmp(gc_required); |
3844 return; | 4221 return; |
3845 } | 4222 } |
3846 ASSERT(!result.is(result_end)); | 4223 ASSERT(!result.is(result_end)); |
3847 | 4224 |
3848 // Load address of new object into result. | 4225 // Load address of new object into result. |
3849 LoadAllocationTopHelper(result, scratch, flags); | 4226 LoadAllocationTopHelper(result, scratch, flags); |
3850 | 4227 |
3851 // Align the next allocation. Storing the filler map without checking top is | 4228 // Align the next allocation. Storing the filler map without checking top is |
3852 // always safe because the limit of the heap is always aligned. | 4229 // always safe because the limit of the heap is always aligned. |
| 4230 #ifndef V8_TARGET_ARCH_X32 |
3853 if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) { | 4231 if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) { |
3854 testq(result, Immediate(kDoubleAlignmentMask)); | 4232 testq(result, Immediate(kDoubleAlignmentMask)); |
3855 Check(zero, "Allocation is not double aligned"); | 4233 Check(zero, "Allocation is not double aligned"); |
3856 } | 4234 } |
| 4235 #else |
| 4236 if ((flags & DOUBLE_ALIGNMENT) != 0) { |
| 4237 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); |
| 4238 ASSERT(kPointerAlignment * 2 == kDoubleAlignment); |
| 4239 Label aligned; |
| 4240 testl(result, Immediate(kDoubleAlignmentMask)); |
| 4241 j(zero, &aligned, Label::kNear); |
| 4242 LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex); |
| 4243 movl(Operand(result, 0), kScratchRegister); |
| 4244 addl(result, Immediate(kDoubleSize / 2)); |
| 4245 bind(&aligned); |
| 4246 } |
| 4247 #endif |
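The X32 branch cannot assume 8-byte-aligned allocation, so it re-aligns eagerly: if the fresh address is misaligned for doubles, it plugs the first word with a one-pointer filler map and starts the object kDoubleSize / 2 == 4 bytes later. A sketch, where heap is an illustrative base address and filler_map stands in for the kOnePointerFillerMapRootIndex root value:

    #include <cstdint>
    #include <cstring>

    uint32_t AlignAllocationForDoubles(uint8_t* heap, uint32_t result,
                                       uint32_t filler_map) {
      const uint32_t kDoubleAlignmentMask = 8 - 1;
      if ((result & kDoubleAlignmentMask) != 0) {  // testl + j(zero, &aligned)
        std::memcpy(heap + result, &filler_map,
                    sizeof(filler_map));           // movl(Operand(result, 0), ...)
        result += 4;                               // addl(result, kDoubleSize / 2)
      }
      return result;
    }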
3857 | 4248 |
3858 // Calculate new top and bail out if new space is exhausted. | 4249 // Calculate new top and bail out if new space is exhausted. |
3859 ExternalReference allocation_limit = | 4250 ExternalReference allocation_limit = |
3860 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 4251 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
3861 | 4252 |
3862 Register top_reg = result_end.is_valid() ? result_end : result; | 4253 Register top_reg = result_end.is_valid() ? result_end : result; |
3863 | 4254 |
3864 if (!top_reg.is(result)) { | 4255 if (!top_reg.is(result)) { |
3865 movq(top_reg, result); | 4256 movq(top_reg, result); |
3866 } | 4257 } |
(...skipping 43 matching lines...) |
3910 jmp(gc_required); | 4301 jmp(gc_required); |
3911 return; | 4302 return; |
3912 } | 4303 } |
3913 ASSERT(!result.is(result_end)); | 4304 ASSERT(!result.is(result_end)); |
3914 | 4305 |
3915 // Load address of new object into result. | 4306 // Load address of new object into result. |
3916 LoadAllocationTopHelper(result, scratch, flags); | 4307 LoadAllocationTopHelper(result, scratch, flags); |
3917 | 4308 |
3918 // Align the next allocation. Storing the filler map without checking top is | 4309 // Align the next allocation. Storing the filler map without checking top is |
3919 // always safe because the limit of the heap is always aligned. | 4310 // always safe because the limit of the heap is always aligned. |
| 4311 #ifndef V8_TARGET_ARCH_X32 |
3920 if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) { | 4312 if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) { |
3921 testq(result, Immediate(kDoubleAlignmentMask)); | 4313 testq(result, Immediate(kDoubleAlignmentMask)); |
3922 Check(zero, "Allocation is not double aligned"); | 4314 Check(zero, "Allocation is not double aligned"); |
3923 } | 4315 } |
| 4316 #else |
| 4317 if ((flags & DOUBLE_ALIGNMENT) != 0) { |
| 4318 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); |
| 4319 ASSERT(kPointerAlignment * 2 == kDoubleAlignment); |
| 4320 Label aligned; |
| 4321 testl(result, Immediate(kDoubleAlignmentMask)); |
| 4322 j(zero, &aligned, Label::kNear); |
| 4323 LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex); |
| 4324 movl(Operand(result, 0), kScratchRegister); |
| 4325 addl(result, Immediate(kDoubleSize / 2)); |
| 4326 bind(&aligned); |
| 4327 } |
| 4328 #endif |
3924 | 4329 |
3925 // Calculate new top and bail out if new space is exhausted. | 4330 // Calculate new top and bail out if new space is exhausted. |
3926 ExternalReference allocation_limit = | 4331 ExternalReference allocation_limit = |
3927 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 4332 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
3928 | 4333 |
3929 // We assume that element_count*element_size + header_size does not | 4334 // We assume that element_count*element_size + header_size does not |
3930 // overflow. | 4335 // overflow. |
3931 lea(result_end, Operand(element_count, element_size, header_size)); | 4336 lea(result_end, Operand(element_count, element_size, header_size)); |
3932 addq(result_end, result); | 4337 addq(result_end, result); |
3933 j(carry, gc_required); | 4338 j(carry, gc_required); |
(...skipping 30 matching lines...) |
3964 // object_size is left unchanged by this function. | 4369 // object_size is left unchanged by this function. |
3965 } | 4370 } |
3966 jmp(gc_required); | 4371 jmp(gc_required); |
3967 return; | 4372 return; |
3968 } | 4373 } |
3969 ASSERT(!result.is(result_end)); | 4374 ASSERT(!result.is(result_end)); |
3970 | 4375 |
3971 // Load address of new object into result. | 4376 // Load address of new object into result. |
3972 LoadAllocationTopHelper(result, scratch, flags); | 4377 LoadAllocationTopHelper(result, scratch, flags); |
3973 | 4378 |
| 4379 // Align the next allocation. Storing the filler map without checking top is |
| 4380 // always safe because the limit of the heap is always aligned. |
| 4381 #ifndef V8_TARGET_ARCH_X32 |
| 4382 if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) { |
| 4383 testq(result, Immediate(kDoubleAlignmentMask)); |
| 4384 Check(zero, "Allocation is not double aligned"); |
| 4385 } |
| 4386 #else |
| 4387 if ((flags & DOUBLE_ALIGNMENT) != 0) { |
| 4388 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); |
| 4389 ASSERT(kPointerAlignment * 2 == kDoubleAlignment); |
| 4390 Label aligned; |
| 4391 testl(result, Immediate(kDoubleAlignmentMask)); |
| 4392 j(zero, &aligned, Label::kNear); |
| 4393 LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex); |
| 4394 movl(Operand(result, 0), kScratchRegister); |
| 4395 addl(result, Immediate(kDoubleSize / 2)); |
| 4396 bind(&aligned); |
| 4397 } |
| 4398 #endif |
| 4399 |
3974 // Calculate new top and bail out if new space is exhausted. | 4400 // Calculate new top and bail out if new space is exhausted. |
3975 ExternalReference allocation_limit = | 4401 ExternalReference allocation_limit = |
3976 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 4402 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
3977 if (!object_size.is(result_end)) { | 4403 if (!object_size.is(result_end)) { |
3978 movq(result_end, object_size); | 4404 movq(result_end, object_size); |
3979 } | 4405 } |
3980 addq(result_end, result); | 4406 addq(result_end, result); |
3981 j(carry, gc_required); | 4407 j(carry, gc_required); |
3982 Operand limit_operand = ExternalOperand(allocation_limit); | 4408 Operand limit_operand = ExternalOperand(allocation_limit); |
3983 cmpq(result_end, limit_operand); | 4409 cmpq(result_end, limit_operand); |
3984 j(above, gc_required); | 4410 j(above, gc_required); |
3985 | 4411 |
3986 // Update allocation top. | 4412 // Update allocation top. |
3987 UpdateAllocationTopHelper(result_end, scratch, flags); | 4413 UpdateAllocationTopHelper(result_end, scratch, flags); |
3988 | 4414 |
3989 // Align the next allocation. Storing the filler map without checking top is | |
3990 // always safe because the limit of the heap is always aligned. | |
3991 if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) { | |
3992 testq(result, Immediate(kDoubleAlignmentMask)); | |
3993 Check(zero, "Allocation is not double aligned"); | |
3994 } | |
3995 | |
3996 // Tag the result if requested. | 4415 // Tag the result if requested. |
3997 if ((flags & TAG_OBJECT) != 0) { | 4416 if ((flags & TAG_OBJECT) != 0) { |
3998 addq(result, Immediate(kHeapObjectTag)); | 4417 addq(result, Immediate(kHeapObjectTag)); |
3999 } | 4418 } |
4000 } | 4419 } |
4001 | 4420 |
4002 | 4421 |
4003 void MacroAssembler::UndoAllocationInNewSpace(Register object) { | 4422 void MacroAssembler::UndoAllocationInNewSpace(Register object) { |
4004 ExternalReference new_space_allocation_top = | 4423 ExternalReference new_space_allocation_top = |
4005 ExternalReference::new_space_allocation_top_address(isolate()); | 4424 ExternalReference::new_space_allocation_top_address(isolate()); |
(...skipping 396 matching lines...) |
4402 void MacroAssembler::PrepareCallCFunction(int num_arguments) { | 4821 void MacroAssembler::PrepareCallCFunction(int num_arguments) { |
4403 int frame_alignment = OS::ActivationFrameAlignment(); | 4822 int frame_alignment = OS::ActivationFrameAlignment(); |
4404 ASSERT(frame_alignment != 0); | 4823 ASSERT(frame_alignment != 0); |
4405 ASSERT(num_arguments >= 0); | 4824 ASSERT(num_arguments >= 0); |
4406 | 4825 |
4407 // Make stack end at alignment and allocate space for arguments and old rsp. | 4826 // Make stack end at alignment and allocate space for arguments and old rsp. |
4408 movq(kScratchRegister, rsp); | 4827 movq(kScratchRegister, rsp); |
4409 ASSERT(IsPowerOf2(frame_alignment)); | 4828 ASSERT(IsPowerOf2(frame_alignment)); |
4410 int argument_slots_on_stack = | 4829 int argument_slots_on_stack = |
4411 ArgumentStackSlotsForCFunctionCall(num_arguments); | 4830 ArgumentStackSlotsForCFunctionCall(num_arguments); |
4412 subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize)); | 4831 __q subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize)); |
4413 and_(rsp, Immediate(-frame_alignment)); | 4832 and_(rsp, Immediate(-frame_alignment)); |
| 4833 #ifndef V8_TARGET_ARCH_X32 |
4414 movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister); | 4834 movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister); |
| 4835 #else |
| 4836 movq(Operand(rsp, argument_slots_on_stack * kHWRegSize), kScratchRegister); |
| 4837 #endif |
4415 } | 4838 } |
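PrepareCallCFunction reserves the argument slots plus one extra word, aligns rsp downward, and parks the caller's rsp in that extra slot; CallCFunction's final movq restores it with a single load. A sketch of the bookkeeping in 8-byte slots (matching x64; on X32 the saved slot is kHWRegSize wide, which is what the #else arm adjusts):

    #include <cstdint>

    uint64_t* PrepareCall(uint64_t* rsp, int argument_slots, uint64_t alignment) {
      uint64_t old_rsp = reinterpret_cast<uint64_t>(rsp);  // movq(kScratchRegister, rsp)
      rsp -= argument_slots + 1;                           // subq(rsp, Immediate(...))
      rsp = reinterpret_cast<uint64_t*>(
          reinterpret_cast<uint64_t>(rsp) & ~(alignment - 1));  // and_(rsp, ...)
      rsp[argument_slots] = old_rsp;  // movq(Operand(rsp, slots * kPointerSize), ...)
      return rsp;
    }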
4416 | 4839 |
4417 | 4840 |
4418 void MacroAssembler::CallCFunction(ExternalReference function, | 4841 void MacroAssembler::CallCFunction(ExternalReference function, |
4419 int num_arguments) { | 4842 int num_arguments) { |
4420 LoadAddress(rax, function); | 4843 LoadAddress(rax, function); |
4421 CallCFunction(rax, num_arguments); | 4844 CallCFunction(rax, num_arguments); |
4422 } | 4845 } |
4423 | 4846 |
4424 | 4847 |
4425 void MacroAssembler::CallCFunction(Register function, int num_arguments) { | 4848 void MacroAssembler::CallCFunction(Register function, int num_arguments) { |
4426 ASSERT(has_frame()); | 4849 ASSERT(has_frame()); |
4427 // Check stack alignment. | 4850 // Check stack alignment. |
4428 if (emit_debug_code()) { | 4851 if (emit_debug_code()) { |
4429 CheckStackAlignment(); | 4852 CheckStackAlignment(); |
4430 } | 4853 } |
4431 | 4854 |
4432 call(function); | 4855 call(function); |
4433 ASSERT(OS::ActivationFrameAlignment() != 0); | 4856 ASSERT(OS::ActivationFrameAlignment() != 0); |
4434 ASSERT(num_arguments >= 0); | 4857 ASSERT(num_arguments >= 0); |
4435 int argument_slots_on_stack = | 4858 int argument_slots_on_stack = |
4436 ArgumentStackSlotsForCFunctionCall(num_arguments); | 4859 ArgumentStackSlotsForCFunctionCall(num_arguments); |
4437 movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize)); | 4860 __q movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize)); |
4438 } | 4861 } |
4439 | 4862 |
4440 | 4863 |
4441 bool AreAliased(Register r1, Register r2, Register r3, Register r4) { | 4864 bool AreAliased(Register r1, Register r2, Register r3, Register r4) { |
4442 if (r1.is(r2)) return true; | 4865 if (r1.is(r2)) return true; |
4443 if (r1.is(r3)) return true; | 4866 if (r1.is(r3)) return true; |
4444 if (r1.is(r4)) return true; | 4867 if (r1.is(r4)) return true; |
4445 if (r2.is(r3)) return true; | 4868 if (r2.is(r3)) return true; |
4446 if (r2.is(r4)) return true; | 4869 if (r2.is(r4)) return true; |
4447 if (r3.is(r4)) return true; | 4870 if (r3.is(r4)) return true; |
(...skipping 276 matching lines...) |
4724 movq(kScratchRegister, new_space_start); | 5147 movq(kScratchRegister, new_space_start); |
4725 cmpq(scratch_reg, kScratchRegister); | 5148 cmpq(scratch_reg, kScratchRegister); |
4726 j(less, &no_info_available); | 5149 j(less, &no_info_available); |
4727 cmpq(scratch_reg, ExternalOperand(new_space_allocation_top)); | 5150 cmpq(scratch_reg, ExternalOperand(new_space_allocation_top)); |
4728 j(greater, &no_info_available); | 5151 j(greater, &no_info_available); |
4729 CompareRoot(MemOperand(scratch_reg, -AllocationSiteInfo::kSize), | 5152 CompareRoot(MemOperand(scratch_reg, -AllocationSiteInfo::kSize), |
4730 Heap::kAllocationSiteInfoMapRootIndex); | 5153 Heap::kAllocationSiteInfoMapRootIndex); |
4731 bind(&no_info_available); | 5154 bind(&no_info_available); |
4732 } | 5155 } |
4733 | 5156 |
| 5157 #undef __n |
| 5158 #undef __q |
| 5159 #undef __k |
4734 | 5160 |
4735 } } // namespace v8::internal | 5161 } } // namespace v8::internal |
4736 | 5162 |
4737 #endif // V8_TARGET_ARCH_X64 | 5163 #endif // V8_TARGET_ARCH_X64 |