OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_PPC | 5 #if V8_TARGET_ARCH_PPC |
6 | 6 |
7 #include "src/codegen.h" | 7 #include "src/codegen.h" |
8 #include "src/debug/debug.h" | 8 #include "src/debug/debug.h" |
9 #include "src/deoptimizer.h" | 9 #include "src/deoptimizer.h" |
10 #include "src/full-codegen/full-codegen.h" | 10 #include "src/full-codegen/full-codegen.h" |
(...skipping 20 matching lines...) Expand all Loading... |
31 | 31 |
32 // Make sure we operate in the context of the called function (for example | 32 // Make sure we operate in the context of the called function (for example |
33 // ConstructStubs implemented in C++ will be run in the context of the caller | 33 // ConstructStubs implemented in C++ will be run in the context of the caller |
34 // instead of the callee, due to the way that [[Construct]] is defined for | 34 // instead of the callee, due to the way that [[Construct]] is defined for |
35 // ordinary functions). | 35 // ordinary functions). |
36 __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset)); | 36 __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset)); |
37 | 37 |
38 // Insert extra arguments. | 38 // Insert extra arguments. |
39 const int num_extra_args = 2; | 39 const int num_extra_args = 2; |
40 __ Push(r4, r6); | 40 __ Push(r4, r6); |
| 41 |
41 // JumpToExternalReference expects r3 to contain the number of arguments | 42 // JumpToExternalReference expects r3 to contain the number of arguments |
42 // including the receiver and the extra arguments. | 43 // including the receiver and the extra arguments. |
43 __ addi(r3, r3, Operand(num_extra_args + 1)); | 44 __ addi(r3, r3, Operand(num_extra_args + 1)); |
44 | 45 |
45 __ JumpToExternalReference(ExternalReference(id, masm->isolate())); | 46 __ JumpToExternalReference(ExternalReference(id, masm->isolate())); |
46 } | 47 } |
47 | 48 |
48 | 49 |
49 // Load the built-in InternalArray function from the current context. | 50 // Load the built-in InternalArray function from the current context. |
50 static void GenerateLoadInternalArrayFunction(MacroAssembler* masm, | 51 static void GenerateLoadInternalArrayFunction(MacroAssembler* masm, |
(...skipping 64 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
115 __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); | 116 __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); |
116 ArrayConstructorStub stub(masm->isolate()); | 117 ArrayConstructorStub stub(masm->isolate()); |
117 __ TailCallStub(&stub); | 118 __ TailCallStub(&stub); |
118 } | 119 } |
119 | 120 |
120 | 121 |
121 // static | 122 // static |
122 void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) { | 123 void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) { |
123 // ----------- S t a t e ------------- | 124 // ----------- S t a t e ------------- |
124 // -- r3 : number of arguments | 125 // -- r3 : number of arguments |
| 126 // -- r4 : function |
| 127 // -- cp : context |
125 // -- lr : return address | 128 // -- lr : return address |
126 // -- sp[(argc - n) * 8] : arg[n] (zero-based) | 129 // -- sp[(argc - n) * 8] : arg[n] (zero-based) |
127 // -- sp[(argc + 1) * 8] : receiver | 130 // -- sp[(argc + 1) * 8] : receiver |
128 // ----------------------------------- | 131 // ----------------------------------- |
129 Condition const cond_done = (kind == MathMaxMinKind::kMin) ? lt : gt; | 132 Condition const cond_done = (kind == MathMaxMinKind::kMin) ? lt : gt; |
130 Heap::RootListIndex const root_index = | 133 Heap::RootListIndex const root_index = |
131 (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex | 134 (kind == MathMaxMinKind::kMin) ? Heap::kInfinityValueRootIndex |
132 : Heap::kMinusInfinityValueRootIndex; | 135 : Heap::kMinusInfinityValueRootIndex; |
133 DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1; | 136 DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1; |
134 | 137 |
135 // Load the accumulator with the default return value (either -Infinity or | 138 // Load the accumulator with the default return value (either -Infinity or |
136 // +Infinity), with the tagged value in r4 and the double value in d1. | 139 // +Infinity), with the tagged value in r8 and the double value in d1. |
137 __ LoadRoot(r4, root_index); | 140 __ LoadRoot(r8, root_index); |
138 __ lfd(d1, FieldMemOperand(r4, HeapNumber::kValueOffset)); | 141 __ lfd(d1, FieldMemOperand(r8, HeapNumber::kValueOffset)); |
139 | 142 |
140 // Setup state for loop | 143 // Setup state for loop |
141 // r5: address of arg[0] + kPointerSize | 144 // r5: address of arg[0] + kPointerSize |
142 // r6: number of slots to drop at exit (arguments + receiver) | 145 // r6: number of slots to drop at exit (arguments + receiver) |
143 __ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2)); | 146 __ addi(r7, r3, Operand(1)); |
144 __ add(r5, sp, r5); | |
145 __ addi(r6, r3, Operand(1)); | |
146 | 147 |
147 Label done_loop, loop; | 148 Label done_loop, loop; |
148 __ bind(&loop); | 149 __ bind(&loop); |
149 { | 150 { |
150 // Check if all parameters done. | 151 // Check if all parameters done. |
151 __ cmpl(r5, sp); | 152 __ subi(r3, r3, Operand(1)); |
152 __ ble(&done_loop); | 153 __ cmpi(r3, Operand::Zero()); |
| 154 __ blt(&done_loop); |
153 | 155 |
154 // Load the next parameter tagged value into r3. | 156 // Load the next parameter tagged value into r5. |
155 __ LoadPU(r3, MemOperand(r5, -kPointerSize)); | 157 __ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2)); |
| 158 __ LoadPX(r5, MemOperand(sp, r5)); |
156 | 159 |
157 // Load the double value of the parameter into d2, maybe converting the | 160 // Load the double value of the parameter into d2, maybe converting the |
158 // parameter to a number first using the ToNumber builtin if necessary. | 161 // parameter to a number first using the ToNumber builtin if necessary. |
159 Label convert, convert_smi, convert_number, done_convert; | 162 Label convert, convert_smi, convert_number, done_convert; |
160 __ bind(&convert); | 163 __ bind(&convert); |
161 __ JumpIfSmi(r3, &convert_smi); | 164 __ JumpIfSmi(r5, &convert_smi); |
162 __ LoadP(r7, FieldMemOperand(r3, HeapObject::kMapOffset)); | 165 __ LoadP(r6, FieldMemOperand(r5, HeapObject::kMapOffset)); |
163 __ JumpIfRoot(r7, Heap::kHeapNumberMapRootIndex, &convert_number); | 166 __ JumpIfRoot(r6, Heap::kHeapNumberMapRootIndex, &convert_number); |
164 { | 167 { |
165 // Parameter is not a Number, use the ToNumber builtin to convert it. | 168 // Parameter is not a Number, use the ToNumber builtin to convert it. |
166 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 169 FrameScope scope(masm, StackFrame::MANUAL); |
167 __ SmiTag(r6); | 170 __ PushStandardFrame(r4); |
168 __ Push(r4, r5, r6); | 171 __ SmiTag(r3); |
| 172 __ SmiTag(r7); |
| 173 __ Push(r3, r7, r8); |
| 174 __ mr(r3, r5); |
169 __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET); | 175 __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET); |
170 __ Pop(r4, r5, r6); | 176 __ mr(r5, r3); |
171 __ SmiUntag(r6); | 177 __ Pop(r3, r7, r8); |
172 { | 178 { |
173 // Restore the double accumulator value (d1). | 179 // Restore the double accumulator value (d1). |
174 Label done_restore; | 180 Label done_restore; |
175 __ SmiToDouble(d1, r4); | 181 __ SmiToDouble(d1, r8); |
176 __ JumpIfSmi(r4, &done_restore); | 182 __ JumpIfSmi(r8, &done_restore); |
177 __ lfd(d1, FieldMemOperand(r4, HeapNumber::kValueOffset)); | 183 __ lfd(d1, FieldMemOperand(r8, HeapNumber::kValueOffset)); |
178 __ bind(&done_restore); | 184 __ bind(&done_restore); |
179 } | 185 } |
| 186 __ SmiUntag(r7); |
| 187 __ SmiUntag(r3); |
| 188 // TODO(Jaideep): Add macro function for PopStandardFrame |
| 189 if (FLAG_enable_embedded_constant_pool) { |
| 190 __ Pop(r0, fp, kConstantPoolRegister, cp, r4); |
| 191 } else { |
| 192 __ Pop(r0, fp, cp, r4); |
| 193 } |
| 194 __ mtlr(r0); |
180 } | 195 } |
181 __ b(&convert); | 196 __ b(&convert); |
182 __ bind(&convert_number); | 197 __ bind(&convert_number); |
183 __ lfd(d2, FieldMemOperand(r3, HeapNumber::kValueOffset)); | 198 __ lfd(d2, FieldMemOperand(r5, HeapNumber::kValueOffset)); |
184 __ b(&done_convert); | 199 __ b(&done_convert); |
185 __ bind(&convert_smi); | 200 __ bind(&convert_smi); |
186 __ SmiToDouble(d2, r3); | 201 __ SmiToDouble(d2, r5); |
187 __ bind(&done_convert); | 202 __ bind(&done_convert); |
188 | 203 |
189 // Perform the actual comparison with the accumulator value on the left hand | 204 // Perform the actual comparison with the accumulator value on the left hand |
190 // side (d1) and the next parameter value on the right hand side (d2). | 205 // side (d1) and the next parameter value on the right hand side (d2). |
191 Label compare_nan, compare_swap; | 206 Label compare_nan, compare_swap; |
192 __ fcmpu(d1, d2); | 207 __ fcmpu(d1, d2); |
193 __ bunordered(&compare_nan); | 208 __ bunordered(&compare_nan); |
194 __ b(cond_done, &loop); | 209 __ b(cond_done, &loop); |
195 __ b(CommuteCondition(cond_done), &compare_swap); | 210 __ b(CommuteCondition(cond_done), &compare_swap); |
196 | 211 |
197 // Left and right hand side are equal, check for -0 vs. +0. | 212 // Left and right hand side are equal, check for -0 vs. +0. |
198 __ TestDoubleIsMinusZero(reg, r7, r8); | 213 __ TestDoubleIsMinusZero(reg, r9, r0); |
199 __ bne(&loop); | 214 __ bne(&loop); |
200 | 215 |
201 // Update accumulator. Result is on the right hand side. | 216 // Update accumulator. Result is on the right hand side. |
202 __ bind(&compare_swap); | 217 __ bind(&compare_swap); |
203 __ fmr(d1, d2); | 218 __ fmr(d1, d2); |
204 __ mr(r4, r3); | 219 __ mr(r8, r5); |
205 __ b(&loop); | 220 __ b(&loop); |
206 | 221 |
207 // At least one side is NaN, which means that the result will be NaN too. | 222 // At least one side is NaN, which means that the result will be NaN too. |
208 // We still need to visit the rest of the arguments. | 223 // We still need to visit the rest of the arguments. |
209 __ bind(&compare_nan); | 224 __ bind(&compare_nan); |
210 __ LoadRoot(r4, Heap::kNanValueRootIndex); | 225 __ LoadRoot(r8, Heap::kNanValueRootIndex); |
211 __ lfd(d1, FieldMemOperand(r4, HeapNumber::kValueOffset)); | 226 __ lfd(d1, FieldMemOperand(r8, HeapNumber::kValueOffset)); |
212 __ b(&loop); | 227 __ b(&loop); |
213 } | 228 } |
214 | 229 |
215 __ bind(&done_loop); | 230 __ bind(&done_loop); |
216 __ mr(r3, r4); | 231 __ mr(r3, r8); |
217 __ Drop(r6); | 232 __ Drop(r7); |
218 __ Ret(); | 233 __ Ret(); |
219 } | 234 } |
220 | 235 |
221 // static | 236 // static |
222 void Builtins::Generate_NumberConstructor(MacroAssembler* masm) { | 237 void Builtins::Generate_NumberConstructor(MacroAssembler* masm) { |
223 // ----------- S t a t e ------------- | 238 // ----------- S t a t e ------------- |
224 // -- r3 : number of arguments | 239 // -- r3 : number of arguments |
225 // -- r4 : constructor function | 240 // -- r4 : constructor function |
226 // -- lr : return address | 241 // -- lr : return address |
227 // -- sp[(argc - n - 1) * 4] : arg[n] (zero based) | 242 // -- sp[(argc - n - 1) * 4] : arg[n] (zero based) |
(...skipping 2692 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2920 __ bkpt(0); | 2935 __ bkpt(0); |
2921 } | 2936 } |
2922 } | 2937 } |
2923 | 2938 |
2924 | 2939 |
2925 #undef __ | 2940 #undef __ |
2926 } // namespace internal | 2941 } // namespace internal |
2927 } // namespace v8 | 2942 } // namespace v8 |
2928 | 2943 |
2929 #endif // V8_TARGET_ARCH_PPC | 2944 #endif // V8_TARGET_ARCH_PPC |
OLD | NEW |