OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 12 matching lines...) |
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | 27 |
28 #include "v8.h" | 28 #include "v8.h" |
29 | 29 |
30 #if defined(V8_TARGET_ARCH_IA32) | 30 #if defined(V8_TARGET_ARCH_IA32) |
31 | 31 |
32 #include "ia32/lithium-codegen-ia32.h" | 32 #include "ia32/lithium-codegen-ia32.h" |
33 #include "ic.h" | |
34 #include "code-stubs.h" | 33 #include "code-stubs.h" |
35 #include "deoptimizer.h" | 34 #include "deoptimizer.h" |
36 #include "stub-cache.h" | 35 #include "stub-cache.h" |
37 #include "codegen.h" | 36 #include "codegen.h" |
38 | 37 |
39 namespace v8 { | 38 namespace v8 { |
40 namespace internal { | 39 namespace internal { |
41 | 40 |
42 | 41 |
43 // When invoking builtins, we need to record the safepoint in the middle of | 42 // When invoking builtins, we need to record the safepoint in the middle of |
(...skipping 20 matching lines...) |
64 Safepoint::DeoptMode deopt_mode_; | 63 Safepoint::DeoptMode deopt_mode_; |
65 }; | 64 }; |
66 | 65 |
67 | 66 |
68 #define __ masm()-> | 67 #define __ masm()-> |
69 | 68 |
70 bool LCodeGen::GenerateCode() { | 69 bool LCodeGen::GenerateCode() { |
71 HPhase phase("Z_Code generation", chunk()); | 70 HPhase phase("Z_Code generation", chunk()); |
72 ASSERT(is_unused()); | 71 ASSERT(is_unused()); |
73 status_ = GENERATING; | 72 status_ = GENERATING; |
| 73 CpuFeatures::Scope scope(SSE2); |
74 | 74 |
75 CodeStub::GenerateFPStubs(); | 75 CodeStub::GenerateFPStubs(); |
76 | 76 |
77 // Open a frame scope to indicate that there is a frame on the stack. The | 77 // Open a frame scope to indicate that there is a frame on the stack. The |
78 // MANUAL indicates that the scope shouldn't actually generate code to set up | 78 // MANUAL indicates that the scope shouldn't actually generate code to set up |
79 // the frame (that is done in GeneratePrologue). | 79 // the frame (that is done in GeneratePrologue). |
80 FrameScope frame_scope(masm_, StackFrame::MANUAL); | 80 FrameScope frame_scope(masm_, StackFrame::MANUAL); |
81 | 81 |
82 dynamic_frame_alignment_ = info()->IsOptimizing() && | 82 dynamic_frame_alignment_ = (chunk()->num_double_slots() > 2 && |
83 ((chunk()->num_double_slots() > 2 && | 83 !chunk()->graph()->is_recursive()) || |
84 !chunk()->graph()->is_recursive()) || | 84 !info()->osr_ast_id().IsNone(); |
85 !info()->osr_ast_id().IsNone()); | |
86 | 85 |
87 return GeneratePrologue() && | 86 return GeneratePrologue() && |
88 GenerateBody() && | 87 GenerateBody() && |
89 GenerateDeferredCode() && | 88 GenerateDeferredCode() && |
90 GenerateJumpTable() && | |
91 GenerateSafepointTable(); | 89 GenerateSafepointTable(); |
92 } | 90 } |
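Note: the NEW column opens one CpuFeatures::Scope for SSE2 at the top of GenerateCode() (its line 73), so every later emitter may assume SSE2; that is also why the per-function SSE2 scopes disappear further down in this diff. As a hedged sketch of the RAII idea only (V8's real CpuFeatures::Scope lives in the assembler layer and tracks a feature bitmask; the names below are hypothetical):

    // Illustrative RAII sketch, not V8's actual implementation.
    #include <cassert>

    struct CpuFeaturesSketch {
      static bool sse2_allowed;  // may SSE2 instructions be emitted?
      struct Scope {
        bool saved_;
        Scope() : saved_(sse2_allowed) { sse2_allowed = true; }  // enable for this extent
        ~Scope() { sse2_allowed = saved_; }                      // restore on exit
      };
    };
    bool CpuFeaturesSketch::sse2_allowed = false;

    int main() {
      {
        CpuFeaturesSketch::Scope scope;  // like CpuFeatures::Scope scope(SSE2)
        assert(CpuFeaturesSketch::sse2_allowed);
      }
      assert(!CpuFeaturesSketch::sse2_allowed);  // permission revoked at scope exit
      return 0;
    }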
93 | 91 |
94 | 92 |
95 void LCodeGen::FinishCode(Handle<Code> code) { | 93 void LCodeGen::FinishCode(Handle<Code> code) { |
96 ASSERT(is_done()); | 94 ASSERT(is_done()); |
97 code->set_stack_slots(GetStackSlotCount()); | 95 code->set_stack_slots(GetStackSlotCount()); |
98 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); | 96 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); |
99 PopulateDeoptimizationData(code); | 97 PopulateDeoptimizationData(code); |
100 if (!info()->IsStub()) { | 98 Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code); |
101 Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code); | |
102 } | |
103 } | 99 } |
104 | 100 |
105 | 101 |
106 void LCodeGen::Abort(const char* reason) { | 102 void LCodeGen::Abort(const char* reason) { |
107 info()->set_bailout_reason(reason); | 103 info()->set_bailout_reason(reason); |
108 status_ = ABORTED; | 104 status_ = ABORTED; |
109 } | 105 } |
110 | 106 |
111 | 107 |
112 void LCodeGen::Comment(const char* format, ...) { | 108 void LCodeGen::Comment(const char* format, ...) { |
(...skipping 10 matching lines...) |
123 size_t length = builder.position(); | 119 size_t length = builder.position(); |
124 Vector<char> copy = Vector<char>::New(length + 1); | 120 Vector<char> copy = Vector<char>::New(length + 1); |
125 memcpy(copy.start(), builder.Finalize(), copy.length()); | 121 memcpy(copy.start(), builder.Finalize(), copy.length()); |
126 masm()->RecordComment(copy.start()); | 122 masm()->RecordComment(copy.start()); |
127 } | 123 } |
128 | 124 |
129 | 125 |
130 bool LCodeGen::GeneratePrologue() { | 126 bool LCodeGen::GeneratePrologue() { |
131 ASSERT(is_generating()); | 127 ASSERT(is_generating()); |
132 | 128 |
133 if (info()->IsOptimizing()) { | 129 ProfileEntryHookStub::MaybeCallEntryHook(masm_); |
134 ProfileEntryHookStub::MaybeCallEntryHook(masm_); | |
135 | 130 |
136 #ifdef DEBUG | 131 #ifdef DEBUG |
137 if (strlen(FLAG_stop_at) > 0 && | 132 if (strlen(FLAG_stop_at) > 0 && |
138 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { | 133 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { |
139 __ int3(); | 134 __ int3(); |
140 } | 135 } |
141 #endif | 136 #endif |
142 | 137 |
143 // Strict mode functions and builtins need to replace the receiver | 138 // Strict mode functions and builtins need to replace the receiver |
144 // with undefined when called as functions (without an explicit | 139 // with undefined when called as functions (without an explicit |
145 // receiver object). ecx is zero for method calls and non-zero for | 140 // receiver object). ecx is zero for method calls and non-zero for |
146 // function calls. | 141 // function calls. |
147 if (!info_->is_classic_mode() || info_->is_native()) { | 142 if (!info_->is_classic_mode() || info_->is_native()) { |
148 Label ok; | 143 Label ok; |
149 __ test(ecx, Operand(ecx)); | 144 __ test(ecx, Operand(ecx)); |
150 __ j(zero, &ok, Label::kNear); | 145 __ j(zero, &ok, Label::kNear); |
151 // +1 for return address. | 146 // +1 for return address. |
152 int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize; | 147 int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize; |
153 __ mov(Operand(esp, receiver_offset), | 148 __ mov(Operand(esp, receiver_offset), |
154 Immediate(isolate()->factory()->undefined_value())); | 149 Immediate(isolate()->factory()->undefined_value())); |
155 __ bind(&ok); | 150 __ bind(&ok); |
156 } | 151 } |
157 | 152 |
158 if (dynamic_frame_alignment_) { | |
159 // Move state of dynamic frame alignment into edx. | |
160 __ mov(edx, Immediate(kNoAlignmentPadding)); | |
161 | 153 |
162 Label do_not_pad, align_loop; | 154 if (dynamic_frame_alignment_) { |
163 STATIC_ASSERT(kDoubleSize == 2 * kPointerSize); | 155 // Move state of dynamic frame alignment into edx. |
164 // Align esp + 4 to a multiple of 2 * kPointerSize. | 156 __ mov(edx, Immediate(kNoAlignmentPadding)); |
165 __ test(esp, Immediate(kPointerSize)); | |
166 __ j(not_zero, &do_not_pad, Label::kNear); | |
167 __ push(Immediate(0)); | |
168 __ mov(ebx, esp); | |
169 __ mov(edx, Immediate(kAlignmentPaddingPushed)); | |
170 // Copy arguments, receiver, and return address. | |
171 __ mov(ecx, Immediate(scope()->num_parameters() + 2)); | |
172 | 157 |
173 __ bind(&align_loop); | 158 Label do_not_pad, align_loop; |
174 __ mov(eax, Operand(ebx, 1 * kPointerSize)); | 159 STATIC_ASSERT(kDoubleSize == 2 * kPointerSize); |
175 __ mov(Operand(ebx, 0), eax); | 160 // Align esp + 4 to a multiple of 2 * kPointerSize. |
176 __ add(Operand(ebx), Immediate(kPointerSize)); | 161 __ test(esp, Immediate(kPointerSize)); |
177 __ dec(ecx); | 162 __ j(not_zero, &do_not_pad, Label::kNear); |
178 __ j(not_zero, &align_loop, Label::kNear); | 163 __ push(Immediate(0)); |
179 __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue)); | 164 __ mov(ebx, esp); |
180 __ bind(&do_not_pad); | 165 __ mov(edx, Immediate(kAlignmentPaddingPushed)); |
181 } | 166 // Copy arguments, receiver, and return address. |
| 167 __ mov(ecx, Immediate(scope()->num_parameters() + 2)); |
| 168 |
| 169 __ bind(&align_loop); |
| 170 __ mov(eax, Operand(ebx, 1 * kPointerSize)); |
| 171 __ mov(Operand(ebx, 0), eax); |
| 172 __ add(Operand(ebx), Immediate(kPointerSize)); |
| 173 __ dec(ecx); |
| 174 __ j(not_zero, &align_loop, Label::kNear); |
| 175 __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue)); |
| 176 __ bind(&do_not_pad); |
182 } | 177 } |
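The align_loop above (both columns carry the same code; only the surrounding structure changes) pushes one pad word, slides the return address, receiver, and parameters one slot toward the new esp, then writes a zap marker into the vacated slot. A tiny array simulation of that copy, with hypothetical stack values and a made-up kAlignmentZapValue:

    #include <cstdio>

    int main() {
      const unsigned kAlignmentZapValue = 0xbaadf00d;  // hypothetical marker value
      // Index 0 is the slot created by push(Immediate(0)); layout before the copy:
      unsigned stack[5] = {0, 0x1111 /* ret addr */, 0x2222 /* receiver */, 3, 4};
      int num_parameters = 2;
      // Copy arguments, receiver, and return address one slot toward esp,
      // exactly num_parameters + 2 words, as the align_loop does.
      for (int i = 0; i < num_parameters + 2; ++i) stack[i] = stack[i + 1];
      stack[4] = kAlignmentZapValue;  // zap the vacated top slot
      for (unsigned v : stack) std::printf("%#x\n", v);
      return 0;
    }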
183 | 178 |
184 info()->set_prologue_offset(masm_->pc_offset()); | 179 info()->set_prologue_offset(masm_->pc_offset()); |
185 if (NeedsEagerFrame()) { | 180 __ push(ebp); // Caller's frame pointer. |
186 ASSERT(!frame_is_built_); | 181 __ mov(ebp, esp); |
187 frame_is_built_ = true; | 182 __ push(esi); // Callee's context. |
188 __ push(ebp); // Caller's frame pointer. | 183 __ push(edi); // Callee's JS function. |
189 __ mov(ebp, esp); | |
190 __ push(esi); // Callee's context. | |
191 if (info()->IsStub()) { | |
192 __ push(Immediate(Smi::FromInt(StackFrame::STUB))); | |
193 } else { | |
194 __ push(edi); // Callee's JS function. | |
195 } | |
196 } | |
197 | 184 |
198 if (info()->IsOptimizing() && | 185 if (dynamic_frame_alignment_ && FLAG_debug_code) { |
199 dynamic_frame_alignment_ && | |
200 FLAG_debug_code) { | |
201 __ test(esp, Immediate(kPointerSize)); | 186 __ test(esp, Immediate(kPointerSize)); |
202 __ Assert(zero, "frame is expected to be aligned"); | 187 __ Assert(zero, "frame is expected to be aligned"); |
203 } | 188 } |
204 | 189 |
205 // Reserve space for the stack slots needed by the code. | 190 // Reserve space for the stack slots needed by the code. |
206 int slots = GetStackSlotCount(); | 191 int slots = GetStackSlotCount(); |
207 ASSERT(slots != 0 || !info()->IsOptimizing()); | 192 ASSERT_GE(slots, 1); |
208 if (slots > 0) { | 193 if (slots == 1) { |
209 if (slots == 1) { | 194 if (dynamic_frame_alignment_) { |
210 if (dynamic_frame_alignment_) { | 195 __ push(edx); |
211 __ push(edx); | 196 } else { |
212 } else { | 197 __ push(Immediate(kNoAlignmentPadding)); |
213 __ push(Immediate(kNoAlignmentPadding)); | 198 } |
| 199 } else { |
| 200 if (FLAG_debug_code) { |
| 201 __ mov(Operand(eax), Immediate(slots)); |
| 202 Label loop; |
| 203 __ bind(&loop); |
| 204 __ push(Immediate(kSlotsZapValue)); |
| 205 __ dec(eax); |
| 206 __ j(not_zero, &loop); |
| 207 } else { |
| 208 __ sub(Operand(esp), Immediate(slots * kPointerSize)); |
| 209 #ifdef _MSC_VER |
| 210 // On windows, you may not access the stack more than one page below |
| 211 // the most recently mapped page. To make the allocated area randomly |
| 212 // accessible, we write to each page in turn (the value is irrelevant). |
| 213 const int kPageSize = 4 * KB; |
| 214 for (int offset = slots * kPointerSize - kPageSize; |
| 215 offset > 0; |
| 216 offset -= kPageSize) { |
| 217 __ mov(Operand(esp, offset), eax); |
214 } | 218 } |
| 219 #endif |
| 220 } |
| 221 |
| 222 // Store dynamic frame alignment state in the first local. |
| 223 if (dynamic_frame_alignment_) { |
| 224 __ mov(Operand(ebp, |
| 225 JavaScriptFrameConstants::kDynamicAlignmentStateOffset), |
| 226 edx); |
215 } else { | 227 } else { |
216 if (FLAG_debug_code) { | 228 __ mov(Operand(ebp, |
217 __ mov(Operand(eax), Immediate(slots)); | 229 JavaScriptFrameConstants::kDynamicAlignmentStateOffset), |
218 Label loop; | 230 Immediate(kNoAlignmentPadding)); |
219 __ bind(&loop); | |
220 __ push(Immediate(kSlotsZapValue)); | |
221 __ dec(eax); | |
222 __ j(not_zero, &loop); | |
223 } else { | |
224 __ sub(Operand(esp), Immediate(slots * kPointerSize)); | |
225 #ifdef _MSC_VER | |
226 // On windows, you may not access the stack more than one page below | |
227 // the most recently mapped page. To make the allocated area randomly | |
228 // accessible, we write to each page in turn (the value is irrelevant). | |
229 const int kPageSize = 4 * KB; | |
230 for (int offset = slots * kPointerSize - kPageSize; | |
231 offset > 0; | |
232 offset -= kPageSize) { | |
233 __ mov(Operand(esp, offset), eax); | |
234 } | |
235 #endif | |
236 } | |
237 | |
238 // Store dynamic frame alignment state in the first local. | |
239 if (dynamic_frame_alignment_) { | |
240 __ mov(Operand(ebp, | |
241 JavaScriptFrameConstants::kDynamicAlignmentStateOffset), | |
242 edx); | |
243 } else { | |
244 __ mov(Operand(ebp, | |
245 JavaScriptFrameConstants::kDynamicAlignmentStateOffset), | |
246 Immediate(kNoAlignmentPadding)); | |
247 } | |
248 } | 231 } |
249 } | 232 } |
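The _MSC_VER branch above reserves the whole frame with one sub and then touches one word per page, because Windows only commits guard pages one page at a time. A standalone sketch of the same offset arithmetic (page size and slot count assumed):

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;      // ia32 word size
      const int kPageSize = 4 * 1024;  // assumed 4 KB Windows page
      int slots = 3000;                // hypothetical frame size
      // Mirror the prologue: after sub(esp, slots * kPointerSize), write one
      // word into each page of the reserved area, walking toward esp.
      for (int offset = slots * kPointerSize - kPageSize;
           offset > 0;
           offset -= kPageSize) {
        std::printf("touch [esp + %d]\n", offset);  // stands in for mov(Operand(esp, offset), eax)
      }
      return 0;
    }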
250 | 233 |
251 // Possibly allocate a local context. | 234 // Possibly allocate a local context. |
252 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; | 235 int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; |
253 if (heap_slots > 0) { | 236 if (heap_slots > 0) { |
254 Comment(";;; Allocate local context"); | 237 Comment(";;; Allocate local context"); |
255 // Argument to NewContext is the function, which is still in edi. | 238 // Argument to NewContext is the function, which is still in edi. |
256 __ push(edi); | 239 __ push(edi); |
257 if (heap_slots <= FastNewContextStub::kMaximumSlots) { | 240 if (heap_slots <= FastNewContextStub::kMaximumSlots) { |
258 FastNewContextStub stub(heap_slots); | 241 FastNewContextStub stub(heap_slots); |
259 __ CallStub(&stub); | 242 __ CallStub(&stub); |
260 } else { | 243 } else { |
261 __ CallRuntime(Runtime::kNewFunctionContext, 1); | 244 __ CallRuntime(Runtime::kNewFunctionContext, 1); |
262 } | 245 } |
(...skipping 19 matching lines...) |
282 context_offset, | 265 context_offset, |
283 eax, | 266 eax, |
284 ebx, | 267 ebx, |
285 kDontSaveFPRegs); | 268 kDontSaveFPRegs); |
286 } | 269 } |
287 } | 270 } |
288 Comment(";;; End allocate local context"); | 271 Comment(";;; End allocate local context"); |
289 } | 272 } |
290 | 273 |
291 // Trace the call. | 274 // Trace the call. |
292 if (FLAG_trace && info()->IsOptimizing()) { | 275 if (FLAG_trace) { |
293 // We have not executed any compiled code yet, so esi still holds the | 276 // We have not executed any compiled code yet, so esi still holds the |
294 // incoming context. | 277 // incoming context. |
295 __ CallRuntime(Runtime::kTraceEnter, 0); | 278 __ CallRuntime(Runtime::kTraceEnter, 0); |
296 } | 279 } |
297 return !is_aborted(); | 280 return !is_aborted(); |
298 } | 281 } |
299 | 282 |
300 | 283 |
301 bool LCodeGen::GenerateBody() { | 284 bool LCodeGen::GenerateBody() { |
302 ASSERT(is_generating()); | 285 ASSERT(is_generating()); |
(...skipping 33 matching lines...) |
336 } | 319 } |
337 } | 320 } |
338 instr->CompileToNative(this); | 321 instr->CompileToNative(this); |
339 } | 322 } |
340 } | 323 } |
341 EnsureSpaceForLazyDeopt(); | 324 EnsureSpaceForLazyDeopt(); |
342 return !is_aborted(); | 325 return !is_aborted(); |
343 } | 326 } |
344 | 327 |
345 | 328 |
346 bool LCodeGen::GenerateJumpTable() { | |
347 Label needs_frame_not_call; | |
348 Label needs_frame_is_call; | |
349 for (int i = 0; i < jump_table_.length(); i++) { | |
350 __ bind(&jump_table_[i].label); | |
351 Address entry = jump_table_[i].address; | |
352 if (jump_table_[i].needs_frame) { | |
353 __ push(Immediate(ExternalReference::ForDeoptEntry(entry))); | |
354 if (jump_table_[i].is_lazy_deopt) { | |
355 if (needs_frame_is_call.is_bound()) { | |
356 __ jmp(&needs_frame_is_call); | |
357 } else { | |
358 __ bind(&needs_frame_is_call); | |
359 __ push(esi); | |
360 // This variant of deopt can only be used with stubs. Since we don't | |
361 // have a function pointer to install in the stack frame that we're | |
362 // building, install a special marker there instead. | |
363 ASSERT(info()->IsStub()); | |
364 __ push(Immediate(Smi::FromInt(StackFrame::STUB))); | |
365 // Push a PC inside the function so that the deopt code can find where | |
366 // the deopt comes from. It doesn't have to be the precise return | |
367 // address of a "calling" LAZY deopt, it only has to be somewhere | |
368 // inside the code body. | |
369 Label push_approx_pc; | |
370 __ call(&push_approx_pc); | |
371 __ bind(&push_approx_pc); | |
372 // Push the continuation which was stashed where the ebp should |
373 // be. Replace it with the saved ebp. | |
374 __ push(MemOperand(esp, 3 * kPointerSize)); | |
375 __ mov(MemOperand(esp, 4 * kPointerSize), ebp); | |
376 __ lea(ebp, MemOperand(esp, 4 * kPointerSize)); | |
377 __ ret(0); // Call the continuation without clobbering registers. | |
378 } | |
379 } else { | |
380 if (needs_frame_not_call.is_bound()) { | |
381 __ jmp(&needs_frame_not_call); | |
382 } else { | |
383 __ bind(&needs_frame_not_call); | |
384 __ push(esi); | |
385 // This variant of deopt can only be used with stubs. Since we don't | |
386 // have a function pointer to install in the stack frame that we're | |
387 // building, install a special marker there instead. | |
388 ASSERT(info()->IsStub()); | |
389 __ push(Immediate(Smi::FromInt(StackFrame::STUB))); | |
390 // Push the continuation which was stashed where the ebp should |
391 // be. Replace it with the saved ebp. | |
392 __ push(MemOperand(esp, 2 * kPointerSize)); | |
393 __ mov(MemOperand(esp, 3 * kPointerSize), ebp); | |
394 __ lea(ebp, MemOperand(esp, 3 * kPointerSize)); | |
395 __ ret(0); // Call the continuation without clobbering registers. | |
396 } | |
397 } | |
398 } else { | |
399 if (jump_table_[i].is_lazy_deopt) { | |
400 __ call(entry, RelocInfo::RUNTIME_ENTRY); | |
401 } else { | |
402 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); | |
403 } | |
404 } | |
405 } | |
406 return !is_aborted(); | |
407 } | |
408 | |
409 | |
410 bool LCodeGen::GenerateDeferredCode() { | 329 bool LCodeGen::GenerateDeferredCode() { |
411 ASSERT(is_generating()); | 330 ASSERT(is_generating()); |
412 if (deferred_.length() > 0) { | 331 if (deferred_.length() > 0) { |
413 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { | 332 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { |
414 LDeferredCode* code = deferred_[i]; | 333 LDeferredCode* code = deferred_[i]; |
415 __ bind(code->entry()); | 334 __ bind(code->entry()); |
416 if (NeedsDeferredFrame()) { | |
417 Comment(";;; Deferred build frame", | |
418 code->instruction_index(), | |
419 code->instr()->Mnemonic()); | |
420 ASSERT(!frame_is_built_); | |
421 ASSERT(info()->IsStub()); | |
422 frame_is_built_ = true; | |
423 // Build the frame in such a way that esi isn't trashed. | |
424 __ push(ebp); // Caller's frame pointer. | |
425 __ push(Operand(ebp, StandardFrameConstants::kContextOffset)); | |
426 __ push(Immediate(Smi::FromInt(StackFrame::STUB))); | |
427 __ lea(ebp, Operand(esp, 2 * kPointerSize)); | |
428 } | |
429 Comment(";;; Deferred code @%d: %s.", | 335 Comment(";;; Deferred code @%d: %s.", |
430 code->instruction_index(), | 336 code->instruction_index(), |
431 code->instr()->Mnemonic()); | 337 code->instr()->Mnemonic()); |
432 code->Generate(); | 338 code->Generate(); |
433 if (NeedsDeferredFrame()) { | |
434 Comment(";;; Deferred destroy frame", | |
435 code->instruction_index(), | |
436 code->instr()->Mnemonic()); | |
437 ASSERT(frame_is_built_); | |
438 frame_is_built_ = false; | |
439 __ mov(esp, ebp); | |
440 __ pop(ebp); | |
441 } | |
442 __ jmp(code->exit()); | 339 __ jmp(code->exit()); |
443 } | 340 } |
444 } | 341 } |
445 | 342 |
446 // Deferred code is the last part of the instruction sequence. Mark | 343 // Deferred code is the last part of the instruction sequence. Mark |
447 // the generated code as done unless we bailed out. | 344 // the generated code as done unless we bailed out. |
448 if (!is_aborted()) status_ = DONE; | 345 if (!is_aborted()) status_ = DONE; |
449 return !is_aborted(); | 346 return !is_aborted(); |
450 } | 347 } |
451 | 348 |
452 | 349 |
453 bool LCodeGen::GenerateSafepointTable() { | 350 bool LCodeGen::GenerateSafepointTable() { |
454 ASSERT(is_done()); | 351 ASSERT(is_done()); |
455 if (!info()->IsStub()) { | |
456 // For lazy deoptimization we need space to patch a call after every call. | |
457 // Ensure there is always space for such patching, even if the code ends | |
458 // in a call. | |
459 int target_offset = masm()->pc_offset() + Deoptimizer::patch_size(); | |
460 while (masm()->pc_offset() < target_offset) { | |
461 masm()->nop(); | |
462 } | |
463 } | |
464 safepoints_.Emit(masm(), GetStackSlotCount()); | 352 safepoints_.Emit(masm(), GetStackSlotCount()); |
465 return !is_aborted(); | 353 return !is_aborted(); |
466 } | 354 } |
467 | 355 |
468 | 356 |
469 Register LCodeGen::ToRegister(int index) const { | 357 Register LCodeGen::ToRegister(int index) const { |
470 return Register::FromAllocationIndex(index); | 358 return Register::FromAllocationIndex(index); |
471 } | 359 } |
472 | 360 |
473 | 361 |
474 XMMRegister LCodeGen::ToDoubleRegister(int index) const { | 362 XMMRegister LCodeGen::ToDoubleRegister(int index) const { |
475 return XMMRegister::FromAllocationIndex(index); | 363 return XMMRegister::FromAllocationIndex(index); |
476 } | 364 } |
477 | 365 |
478 | 366 |
479 bool LCodeGen::IsX87TopOfStack(LOperand* op) const { | |
480 return op->IsDoubleRegister(); | |
481 } | |
482 | |
483 | |
484 Register LCodeGen::ToRegister(LOperand* op) const { | 367 Register LCodeGen::ToRegister(LOperand* op) const { |
485 ASSERT(op->IsRegister()); | 368 ASSERT(op->IsRegister()); |
486 return ToRegister(op->index()); | 369 return ToRegister(op->index()); |
487 } | 370 } |
488 | 371 |
489 | 372 |
490 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { | 373 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { |
491 ASSERT(op->IsDoubleRegister()); | 374 ASSERT(op->IsDoubleRegister()); |
492 return ToDoubleRegister(op->index()); | 375 return ToDoubleRegister(op->index()); |
493 } | 376 } |
(...skipping 65 matching lines...) |
559 // arguments index points to the first element of a sequence of tagged | 442 // arguments index points to the first element of a sequence of tagged |
560 // values on the stack that represent the arguments. This needs to be | 443 // values on the stack that represent the arguments. This needs to be |
561 // kept in sync with the LArgumentsElements implementation. | 444 // kept in sync with the LArgumentsElements implementation. |
562 *arguments_index = -environment->parameter_count(); | 445 *arguments_index = -environment->parameter_count(); |
563 *arguments_count = environment->parameter_count(); | 446 *arguments_count = environment->parameter_count(); |
564 | 447 |
565 WriteTranslation(environment->outer(), | 448 WriteTranslation(environment->outer(), |
566 translation, | 449 translation, |
567 arguments_index, | 450 arguments_index, |
568 arguments_count); | 451 arguments_count); |
569 bool has_closure_id = !info()->closure().is_null() && | 452 int closure_id = *info()->closure() != *environment->closure() |
570 *info()->closure() != *environment->closure(); | |
571 int closure_id = has_closure_id | |
572 ? DefineDeoptimizationLiteral(environment->closure()) | 453 ? DefineDeoptimizationLiteral(environment->closure()) |
573 : Translation::kSelfLiteralId; | 454 : Translation::kSelfLiteralId; |
574 switch (environment->frame_type()) { | 455 switch (environment->frame_type()) { |
575 case JS_FUNCTION: | 456 case JS_FUNCTION: |
576 translation->BeginJSFrame(environment->ast_id(), closure_id, height); | 457 translation->BeginJSFrame(environment->ast_id(), closure_id, height); |
577 break; | 458 break; |
578 case JS_CONSTRUCT: | 459 case JS_CONSTRUCT: |
579 translation->BeginConstructStubFrame(closure_id, translation_size); | 460 translation->BeginConstructStubFrame(closure_id, translation_size); |
580 break; | 461 break; |
581 case JS_GETTER: | 462 case JS_GETTER: |
582 ASSERT(translation_size == 1); | 463 ASSERT(translation_size == 1); |
583 ASSERT(height == 0); | 464 ASSERT(height == 0); |
584 translation->BeginGetterStubFrame(closure_id); | 465 translation->BeginGetterStubFrame(closure_id); |
585 break; | 466 break; |
586 case JS_SETTER: | 467 case JS_SETTER: |
587 ASSERT(translation_size == 2); | 468 ASSERT(translation_size == 2); |
588 ASSERT(height == 0); | 469 ASSERT(height == 0); |
589 translation->BeginSetterStubFrame(closure_id); | 470 translation->BeginSetterStubFrame(closure_id); |
590 break; | 471 break; |
591 case ARGUMENTS_ADAPTOR: | 472 case ARGUMENTS_ADAPTOR: |
592 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); | 473 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); |
593 break; | 474 break; |
594 case STUB: | |
595 translation->BeginCompiledStubFrame(); | |
596 break; | |
597 default: | |
598 UNREACHABLE(); | |
599 } | 475 } |
600 | 476 |
601 // Inlined frames which push their arguments cause the index to be | 477 // Inlined frames which push their arguments cause the index to be |
602 // bumped and another stack area to be used for materialization. | 478 // bumped and another stack area to be used for materialization. |
603 if (environment->entry() != NULL && | 479 if (environment->entry() != NULL && |
604 environment->entry()->arguments_pushed()) { | 480 environment->entry()->arguments_pushed()) { |
605 *arguments_index = *arguments_index < 0 | 481 *arguments_index = *arguments_index < 0 |
606 ? GetStackSlotCount() | 482 ? GetStackSlotCount() |
607 : *arguments_index + *arguments_count; | 483 : *arguments_index + *arguments_count; |
608 *arguments_count = environment->entry()->arguments_count() + 1; | 484 *arguments_count = environment->entry()->arguments_count() + 1; |
(...skipping 114 matching lines...) |
723 int argc, | 599 int argc, |
724 LInstruction* instr) { | 600 LInstruction* instr) { |
725 ASSERT(instr != NULL); | 601 ASSERT(instr != NULL); |
726 ASSERT(instr->HasPointerMap()); | 602 ASSERT(instr->HasPointerMap()); |
727 LPointerMap* pointers = instr->pointer_map(); | 603 LPointerMap* pointers = instr->pointer_map(); |
728 RecordPosition(pointers->position()); | 604 RecordPosition(pointers->position()); |
729 | 605 |
730 __ CallRuntime(fun, argc); | 606 __ CallRuntime(fun, argc); |
731 | 607 |
732 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); | 608 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); |
733 | |
734 ASSERT(info()->is_calling()); | |
735 } | 609 } |
736 | 610 |
737 | 611 |
738 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, | 612 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, |
739 int argc, | 613 int argc, |
740 LInstruction* instr, | 614 LInstruction* instr, |
741 LOperand* context) { | 615 LOperand* context) { |
742 if (context->IsRegister()) { | 616 if (context->IsRegister()) { |
743 if (!ToRegister(context).is(esi)) { | 617 if (!ToRegister(context).is(esi)) { |
744 __ mov(esi, ToRegister(context)); | 618 __ mov(esi, ToRegister(context)); |
745 } | 619 } |
746 } else if (context->IsStackSlot()) { | 620 } else if (context->IsStackSlot()) { |
747 __ mov(esi, ToOperand(context)); | 621 __ mov(esi, ToOperand(context)); |
748 } else if (context->IsConstantOperand()) { | 622 } else if (context->IsConstantOperand()) { |
749 HConstant* constant = | 623 HConstant* constant = |
750 chunk_->LookupConstant(LConstantOperand::cast(context)); | 624 chunk_->LookupConstant(LConstantOperand::cast(context)); |
751 __ LoadHeapObject(esi, Handle<Context>::cast(constant->handle())); | 625 __ LoadHeapObject(esi, Handle<Context>::cast(constant->handle())); |
752 } else { | 626 } else { |
753 UNREACHABLE(); | 627 UNREACHABLE(); |
754 } | 628 } |
755 | 629 |
756 __ CallRuntimeSaveDoubles(id); | 630 __ CallRuntimeSaveDoubles(id); |
757 RecordSafepointWithRegisters( | 631 RecordSafepointWithRegisters( |
758 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); | 632 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); |
759 | |
760 ASSERT(info()->is_calling()); | |
761 } | 633 } |
762 | 634 |
763 | 635 |
764 void LCodeGen::RegisterEnvironmentForDeoptimization( | 636 void LCodeGen::RegisterEnvironmentForDeoptimization( |
765 LEnvironment* environment, Safepoint::DeoptMode mode) { | 637 LEnvironment* environment, Safepoint::DeoptMode mode) { |
766 if (!environment->HasBeenRegistered()) { | 638 if (!environment->HasBeenRegistered()) { |
767 // Physical stack frame layout: | 639 // Physical stack frame layout: |
768 // -x ............. -4 0 ..................................... y | 640 // -x ............. -4 0 ..................................... y |
769 // [incoming arguments] [spill slots] [pushed outgoing arguments] | 641 // [incoming arguments] [spill slots] [pushed outgoing arguments] |
770 | 642 |
(...skipping 25 matching lines...) |
796 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 668 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); |
797 deoptimizations_.Add(environment, zone()); | 669 deoptimizations_.Add(environment, zone()); |
798 } | 670 } |
799 } | 671 } |
800 | 672 |
801 | 673 |
802 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { | 674 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { |
803 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 675 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
804 ASSERT(environment->HasBeenRegistered()); | 676 ASSERT(environment->HasBeenRegistered()); |
805 int id = environment->deoptimization_index(); | 677 int id = environment->deoptimization_index(); |
806 ASSERT(info()->IsOptimizing() || info()->IsStub()); | 678 Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER); |
807 Deoptimizer::BailoutType bailout_type = frame_is_built_ | |
808 ? Deoptimizer::EAGER | |
809 : Deoptimizer::LAZY; | |
810 Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type); | |
811 if (entry == NULL) { | 679 if (entry == NULL) { |
812 Abort("bailout was not prepared"); | 680 Abort("bailout was not prepared"); |
813 return; | 681 return; |
814 } | 682 } |
815 | 683 |
816 if (FLAG_deopt_every_n_times != 0) { | 684 if (FLAG_deopt_every_n_times != 0) { |
817 Handle<SharedFunctionInfo> shared(info_->shared_info()); | 685 Handle<SharedFunctionInfo> shared(info_->shared_info()); |
818 Label no_deopt; | 686 Label no_deopt; |
819 __ pushfd(); | 687 __ pushfd(); |
820 __ push(eax); | 688 __ push(eax); |
(...skipping 13 matching lines...) |
834 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); | 702 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); |
835 | 703 |
836 __ bind(&no_deopt); | 704 __ bind(&no_deopt); |
837 __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset), | 705 __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset), |
838 eax); | 706 eax); |
839 __ pop(ebx); | 707 __ pop(ebx); |
840 __ pop(eax); | 708 __ pop(eax); |
841 __ popfd(); | 709 __ popfd(); |
842 } | 710 } |
843 | 711 |
844 ASSERT(info()->IsStub() || frame_is_built_); | |
845 bool lazy_deopt_needed = info()->IsStub(); | |
846 if (cc == no_condition) { | 712 if (cc == no_condition) { |
847 if (FLAG_trap_on_deopt) __ int3(); | 713 if (FLAG_trap_on_deopt) __ int3(); |
848 if (lazy_deopt_needed) { | 714 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); |
849 __ call(entry, RelocInfo::RUNTIME_ENTRY); | |
850 } else { | |
851 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); | |
852 } | |
853 } else { | 715 } else { |
854 Label done; | |
855 if (FLAG_trap_on_deopt) { | 716 if (FLAG_trap_on_deopt) { |
| 717 Label done; |
856 __ j(NegateCondition(cc), &done, Label::kNear); | 718 __ j(NegateCondition(cc), &done, Label::kNear); |
857 __ int3(); | 719 __ int3(); |
| 720 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); |
| 721 __ bind(&done); |
| 722 } else { |
| 723 __ j(cc, entry, RelocInfo::RUNTIME_ENTRY); |
858 } | 724 } |
859 if (!lazy_deopt_needed && frame_is_built_) { | |
860 if (FLAG_trap_on_deopt) { | |
861 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); | |
862 } else { | |
863 __ j(cc, entry, RelocInfo::RUNTIME_ENTRY); | |
864 } | |
865 } else { | |
866 // We often have several deopts to the same entry, reuse the last | |
867 // jump entry if this is the case. | |
868 if (jump_table_.is_empty() || | |
869 jump_table_.last().address != entry || | |
870 jump_table_.last().needs_frame != !frame_is_built_ || | |
871 jump_table_.last().is_lazy_deopt != lazy_deopt_needed) { | |
872 JumpTableEntry table_entry(entry, !frame_is_built_, lazy_deopt_needed); | |
873 jump_table_.Add(table_entry, zone()); | |
874 } | |
875 if (FLAG_trap_on_deopt) { | |
876 __ jmp(&jump_table_.last().label); | |
877 } else { | |
878 __ j(cc, &jump_table_.last().label); | |
879 } | |
880 } | |
881 __ bind(&done); | |
882 } | 725 } |
883 } | 726 } |
884 | 727 |
885 | 728 |
886 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { | 729 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { |
887 int length = deoptimizations_.length(); | 730 int length = deoptimizations_.length(); |
888 if (length == 0) return; | 731 if (length == 0) return; |
889 Handle<DeoptimizationInputData> data = | 732 Handle<DeoptimizationInputData> data = |
890 factory()->NewDeoptimizationInputData(length, TENURED); | 733 factory()->NewDeoptimizationInputData(length, TENURED); |
891 | 734 |
(...skipping 680 matching lines...) |
1572 // Use xor to produce +0.0 in a fast and compact way, but avoid to | 1415 // Use xor to produce +0.0 in a fast and compact way, but avoid to |
1573 // do so if the constant is -0.0. | 1416 // do so if the constant is -0.0. |
1574 if (BitCast<uint64_t, double>(v) == 0) { | 1417 if (BitCast<uint64_t, double>(v) == 0) { |
1575 __ xorps(res, res); | 1418 __ xorps(res, res); |
1576 } else { | 1419 } else { |
1577 Register temp = ToRegister(instr->temp()); | 1420 Register temp = ToRegister(instr->temp()); |
1578 uint64_t int_val = BitCast<uint64_t, double>(v); | 1421 uint64_t int_val = BitCast<uint64_t, double>(v); |
1579 int32_t lower = static_cast<int32_t>(int_val); | 1422 int32_t lower = static_cast<int32_t>(int_val); |
1580 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); | 1423 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); |
1581 if (CpuFeatures::IsSupported(SSE4_1)) { | 1424 if (CpuFeatures::IsSupported(SSE4_1)) { |
1582 CpuFeatures::Scope scope1(SSE2); | 1425 CpuFeatures::Scope scope(SSE4_1); |
1583 CpuFeatures::Scope scope2(SSE4_1); | |
1584 if (lower != 0) { | 1426 if (lower != 0) { |
1585 __ Set(temp, Immediate(lower)); | 1427 __ Set(temp, Immediate(lower)); |
1586 __ movd(res, Operand(temp)); | 1428 __ movd(res, Operand(temp)); |
1587 __ Set(temp, Immediate(upper)); | 1429 __ Set(temp, Immediate(upper)); |
1588 __ pinsrd(res, Operand(temp), 1); | 1430 __ pinsrd(res, Operand(temp), 1); |
1589 } else { | 1431 } else { |
1590 __ xorps(res, res); | 1432 __ xorps(res, res); |
1591 __ Set(temp, Immediate(upper)); | 1433 __ Set(temp, Immediate(upper)); |
1592 __ pinsrd(res, Operand(temp), 1); | 1434 __ pinsrd(res, Operand(temp), 1); |
1593 } | 1435 } |
1594 } else { | 1436 } else { |
1595 CpuFeatures::Scope scope(SSE2); | |
1596 __ Set(temp, Immediate(upper)); | 1437 __ Set(temp, Immediate(upper)); |
1597 __ movd(res, Operand(temp)); | 1438 __ movd(res, Operand(temp)); |
1598 __ psllq(res, 32); | 1439 __ psllq(res, 32); |
1599 if (lower != 0) { | 1440 if (lower != 0) { |
1600 __ Set(temp, Immediate(lower)); | 1441 __ Set(temp, Immediate(lower)); |
1601 __ movd(xmm0, Operand(temp)); | 1442 __ movd(xmm0, Operand(temp)); |
1602 __ por(res, xmm0); | 1443 __ por(res, xmm0); |
1603 } | 1444 } |
1604 } | 1445 } |
1605 } | 1446 } |
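The constant-materialization code above splits a double's IEEE-754 bit pattern into 32-bit halves (lower via movd, upper via pinsrd on SSE4.1, or psllq/por otherwise). The same split in portable C++, using memcpy as the bit cast:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double v = 1.5;
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);                 // BitCast<uint64_t, double>(v)
      uint32_t lower = static_cast<uint32_t>(bits);        // low lane: movd
      uint32_t upper = static_cast<uint32_t>(bits >> 32);  // high lane: pinsrd, or psllq+por
      std::printf("lower=%08x upper=%08x\n", (unsigned)lower, (unsigned)upper);
      return 0;
    }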
(...skipping 133 matching lines...) |
1739 __ add(ToRegister(left), ToOperand(right)); | 1580 __ add(ToRegister(left), ToOperand(right)); |
1740 } | 1581 } |
1741 | 1582 |
1742 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { | 1583 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
1743 DeoptimizeIf(overflow, instr->environment()); | 1584 DeoptimizeIf(overflow, instr->environment()); |
1744 } | 1585 } |
1745 } | 1586 } |
1746 | 1587 |
1747 | 1588 |
1748 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { | 1589 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { |
1749 CpuFeatures::Scope scope(SSE2); | |
1750 LOperand* left = instr->left(); | 1590 LOperand* left = instr->left(); |
1751 LOperand* right = instr->right(); | 1591 LOperand* right = instr->right(); |
1752 ASSERT(left->Equals(instr->result())); | 1592 ASSERT(left->Equals(instr->result())); |
1753 HMathMinMax::Operation operation = instr->hydrogen()->operation(); | 1593 HMathMinMax::Operation operation = instr->hydrogen()->operation(); |
1754 if (instr->hydrogen()->representation().IsInteger32()) { | 1594 if (instr->hydrogen()->representation().IsInteger32()) { |
1755 Label return_left; | 1595 Label return_left; |
1756 Condition condition = (operation == HMathMinMax::kMathMin) | 1596 Condition condition = (operation == HMathMinMax::kMathMin) |
1757 ? less_equal | 1597 ? less_equal |
1758 : greater_equal; | 1598 : greater_equal; |
1759 if (right->IsConstantOperand()) { | 1599 if (right->IsConstantOperand()) { |
(...skipping 41 matching lines...) |
1801 __ j(parity_even, &return_left, Label::kNear); // left == NaN. | 1641 __ j(parity_even, &return_left, Label::kNear); // left == NaN. |
1802 __ bind(&return_right); | 1642 __ bind(&return_right); |
1803 __ movsd(left_reg, right_reg); | 1643 __ movsd(left_reg, right_reg); |
1804 | 1644 |
1805 __ bind(&return_left); | 1645 __ bind(&return_left); |
1806 } | 1646 } |
1807 } | 1647 } |
1808 | 1648 |
1809 | 1649 |
1810 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { | 1650 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { |
1811 CpuFeatures::Scope scope(SSE2); | |
1812 XMMRegister left = ToDoubleRegister(instr->left()); | 1651 XMMRegister left = ToDoubleRegister(instr->left()); |
1813 XMMRegister right = ToDoubleRegister(instr->right()); | 1652 XMMRegister right = ToDoubleRegister(instr->right()); |
1814 XMMRegister result = ToDoubleRegister(instr->result()); | 1653 XMMRegister result = ToDoubleRegister(instr->result()); |
1815 // Modulo uses a fixed result register. | 1654 // Modulo uses a fixed result register. |
1816 ASSERT(instr->op() == Token::MOD || left.is(result)); | 1655 ASSERT(instr->op() == Token::MOD || left.is(result)); |
1817 switch (instr->op()) { | 1656 switch (instr->op()) { |
1818 case Token::ADD: | 1657 case Token::ADD: |
1819 __ addsd(left, right); | 1658 __ addsd(left, right); |
1820 break; | 1659 break; |
1821 case Token::SUB: | 1660 case Token::SUB: |
1822 __ subsd(left, right); | 1661 __ subsd(left, right); |
1823 break; | 1662 break; |
1824 case Token::MUL: | 1663 case Token::MUL: |
1825 __ mulsd(left, right); | 1664 __ mulsd(left, right); |
1826 break; | 1665 break; |
1827 case Token::DIV: | 1666 case Token::DIV: |
1828 __ divsd(left, right); | 1667 __ divsd(left, right); |
1829 break; | 1668 break; |
1830 case Token::MOD: { | 1669 case Token::MOD: { |
1831 // Pass two doubles as arguments on the stack. | 1670 // Pass two doubles as arguments on the stack. |
1832 __ PrepareCallCFunction(4, eax); | 1671 __ PrepareCallCFunction(4, eax); |
1833 __ movdbl(Operand(esp, 0 * kDoubleSize), left); | 1672 __ movdbl(Operand(esp, 0 * kDoubleSize), left); |
(...skipping 52 matching lines...) |
1886 } else { | 1725 } else { |
1887 __ j(cc, chunk_->GetAssemblyLabel(left_block)); | 1726 __ j(cc, chunk_->GetAssemblyLabel(left_block)); |
1888 __ jmp(chunk_->GetAssemblyLabel(right_block)); | 1727 __ jmp(chunk_->GetAssemblyLabel(right_block)); |
1889 } | 1728 } |
1890 } | 1729 } |
1891 | 1730 |
1892 | 1731 |
1893 void LCodeGen::DoBranch(LBranch* instr) { | 1732 void LCodeGen::DoBranch(LBranch* instr) { |
1894 int true_block = chunk_->LookupDestination(instr->true_block_id()); | 1733 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
1895 int false_block = chunk_->LookupDestination(instr->false_block_id()); | 1734 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
1896 CpuFeatures::Scope scope(SSE2); | |
1897 | 1735 |
1898 Representation r = instr->hydrogen()->value()->representation(); | 1736 Representation r = instr->hydrogen()->value()->representation(); |
1899 if (r.IsInteger32()) { | 1737 if (r.IsInteger32()) { |
1900 Register reg = ToRegister(instr->value()); | 1738 Register reg = ToRegister(instr->value()); |
1901 __ test(reg, Operand(reg)); | 1739 __ test(reg, Operand(reg)); |
1902 EmitBranch(true_block, false_block, not_zero); | 1740 EmitBranch(true_block, false_block, not_zero); |
1903 } else if (r.IsDouble()) { | 1741 } else if (r.IsDouble()) { |
1904 XMMRegister reg = ToDoubleRegister(instr->value()); | 1742 XMMRegister reg = ToDoubleRegister(instr->value()); |
1905 __ xorps(xmm0, xmm0); | 1743 __ xorps(xmm0, xmm0); |
1906 __ ucomisd(reg, xmm0); | 1744 __ ucomisd(reg, xmm0); |
(...skipping 139 matching lines...) |
2046 return cond; | 1884 return cond; |
2047 } | 1885 } |
2048 | 1886 |
2049 | 1887 |
2050 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { | 1888 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { |
2051 LOperand* left = instr->left(); | 1889 LOperand* left = instr->left(); |
2052 LOperand* right = instr->right(); | 1890 LOperand* right = instr->right(); |
2053 int false_block = chunk_->LookupDestination(instr->false_block_id()); | 1891 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
2054 int true_block = chunk_->LookupDestination(instr->true_block_id()); | 1892 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
2055 Condition cc = TokenToCondition(instr->op(), instr->is_double()); | 1893 Condition cc = TokenToCondition(instr->op(), instr->is_double()); |
2056 CpuFeatures::Scope scope(SSE2); | |
2057 | 1894 |
2058 if (left->IsConstantOperand() && right->IsConstantOperand()) { | 1895 if (left->IsConstantOperand() && right->IsConstantOperand()) { |
2059 // We can statically evaluate the comparison. | 1896 // We can statically evaluate the comparison. |
2060 double left_val = ToDouble(LConstantOperand::cast(left)); | 1897 double left_val = ToDouble(LConstantOperand::cast(left)); |
2061 double right_val = ToDouble(LConstantOperand::cast(right)); | 1898 double right_val = ToDouble(LConstantOperand::cast(right)); |
2062 int next_block = | 1899 int next_block = |
2063 EvalComparison(instr->op(), left_val, right_val) ? true_block | 1900 EvalComparison(instr->op(), left_val, right_val) ? true_block |
2064 : false_block; | 1901 : false_block; |
2065 EmitGoto(next_block); | 1902 EmitGoto(next_block); |
2066 } else { | 1903 } else { |
(...skipping 489 matching lines...) |
2556 __ j(condition, &true_value, Label::kNear); | 2393 __ j(condition, &true_value, Label::kNear); |
2557 __ mov(ToRegister(instr->result()), factory()->false_value()); | 2394 __ mov(ToRegister(instr->result()), factory()->false_value()); |
2558 __ jmp(&done, Label::kNear); | 2395 __ jmp(&done, Label::kNear); |
2559 __ bind(&true_value); | 2396 __ bind(&true_value); |
2560 __ mov(ToRegister(instr->result()), factory()->true_value()); | 2397 __ mov(ToRegister(instr->result()), factory()->true_value()); |
2561 __ bind(&done); | 2398 __ bind(&done); |
2562 } | 2399 } |
2563 | 2400 |
2564 | 2401 |
2565 void LCodeGen::DoReturn(LReturn* instr) { | 2402 void LCodeGen::DoReturn(LReturn* instr) { |
2566 if (FLAG_trace && info()->IsOptimizing()) { | 2403 if (FLAG_trace) { |
2567 // Preserve the return value on the stack and rely on the runtime call | 2404 // Preserve the return value on the stack and rely on the runtime call |
2568 // to return the value in the same register. We're leaving the code | 2405 // to return the value in the same register. We're leaving the code |
2569 // managed by the register allocator and tearing down the frame, it's | 2406 // managed by the register allocator and tearing down the frame, it's |
2570 // safe to write to the context register. | 2407 // safe to write to the context register. |
2571 __ push(eax); | 2408 __ push(eax); |
2572 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); | 2409 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); |
2573 __ CallRuntime(Runtime::kTraceExit, 1); | 2410 __ CallRuntime(Runtime::kTraceExit, 1); |
2574 } | 2411 } |
2575 if (dynamic_frame_alignment_) { | 2412 if (dynamic_frame_alignment_) { |
2576 // Fetch the state of the dynamic frame alignment. | 2413 // Fetch the state of the dynamic frame alignment. |
2577 __ mov(edx, Operand(ebp, | 2414 __ mov(edx, Operand(ebp, |
2578 JavaScriptFrameConstants::kDynamicAlignmentStateOffset)); | 2415 JavaScriptFrameConstants::kDynamicAlignmentStateOffset)); |
2579 } | 2416 } |
2580 if (NeedsEagerFrame()) { | 2417 __ mov(esp, ebp); |
2581 __ mov(esp, ebp); | 2418 __ pop(ebp); |
2582 __ pop(ebp); | |
2583 } | |
2584 if (dynamic_frame_alignment_) { | 2419 if (dynamic_frame_alignment_) { |
2585 Label no_padding; | 2420 Label no_padding; |
2586 __ cmp(edx, Immediate(kNoAlignmentPadding)); | 2421 __ cmp(edx, Immediate(kNoAlignmentPadding)); |
2587 __ j(equal, &no_padding); | 2422 __ j(equal, &no_padding); |
2588 if (FLAG_debug_code) { | 2423 if (FLAG_debug_code) { |
2589 __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize), | 2424 __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize), |
2590 Immediate(kAlignmentZapValue)); | 2425 Immediate(kAlignmentZapValue)); |
2591 __ Assert(equal, "expected alignment marker"); | 2426 __ Assert(equal, "expected alignment marker"); |
2592 } | 2427 } |
2593 __ Ret((GetParameterCount() + 2) * kPointerSize, ecx); | 2428 __ Ret((GetParameterCount() + 2) * kPointerSize, ecx); |
2594 __ bind(&no_padding); | 2429 __ bind(&no_padding); |
2595 } | 2430 } |
2596 if (info()->IsStub()) { | 2431 __ Ret((GetParameterCount() + 1) * kPointerSize, ecx); |
2597 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); | |
2598 __ Ret(); | |
2599 } else { | |
2600 __ Ret((GetParameterCount() + 1) * kPointerSize, ecx); | |
2601 } | |
2602 } | 2432 } |
2603 | 2433 |
2604 | 2434 |
2605 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | 2435 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { |
2606 Register result = ToRegister(instr->result()); | 2436 Register result = ToRegister(instr->result()); |
2607 __ mov(result, Operand::Cell(instr->hydrogen()->cell())); | 2437 __ mov(result, Operand::Cell(instr->hydrogen()->cell())); |
2608 if (instr->hydrogen()->RequiresHoleCheck()) { | 2438 if (instr->hydrogen()->RequiresHoleCheck()) { |
2609 __ cmp(result, factory()->the_hole_value()); | 2439 __ cmp(result, factory()->the_hole_value()); |
2610 DeoptimizeIf(equal, instr->environment()); | 2440 DeoptimizeIf(equal, instr->environment()); |
2611 } | 2441 } |
(...skipping 355 matching lines...) |
2967 __ SmiUntag(ToRegister(key)); | 2797 __ SmiUntag(ToRegister(key)); |
2968 } | 2798 } |
2969 Operand operand(BuildFastArrayOperand( | 2799 Operand operand(BuildFastArrayOperand( |
2970 instr->elements(), | 2800 instr->elements(), |
2971 key, | 2801 key, |
2972 instr->hydrogen()->key()->representation(), | 2802 instr->hydrogen()->key()->representation(), |
2973 elements_kind, | 2803 elements_kind, |
2974 0, | 2804 0, |
2975 instr->additional_index())); | 2805 instr->additional_index())); |
2976 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { | 2806 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
2977 if (CpuFeatures::IsSupported(SSE2)) { | 2807 XMMRegister result(ToDoubleRegister(instr->result())); |
2978 CpuFeatures::Scope scope(SSE2); | 2808 __ movss(result, operand); |
2979 XMMRegister result(ToDoubleRegister(instr->result())); | 2809 __ cvtss2sd(result, result); |
2980 __ movss(result, operand); | |
2981 __ cvtss2sd(result, result); | |
2982 } else { | |
2983 __ fld_s(operand); | |
2984 HandleX87FPReturnValue(instr); | |
2985 } | |
2986 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { | 2810 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
2987 if (CpuFeatures::IsSupported(SSE2)) { | 2811 __ movdbl(ToDoubleRegister(instr->result()), operand); |
2988 CpuFeatures::Scope scope(SSE2); | |
2989 __ movdbl(ToDoubleRegister(instr->result()), operand); | |
2990 } else { | |
2991 __ fld_d(operand); | |
2992 HandleX87FPReturnValue(instr); | |
2993 } | |
2994 } else { | 2812 } else { |
2995 Register result(ToRegister(instr->result())); | 2813 Register result(ToRegister(instr->result())); |
2996 switch (elements_kind) { | 2814 switch (elements_kind) { |
2997 case EXTERNAL_BYTE_ELEMENTS: | 2815 case EXTERNAL_BYTE_ELEMENTS: |
2998 __ movsx_b(result, operand); | 2816 __ movsx_b(result, operand); |
2999 break; | 2817 break; |
3000 case EXTERNAL_PIXEL_ELEMENTS: | 2818 case EXTERNAL_PIXEL_ELEMENTS: |
3001 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: | 2819 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
3002 __ movzx_b(result, operand); | 2820 __ movzx_b(result, operand); |
3003 break; | 2821 break; |
(...skipping 23 matching lines...) |
3027 case FAST_HOLEY_DOUBLE_ELEMENTS: | 2845 case FAST_HOLEY_DOUBLE_ELEMENTS: |
3028 case DICTIONARY_ELEMENTS: | 2846 case DICTIONARY_ELEMENTS: |
3029 case NON_STRICT_ARGUMENTS_ELEMENTS: | 2847 case NON_STRICT_ARGUMENTS_ELEMENTS: |
3030 UNREACHABLE(); | 2848 UNREACHABLE(); |
3031 break; | 2849 break; |
3032 } | 2850 } |
3033 } | 2851 } |
3034 } | 2852 } |
3035 | 2853 |
3036 | 2854 |
3037 void LCodeGen::HandleX87FPReturnValue(LInstruction* instr) { | |
3038 if (IsX87TopOfStack(instr->result())) { | |
3039 // Return value is already on stack. If the value has no uses, then | |
3040 // pop it off the FP stack. Otherwise, make sure that there are enough | |
3041 // copies of the value on the stack to feed all of the usages, e.g. | |
3042 // when the following instruction uses the return value in multiple | |
3043 // inputs. | |
3044 int count = instr->hydrogen_value()->UseCount(); | |
3045 if (count == 0) { | |
3046 __ fstp(0); | |
3047 } else { | |
3048 count--; | |
3049 ASSERT(count <= 7); | |
3050 while (count-- > 0) { | |
3051 __ fld(0); | |
3052 } | |
3053 } | |
3054 } else { | |
3055 __ fstp_d(ToOperand(instr->result())); | |
3056 } | |
3057 } | |
3058 | |
3059 | |
3060 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { | 2855 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { |
| 2856 XMMRegister result = ToDoubleRegister(instr->result()); |
| 2857 |
3061 if (instr->hydrogen()->RequiresHoleCheck()) { | 2858 if (instr->hydrogen()->RequiresHoleCheck()) { |
3062 int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + | 2859 int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + |
3063 sizeof(kHoleNanLower32); | 2860 sizeof(kHoleNanLower32); |
3064 Operand hole_check_operand = BuildFastArrayOperand( | 2861 Operand hole_check_operand = BuildFastArrayOperand( |
3065 instr->elements(), instr->key(), | 2862 instr->elements(), instr->key(), |
3066 instr->hydrogen()->key()->representation(), | 2863 instr->hydrogen()->key()->representation(), |
3067 FAST_DOUBLE_ELEMENTS, | 2864 FAST_DOUBLE_ELEMENTS, |
3068 offset, | 2865 offset, |
3069 instr->additional_index()); | 2866 instr->additional_index()); |
3070 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); | 2867 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); |
3071 DeoptimizeIf(equal, instr->environment()); | 2868 DeoptimizeIf(equal, instr->environment()); |
3072 } | 2869 } |
3073 | 2870 |
3074 Operand double_load_operand = BuildFastArrayOperand( | 2871 Operand double_load_operand = BuildFastArrayOperand( |
3075 instr->elements(), | 2872 instr->elements(), |
3076 instr->key(), | 2873 instr->key(), |
3077 instr->hydrogen()->key()->representation(), | 2874 instr->hydrogen()->key()->representation(), |
3078 FAST_DOUBLE_ELEMENTS, | 2875 FAST_DOUBLE_ELEMENTS, |
3079 FixedDoubleArray::kHeaderSize - kHeapObjectTag, | 2876 FixedDoubleArray::kHeaderSize - kHeapObjectTag, |
3080 instr->additional_index()); | 2877 instr->additional_index()); |
3081 if (CpuFeatures::IsSupported(SSE2)) { | 2878 __ movdbl(result, double_load_operand); |
3082 CpuFeatures::Scope scope(SSE2); | |
3083 XMMRegister result = ToDoubleRegister(instr->result()); | |
3084 __ movdbl(result, double_load_operand); | |
3085 } else { | |
3086 __ fld_d(double_load_operand); | |
3087 HandleX87FPReturnValue(instr); | |
3088 } | |
3089 } | 2879 } |
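The hole check in DoLoadKeyedFixedDoubleArray above inspects only the upper 32 bits of the element, because the hole is encoded as a NaN with a distinguished bit pattern (kHoleNanUpper32). A sketch of that test on a packed double; the hole constants here are assumed for illustration and a little-endian layout is presumed:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      // Assumed bit pattern; the real kHoleNanUpper32/-Lower32 constants
      // are defined in V8's globals.
      const uint32_t kHoleNanUpper32 = 0x7ff7ffff;
      const uint64_t hole_bits =
          (static_cast<uint64_t>(kHoleNanUpper32) << 32) | 0xffffffffu;
      double element;
      std::memcpy(&element, &hole_bits, sizeof element);
      uint32_t upper;  // read only the high word, like the cmp in the hole check
      std::memcpy(&upper, reinterpret_cast<const char*>(&element) + 4, sizeof upper);
      std::printf("is hole: %s\n", upper == kHoleNanUpper32 ? "yes" : "no");
      return 0;
    }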
3090 | 2880 |
3091 | 2881 |
3092 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 2882 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
3093 Register result = ToRegister(instr->result()); | 2883 Register result = ToRegister(instr->result()); |
3094 | 2884 |
3095 // Load the result. | 2885 // Load the result. |
3096 __ mov(result, | 2886 __ mov(result, |
3097 BuildFastArrayOperand(instr->elements(), | 2887 BuildFastArrayOperand(instr->elements(), |
3098 instr->key(), | 2888 instr->key(), |
(...skipping 395 matching lines...) |
3494 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); | 3284 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); |
3495 } | 3285 } |
3496 virtual LInstruction* instr() { return instr_; } | 3286 virtual LInstruction* instr() { return instr_; } |
3497 private: | 3287 private: |
3498 LUnaryMathOperation* instr_; | 3288 LUnaryMathOperation* instr_; |
3499 }; | 3289 }; |
3500 | 3290 |
3501 ASSERT(instr->value()->Equals(instr->result())); | 3291 ASSERT(instr->value()->Equals(instr->result())); |
3502 Representation r = instr->hydrogen()->value()->representation(); | 3292 Representation r = instr->hydrogen()->value()->representation(); |
3503 | 3293 |
3504 CpuFeatures::Scope scope(SSE2); | |
3505 if (r.IsDouble()) { | 3294 if (r.IsDouble()) { |
3506 XMMRegister scratch = xmm0; | 3295 XMMRegister scratch = xmm0; |
3507 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3296 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
3508 __ xorps(scratch, scratch); | 3297 __ xorps(scratch, scratch); |
3509 __ subsd(scratch, input_reg); | 3298 __ subsd(scratch, input_reg); |
3510 __ pand(input_reg, scratch); | 3299 __ pand(input_reg, scratch); |
3511 } else if (r.IsInteger32()) { | 3300 } else if (r.IsInteger32()) { |
3512 EmitIntegerMathAbs(instr); | 3301 EmitIntegerMathAbs(instr); |
3513 } else { // Tagged case. | 3302 } else { // Tagged case. |
3514 DeferredMathAbsTaggedHeapNumber* deferred = | 3303 DeferredMathAbsTaggedHeapNumber* deferred = |
3515 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); | 3304 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); |
3516 Register input_reg = ToRegister(instr->value()); | 3305 Register input_reg = ToRegister(instr->value()); |
3517 // Smi check. | 3306 // Smi check. |
3518 __ JumpIfNotSmi(input_reg, deferred->entry()); | 3307 __ JumpIfNotSmi(input_reg, deferred->entry()); |
3519 EmitIntegerMathAbs(instr); | 3308 EmitIntegerMathAbs(instr); |
3520 __ bind(deferred->exit()); | 3309 __ bind(deferred->exit()); |
3521 } | 3310 } |
3522 } | 3311 } |
3523 | 3312 |
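The xorps/subsd/pand sequence in the double case is a branch-free absolute value: IEEE-754 negation flips only the sign bit, so the bitwise AND of x and 0 - x clears the sign and leaves every other bit intact. A minimal sketch of the same computation in plain C++ (not V8 code, just the bit trick):

```cpp
#include <cstdint>
#include <cstring>
#include <cstdio>

// Conceptual model of xorps (zero) + subsd (negate) + pand (mask):
// abs(x) as the bitwise AND of x and its arithmetic negation.
double BitwiseAbs(double x) {
  double neg = 0.0 - x;  // subsd: flips only the sign bit for ordinary values
  uint64_t bits, neg_bits;
  std::memcpy(&bits, &x, sizeof bits);
  std::memcpy(&neg_bits, &neg, sizeof neg_bits);
  uint64_t result_bits = bits & neg_bits;  // pand: sign bits disagree, AND to 0
  double result;
  std::memcpy(&result, &result_bits, sizeof result);
  return result;
}

int main() {
  std::printf("%g %g %g\n", BitwiseAbs(-3.5), BitwiseAbs(3.5), BitwiseAbs(-0.0));
  // 3.5 3.5 0
}
```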
3524 | 3313 |
3525 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { | 3314 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { |
3526 CpuFeatures::Scope scope(SSE2); | |
3527 XMMRegister xmm_scratch = xmm0; | 3315 XMMRegister xmm_scratch = xmm0; |
3528 Register output_reg = ToRegister(instr->result()); | 3316 Register output_reg = ToRegister(instr->result()); |
3529 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3317 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
3530 | 3318 |
3531 if (CpuFeatures::IsSupported(SSE4_1)) { | 3319 if (CpuFeatures::IsSupported(SSE4_1)) { |
3532 CpuFeatures::Scope scope(SSE4_1); | 3320 CpuFeatures::Scope scope(SSE4_1); |
3533 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3321 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
3534 // Deoptimize on negative zero. | 3322 // Deoptimize on negative zero. |
3535 Label non_zero; | 3323 Label non_zero; |
3536 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. | 3324 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. |
(...skipping 44 matching lines...) |
3581 __ ucomisd(input_reg, xmm_scratch); | 3369 __ ucomisd(input_reg, xmm_scratch); |
3582 __ j(equal, &done, Label::kNear); | 3370 __ j(equal, &done, Label::kNear); |
3583 __ sub(output_reg, Immediate(1)); | 3371 __ sub(output_reg, Immediate(1)); |
3584 DeoptimizeIf(overflow, instr->environment()); | 3372 DeoptimizeIf(overflow, instr->environment()); |
3585 | 3373 |
3586 __ bind(&done); | 3374 __ bind(&done); |
3587 } | 3375 } |
3588 } | 3376 } |
3589 | 3377 |
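When SSE4.1's roundsd is unavailable, the fallback visible above truncates with cvttsd2si and then subtracts one when the round trip shows truncation rounded toward zero from below. A plain-C++ sketch of that idea, assuming the input is already known to fit in int32 (the real code deoptimizes otherwise):

```cpp
#include <cstdint>

// Hypothetical model of the non-SSE4.1 floor path: truncate toward zero,
// then correct downward for negative non-integral inputs.
int32_t FloorToInt32(double x) {
  int32_t truncated = static_cast<int32_t>(x);  // cvttsd2si rounds toward zero
  if (static_cast<double>(truncated) != x && x < 0.0) {
    truncated -= 1;  // round-trip mismatch on a negative input: round down
  }
  return truncated;
}
```
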
3590 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { | 3378 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { |
3591 CpuFeatures::Scope scope(SSE2); | |
3592 XMMRegister xmm_scratch = xmm0; | 3379 XMMRegister xmm_scratch = xmm0; |
3593 Register output_reg = ToRegister(instr->result()); | 3380 Register output_reg = ToRegister(instr->result()); |
3594 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3381 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
3595 | 3382 |
3596 Label below_half, done; | 3383 Label below_half, done; |
3597 // xmm_scratch = 0.5 | 3384 // xmm_scratch = 0.5 |
3598 ExternalReference one_half = ExternalReference::address_of_one_half(); | 3385 ExternalReference one_half = ExternalReference::address_of_one_half(); |
3599 __ movdbl(xmm_scratch, Operand::StaticVariable(one_half)); | 3386 __ movdbl(xmm_scratch, Operand::StaticVariable(one_half)); |
3600 __ ucomisd(xmm_scratch, input_reg); | 3387 __ ucomisd(xmm_scratch, input_reg); |
3601 __ j(above, &below_half); | 3388 __ j(above, &below_half); |
(...skipping 25 matching lines...) |
3627 __ cvtss2sd(xmm_scratch, xmm_scratch); | 3414 __ cvtss2sd(xmm_scratch, xmm_scratch); |
3628 __ ucomisd(input_reg, xmm_scratch); | 3415 __ ucomisd(input_reg, xmm_scratch); |
3629 DeoptimizeIf(below, instr->environment()); | 3416 DeoptimizeIf(below, instr->environment()); |
3630 } | 3417 } |
3631 __ Set(output_reg, Immediate(0)); | 3418 __ Set(output_reg, Immediate(0)); |
3632 __ bind(&done); | 3419 __ bind(&done); |
3633 } | 3420 } |
3634 | 3421 |
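JS Math.round rounds halfway cases toward +infinity, unlike C's round() (half away from zero). The fast path above realizes this for x >= 0.5 as truncate(x + 0.5); the below_half branch maps [-0.5, 0.5) to zero, and as far as the visible tail shows, inputs below -0.5 deoptimize rather than being rounded inline. A sketch under those assumptions:

```cpp
#include <cmath>
#include <cstdio>

// Models only the inline cases of the generated code; the elided lines and
// the -0 deopt flag are approximated, not reproduced.
double JsMathRoundFastPath(double x) {
  if (x >= 0.5) return std::trunc(x + 0.5);  // addsd one_half + cvttsd2si
  if (x >= -0.5) return 0.0;                 // below_half: collapses to zero
  return std::nan("");                       // below -0.5: DeoptimizeIf(below)
}

int main() {
  std::printf("%g %g %g\n", JsMathRoundFastPath(2.5),
              JsMathRoundFastPath(0.4), JsMathRoundFastPath(-0.3));  // 3 0 0
}
```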
3635 | 3422 |
3636 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { | 3423 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { |
3637 CpuFeatures::Scope scope(SSE2); | |
3638 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3424 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
3639 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); | 3425 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); |
3640 __ sqrtsd(input_reg, input_reg); | 3426 __ sqrtsd(input_reg, input_reg); |
3641 } | 3427 } |
3642 | 3428 |
3643 | 3429 |
3644 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { | 3430 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { |
3645 CpuFeatures::Scope scope(SSE2); | |
3646 XMMRegister xmm_scratch = xmm0; | 3431 XMMRegister xmm_scratch = xmm0; |
3647 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3432 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
3648 Register scratch = ToRegister(instr->temp()); | 3433 Register scratch = ToRegister(instr->temp()); |
3649 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); | 3434 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); |
3650 | 3435 |
3651 // Note that according to ECMA-262 15.8.2.13: | 3436 // Note that according to ECMA-262 15.8.2.13: |
3652 // Math.pow(-Infinity, 0.5) == Infinity | 3437 // Math.pow(-Infinity, 0.5) == Infinity |
3653 // Math.sqrt(-Infinity) == NaN | 3438 // Math.sqrt(-Infinity) == NaN |
3654 Label done, sqrt; | 3439 Label done, sqrt; |
3655 // Check base for -Infinity. According to IEEE-754, single-precision | 3440 // Check base for -Infinity. According to IEEE-754, single-precision |
(...skipping 56 matching lines...) |
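The ECMA note above is the whole reason DoMathPowHalf cannot simply emit sqrtsd: IEEE-754/C99 agree with ECMA-262 15.8.2.13 that pow(-Infinity, 0.5) is +Infinity while sqrt(-Infinity) is NaN, hence the explicit -Infinity check before the sqrt path. The distinction is checkable in plain C++:

```cpp
#include <cmath>
#include <cstdio>
#include <limits>

int main() {
  double ninf = -std::numeric_limits<double>::infinity();
  std::printf("pow:  %f\n", std::pow(ninf, 0.5));   // inf
  std::printf("sqrt: %f\n", std::sqrt(ninf));       // nan
}
```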
3712 DeferredDoRandom(LCodeGen* codegen, LRandom* instr) | 3497 DeferredDoRandom(LCodeGen* codegen, LRandom* instr) |
3713 : LDeferredCode(codegen), instr_(instr) { } | 3498 : LDeferredCode(codegen), instr_(instr) { } |
3714 virtual void Generate() { codegen()->DoDeferredRandom(instr_); } | 3499 virtual void Generate() { codegen()->DoDeferredRandom(instr_); } |
3715 virtual LInstruction* instr() { return instr_; } | 3500 virtual LInstruction* instr() { return instr_; } |
3716 private: | 3501 private: |
3717 LRandom* instr_; | 3502 LRandom* instr_; |
3718 }; | 3503 }; |
3719 | 3504 |
3720 DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr); | 3505 DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr); |
3721 | 3506 |
3722 CpuFeatures::Scope scope(SSE2); | |
3723 // Having marked this instruction as a call, we can use any | 3507 // Having marked this instruction as a call, we can use any |
3724 // registers. | 3508 // registers. |
3725 ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); | 3509 ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); |
3726 ASSERT(ToRegister(instr->global_object()).is(eax)); | 3510 ASSERT(ToRegister(instr->global_object()).is(eax)); |
3727 // Assert that the register size is indeed the size of each seed. | 3511 // Assert that the register size is indeed the size of each seed. |
3728 static const int kSeedSize = sizeof(uint32_t); | 3512 static const int kSeedSize = sizeof(uint32_t); |
3729 STATIC_ASSERT(kPointerSize == kSeedSize); | 3513 STATIC_ASSERT(kPointerSize == kSeedSize); |
3730 | 3514 |
3731 __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset)); | 3515 __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset)); |
3732 static const int kRandomSeedOffset = | 3516 static const int kRandomSeedOffset = |
(...skipping 47 matching lines...) |
3780 | 3564 |
3781 void LCodeGen::DoDeferredRandom(LRandom* instr) { | 3565 void LCodeGen::DoDeferredRandom(LRandom* instr) { |
3782 __ PrepareCallCFunction(1, ebx); | 3566 __ PrepareCallCFunction(1, ebx); |
3783 __ mov(Operand(esp, 0), eax); | 3567 __ mov(Operand(esp, 0), eax); |
3784 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); | 3568 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); |
3785 // Return value is in eax. | 3569 // Return value is in eax. |
3786 } | 3570 } |
3787 | 3571 |
3788 | 3572 |
3789 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { | 3573 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { |
3790 CpuFeatures::Scope scope(SSE2); | |
3791 ASSERT(instr->value()->Equals(instr->result())); | 3574 ASSERT(instr->value()->Equals(instr->result())); |
3792 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3575 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
3793 Label positive, done, zero; | 3576 Label positive, done, zero; |
3794 __ xorps(xmm0, xmm0); | 3577 __ xorps(xmm0, xmm0); |
3795 __ ucomisd(input_reg, xmm0); | 3578 __ ucomisd(input_reg, xmm0); |
3796 __ j(above, &positive, Label::kNear); | 3579 __ j(above, &positive, Label::kNear); |
3797 __ j(equal, &zero, Label::kNear); | 3580 __ j(equal, &zero, Label::kNear); |
3798 ExternalReference nan = | 3581 ExternalReference nan = |
3799 ExternalReference::address_of_canonical_non_hole_nan(); | 3582 ExternalReference::address_of_canonical_non_hole_nan(); |
3800 __ movdbl(input_reg, Operand::StaticVariable(nan)); | 3583 __ movdbl(input_reg, Operand::StaticVariable(nan)); |
(...skipping 11 matching lines...) |
3812 __ fld_d(Operand(esp, 0)); | 3595 __ fld_d(Operand(esp, 0)); |
3813 __ fyl2x(); | 3596 __ fyl2x(); |
3814 __ fstp_d(Operand(esp, 0)); | 3597 __ fstp_d(Operand(esp, 0)); |
3815 __ movdbl(input_reg, Operand(esp, 0)); | 3598 __ movdbl(input_reg, Operand(esp, 0)); |
3816 __ add(Operand(esp), Immediate(kDoubleSize)); | 3599 __ add(Operand(esp), Immediate(kDoubleSize)); |
3817 __ bind(&done); | 3600 __ bind(&done); |
3818 } | 3601 } |
3819 | 3602 |
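DoMathLog drops to x87 because SSE2 has no logarithm instruction. fyl2x computes ST(1) * log2(ST(0)), so with ln 2 in ST(1) (presumably loaded in the elided setup) it yields the natural log directly, since ln(x) = ln(2) * log2(x). The identity, checked in plain C++:

```cpp
#include <cmath>
#include <cstdio>

// fyl2x semantics: y * log2(x); choosing y = ln(2) gives ln(x).
int main() {
  double x = 7.25;
  std::printf("%.17g\n%.17g\n", std::log(2.0) * std::log2(x), std::log(x));
}
```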
3820 | 3603 |
3821 void LCodeGen::DoMathExp(LMathExp* instr) { | 3604 void LCodeGen::DoMathExp(LMathExp* instr) { |
3822 CpuFeatures::Scope scope(SSE2); | |
3823 XMMRegister input = ToDoubleRegister(instr->value()); | 3605 XMMRegister input = ToDoubleRegister(instr->value()); |
3824 XMMRegister result = ToDoubleRegister(instr->result()); | 3606 XMMRegister result = ToDoubleRegister(instr->result()); |
3825 Register temp1 = ToRegister(instr->temp1()); | 3607 Register temp1 = ToRegister(instr->temp1()); |
3826 Register temp2 = ToRegister(instr->temp2()); | 3608 Register temp2 = ToRegister(instr->temp2()); |
3827 | 3609 |
3828 MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2); | 3610 MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2); |
3829 } | 3611 } |
3830 | 3612 |
3831 | 3613 |
3832 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) { | 3614 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) { |
(...skipping 248 matching lines...) |
4081 int constant_index = | 3863 int constant_index = |
4082 ToInteger32(LConstantOperand::cast(instr->index())); | 3864 ToInteger32(LConstantOperand::cast(instr->index())); |
4083 if (instr->hydrogen()->length()->representation().IsTagged()) { | 3865 if (instr->hydrogen()->length()->representation().IsTagged()) { |
4084 __ cmp(ToOperand(instr->length()), | 3866 __ cmp(ToOperand(instr->length()), |
4085 Immediate(Smi::FromInt(constant_index))); | 3867 Immediate(Smi::FromInt(constant_index))); |
4086 } else { | 3868 } else { |
4087 __ cmp(ToOperand(instr->length()), Immediate(constant_index)); | 3869 __ cmp(ToOperand(instr->length()), Immediate(constant_index)); |
4088 } | 3870 } |
4089 DeoptimizeIf(below_equal, instr->environment()); | 3871 DeoptimizeIf(below_equal, instr->environment()); |
4090 } else { | 3872 } else { |
4091 if (instr->hydrogen()->index()->representation().IsTagged() && | |
4092 !instr->hydrogen()->index()->type().IsSmi()) { | |
4093 __ test(ToRegister(instr->index()), Immediate(kSmiTagMask)); | |
4094 DeoptimizeIf(not_zero, instr->environment()); | |
4095 } | |
4096 __ cmp(ToRegister(instr->index()), ToOperand(instr->length())); | 3873 __ cmp(ToRegister(instr->index()), ToOperand(instr->length())); |
4097 DeoptimizeIf(above_equal, instr->environment()); | 3874 DeoptimizeIf(above_equal, instr->environment()); |
4098 } | 3875 } |
4099 } | 3876 } |
4100 | 3877 |
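The tagged branch of the bounds check compares Smi::FromInt(constant_index) directly against a tagged length. That is sound because ia32 smis are the integer shifted left by one (tag bit 0 clear), and the tagging map is monotonic, so tagged values order the same as untagged ones. A sketch under those assumptions:

```cpp
#include <cassert>
#include <cstdint>

// ia32 smi encoding: 31-bit payload shifted left by one, tag bit 0 == 0.
constexpr int32_t SmiFromInt(int32_t value) { return value << 1; }

int main() {
  // Order is preserved, so a bounds check can compare tagged values directly.
  assert(SmiFromInt(7) < SmiFromInt(100));
  // The kSmiTagMask test from the removed lines: smis have bit 0 clear.
  assert((SmiFromInt(42) & 1) == 0);
  return 0;
}
```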
4101 | 3878 |
4102 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | 3879 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
4103 ElementsKind elements_kind = instr->elements_kind(); | 3880 ElementsKind elements_kind = instr->elements_kind(); |
4104 LOperand* key = instr->key(); | 3881 LOperand* key = instr->key(); |
4105 if (!key->IsConstantOperand() && | 3882 if (!key->IsConstantOperand() && |
4106 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(), | 3883 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(), |
4107 elements_kind)) { | 3884 elements_kind)) { |
4108 __ SmiUntag(ToRegister(key)); | 3885 __ SmiUntag(ToRegister(key)); |
4109 } | 3886 } |
4110 Operand operand(BuildFastArrayOperand( | 3887 Operand operand(BuildFastArrayOperand( |
4111 instr->elements(), | 3888 instr->elements(), |
4112 key, | 3889 key, |
4113 instr->hydrogen()->key()->representation(), | 3890 instr->hydrogen()->key()->representation(), |
4114 elements_kind, | 3891 elements_kind, |
4115 0, | 3892 0, |
4116 instr->additional_index())); | 3893 instr->additional_index())); |
4117 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { | 3894 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
4118 CpuFeatures::Scope scope(SSE2); | |
4119 __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value())); | 3895 __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value())); |
4120 __ movss(operand, xmm0); | 3896 __ movss(operand, xmm0); |
4121 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { | 3897 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
4122 CpuFeatures::Scope scope(SSE2); | |
4123 __ movdbl(operand, ToDoubleRegister(instr->value())); | 3898 __ movdbl(operand, ToDoubleRegister(instr->value())); |
4124 } else { | 3899 } else { |
4125 Register value = ToRegister(instr->value()); | 3900 Register value = ToRegister(instr->value()); |
4126 switch (elements_kind) { | 3901 switch (elements_kind) { |
4127 case EXTERNAL_PIXEL_ELEMENTS: | 3902 case EXTERNAL_PIXEL_ELEMENTS: |
4128 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: | 3903 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
4129 case EXTERNAL_BYTE_ELEMENTS: | 3904 case EXTERNAL_BYTE_ELEMENTS: |
4130 __ mov_b(operand, value); | 3905 __ mov_b(operand, value); |
4131 break; | 3906 break; |
4132 case EXTERNAL_SHORT_ELEMENTS: | 3907 case EXTERNAL_SHORT_ELEMENTS: |
(...skipping 15 matching lines...) |
4148 case DICTIONARY_ELEMENTS: | 3923 case DICTIONARY_ELEMENTS: |
4149 case NON_STRICT_ARGUMENTS_ELEMENTS: | 3924 case NON_STRICT_ARGUMENTS_ELEMENTS: |
4150 UNREACHABLE(); | 3925 UNREACHABLE(); |
4151 break; | 3926 break; |
4152 } | 3927 } |
4153 } | 3928 } |
4154 } | 3929 } |
4155 | 3930 |
4156 | 3931 |
4157 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { | 3932 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { |
4158 CpuFeatures::Scope scope(SSE2); | |
4159 XMMRegister value = ToDoubleRegister(instr->value()); | 3933 XMMRegister value = ToDoubleRegister(instr->value()); |
4160 | 3934 |
4161 if (instr->NeedsCanonicalization()) { | 3935 if (instr->NeedsCanonicalization()) { |
4162 Label have_value; | 3936 Label have_value; |
4163 | 3937 |
4164 __ ucomisd(value, value); | 3938 __ ucomisd(value, value); |
4165 __ j(parity_odd, &have_value); // Jump if not NaN. | 3939 __ j(parity_odd, &have_value); // Jump if not NaN. |
4166 | 3940 |
4167 ExternalReference canonical_nan_reference = | 3941 ExternalReference canonical_nan_reference = |
4168 ExternalReference::address_of_canonical_non_hole_nan(); | 3942 ExternalReference::address_of_canonical_non_hole_nan(); |
(...skipping 230 matching lines...) |
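The canonicalization above uses the standard self-comparison NaN probe: ucomisd sets the parity flag only on an unordered compare, i.e. exactly when the value is NaN (x != x). Stores into double arrays then rewrite every NaN to one canonical bit pattern so hole detection stays unambiguous. The portable spelling of the check:

```cpp
#include <cmath>
#include <cstdio>

// Self-comparison is the C++ equivalent of the ucomisd parity test:
// NaN is the only value for which x != x.
bool IsNaNProbe(double x) { return x != x; }

int main() {
  std::printf("%d %d\n", IsNaNProbe(std::nan("")), IsNaNProbe(1.5));  // 1 0
}
```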
4399 | 4173 |
4400 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 4174 void LCodeGen::DoStringAdd(LStringAdd* instr) { |
4401 EmitPushTaggedOperand(instr->left()); | 4175 EmitPushTaggedOperand(instr->left()); |
4402 EmitPushTaggedOperand(instr->right()); | 4176 EmitPushTaggedOperand(instr->right()); |
4403 StringAddStub stub(NO_STRING_CHECK_IN_STUB); | 4177 StringAddStub stub(NO_STRING_CHECK_IN_STUB); |
4404 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 4178 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
4405 } | 4179 } |
4406 | 4180 |
4407 | 4181 |
4408 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { | 4182 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { |
4409 if (CpuFeatures::IsSupported(SSE2)) { | 4183 LOperand* input = instr->value(); |
4410 CpuFeatures::Scope scope(SSE2); | 4184 ASSERT(input->IsRegister() || input->IsStackSlot()); |
4411 LOperand* input = instr->value(); | 4185 LOperand* output = instr->result(); |
4412 ASSERT(input->IsRegister() || input->IsStackSlot()); | 4186 ASSERT(output->IsDoubleRegister()); |
4413 LOperand* output = instr->result(); | 4187 __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); |
4414 ASSERT(output->IsDoubleRegister()); | |
4415 __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); | |
4416 } else { | |
4417 UNREACHABLE(); | |
4418 } | |
4419 } | 4188 } |
4420 | 4189 |
4421 | 4190 |
4422 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { | 4191 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { |
4423 CpuFeatures::Scope scope(SSE2); | |
4424 LOperand* input = instr->value(); | 4192 LOperand* input = instr->value(); |
4425 LOperand* output = instr->result(); | 4193 LOperand* output = instr->result(); |
4426 LOperand* temp = instr->temp(); | 4194 LOperand* temp = instr->temp(); |
4427 | 4195 |
4428 __ LoadUint32(ToDoubleRegister(output), | 4196 __ LoadUint32(ToDoubleRegister(output), |
4429 ToRegister(input), | 4197 ToRegister(input), |
4430 ToDoubleRegister(temp)); | 4198 ToDoubleRegister(temp)); |
4431 } | 4199 } |
4432 | 4200 |
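cvtsi2sd only understands signed 32-bit inputs, so LoadUint32 has to compensate for values with the top bit set. Its body is not shown here, but the usual technique (assumed in this sketch) is to convert as signed and then add back the 2^32 bias:

```cpp
#include <cstdint>
#include <cstdio>

// A uint32 with bit 31 set comes out of a signed conversion 2^32 too
// small; adding the bias restores the unsigned value.
double Uint32ToDouble(uint32_t value) {
  double result = static_cast<double>(static_cast<int32_t>(value));  // cvtsi2sd
  if (static_cast<int32_t>(value) < 0) result += 4294967296.0;       // + 2^32
  return result;
}

int main() { std::printf("%.0f\n", Uint32ToDouble(0xFFFFFFFFu)); }  // 4294967295
```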
4433 | 4201 |
(...skipping 57 matching lines...) |
4491 PushSafepointRegistersScope scope(this); | 4259 PushSafepointRegistersScope scope(this); |
4492 | 4260 |
4493 Label done; | 4261 Label done; |
4494 | 4262 |
4495 if (signedness == SIGNED_INT32) { | 4263 if (signedness == SIGNED_INT32) { |
4496 // There was overflow, so bits 30 and 31 of the original integer | 4264 // There was overflow, so bits 30 and 31 of the original integer |
4497 // disagree. Try to allocate a heap number in new space and store | 4265 // disagree. Try to allocate a heap number in new space and store |
4498 // the value there. If that fails, call the runtime system. | 4266 // the value there. If that fails, call the runtime system. |
4499 __ SmiUntag(reg); | 4267 __ SmiUntag(reg); |
4500 __ xor_(reg, 0x80000000); | 4268 __ xor_(reg, 0x80000000); |
4501 if (CpuFeatures::IsSupported(SSE2)) { | 4269 __ cvtsi2sd(xmm0, Operand(reg)); |
4502 CpuFeatures::Scope feature_scope(SSE2); | |
4503 __ cvtsi2sd(xmm0, Operand(reg)); | |
4504 } else { | |
4505 __ push(reg); | |
4506 __ fild_s(Operand(esp, 0)); | |
4507 __ pop(reg); | |
4508 } | |
4509 } else { | 4270 } else { |
4510 if (CpuFeatures::IsSupported(SSE2)) { | 4271 __ LoadUint32(xmm0, reg, xmm1); |
4511 CpuFeatures::Scope feature_scope(SSE2); | |
4512 __ LoadUint32(xmm0, reg, xmm1); | |
4513 } else { | |
4514 UNREACHABLE(); | |
4515 } | |
4516 } | 4272 } |
4517 | 4273 |
4518 if (FLAG_inline_new) { | 4274 if (FLAG_inline_new) { |
4519 __ AllocateHeapNumber(reg, tmp, no_reg, &slow); | 4275 __ AllocateHeapNumber(reg, tmp, no_reg, &slow); |
4520 __ jmp(&done, Label::kNear); | 4276 __ jmp(&done, Label::kNear); |
4521 } | 4277 } |
4522 | 4278 |
4523 // Slow case: Call the runtime system to do the number allocation. | 4279 // Slow case: Call the runtime system to do the number allocation. |
4524 __ bind(&slow); | 4280 __ bind(&slow); |
4525 | 4281 |
4526 // TODO(3095996): Put a valid pointer value in the stack slot where the result | 4282 // TODO(3095996): Put a valid pointer value in the stack slot where the result |
4527 // register is stored, as this register is in the pointer map, but contains an | 4283 // register is stored, as this register is in the pointer map, but contains an |
4528 // integer value. | 4284 // integer value. |
4529 __ StoreToSafepointRegisterSlot(reg, Immediate(0)); | 4285 __ StoreToSafepointRegisterSlot(reg, Immediate(0)); |
4530 // NumberTagI and NumberTagD use the context from the frame, rather than | 4286 // NumberTagI and NumberTagD use the context from the frame, rather than |
4531 // the environment's HContext or HInlinedContext value. | 4287 // the environment's HContext or HInlinedContext value. |
4532 // They only call Runtime::kAllocateHeapNumber. | 4288 // They only call Runtime::kAllocateHeapNumber. |
4533 // The corresponding HChange instructions are added in a phase that does | 4289 // The corresponding HChange instructions are added in a phase that does |
4534 // not have easy access to the local context. | 4290 // not have easy access to the local context. |
4535 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); | 4291 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); |
4536 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); | 4292 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); |
4537 RecordSafepointWithRegisters( | 4293 RecordSafepointWithRegisters( |
4538 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); | 4294 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); |
4539 if (!reg.is(eax)) __ mov(reg, eax); | 4295 if (!reg.is(eax)) __ mov(reg, eax); |
4540 | 4296 |
4541 // Done. Put the value in xmm0 into the allocated heap | 4297 // Done. Put the value in xmm0 into the allocated heap |
4542 // number. | 4298 // number. |
4543 __ bind(&done); | 4299 __ bind(&done); |
4544 if (CpuFeatures::IsSupported(SSE2)) { | 4300 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0); |
4545 CpuFeatures::Scope feature_scope(SSE2); | |
4546 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0); | |
4547 } else { | |
4548 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); | |
4549 } | |
4550 __ StoreToSafepointRegisterSlot(reg, reg); | 4301 __ StoreToSafepointRegisterSlot(reg, reg); |
4551 } | 4302 } |
4552 | 4303 |
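The SIGNED_INT32 path at the top of DoDeferredNumberTagI undoes a smi-tag overflow: the tagged result of an overflowing operation, shifted back right by one, differs from the true value only in its sign bit, so xor 0x80000000 recovers it. A self-contained check of that bit trick (unsigned operations used to keep the shifts well-defined in ISO C++):

```cpp
#include <cstdint>
#include <cstdio>

// Reproduces SmiUntag (sar by 1) followed by xor 0x80000000 for a value
// that overflowed the 31-bit smi range when tagged.
int32_t RecoverAfterSmiOverflow(uint32_t tagged) {
  // Arithmetic right shift by one, written with unsigned ops.
  uint32_t untagged = (tagged >> 1) | (tagged & 0x80000000u);
  return static_cast<int32_t>(untagged ^ 0x80000000u);
}

int main() {
  int32_t value = 0x40000000;                           // needs 32 bits once tagged
  uint32_t tagged = static_cast<uint32_t>(value) << 1;  // wraps into the sign bit
  std::printf("%d\n", RecoverAfterSmiOverflow(tagged) == value);  // 1
}
```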
4553 | 4304 |
4554 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { | 4305 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { |
4555 class DeferredNumberTagD: public LDeferredCode { | 4306 class DeferredNumberTagD: public LDeferredCode { |
4556 public: | 4307 public: |
4557 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) | 4308 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) |
4558 : LDeferredCode(codegen), instr_(instr) { } | 4309 : LDeferredCode(codegen), instr_(instr) { } |
4559 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } | 4310 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } |
4560 virtual LInstruction* instr() { return instr_; } | 4311 virtual LInstruction* instr() { return instr_; } |
4561 private: | 4312 private: |
4562 LNumberTagD* instr_; | 4313 LNumberTagD* instr_; |
4563 }; | 4314 }; |
4564 | 4315 |
| 4316 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
4565 Register reg = ToRegister(instr->result()); | 4317 Register reg = ToRegister(instr->result()); |
4566 Register tmp = ToRegister(instr->temp()); | 4318 Register tmp = ToRegister(instr->temp()); |
4567 | 4319 |
4568 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); | 4320 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); |
4569 if (FLAG_inline_new) { | 4321 if (FLAG_inline_new) { |
4570 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); | 4322 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); |
4571 } else { | 4323 } else { |
4572 __ jmp(deferred->entry()); | 4324 __ jmp(deferred->entry()); |
4573 } | 4325 } |
4574 __ bind(deferred->exit()); | 4326 __ bind(deferred->exit()); |
4575 if (CpuFeatures::IsSupported(SSE2)) { | 4327 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); |
4576 CpuFeatures::Scope scope(SSE2); | |
4577 XMMRegister input_reg = ToDoubleRegister(instr->value()); | |
4578 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); | |
4579 } else { | |
4580 if (!IsX87TopOfStack(instr->value())) { | |
4581 __ fld_d(ToOperand(instr->value())); | |
4582 } | |
4583 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); | |
4584 } | |
4585 } | 4328 } |
4586 | 4329 |
4587 | 4330 |
4588 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { | 4331 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { |
4589 // TODO(3095996): Get rid of this. For now, we need to make the | 4332 // TODO(3095996): Get rid of this. For now, we need to make the |
4590 // result register contain a valid pointer because it is already | 4333 // result register contain a valid pointer because it is already |
4591 // contained in the register pointer map. | 4334 // contained in the register pointer map. |
4592 Register reg = ToRegister(instr->result()); | 4335 Register reg = ToRegister(instr->result()); |
4593 __ Set(reg, Immediate(0)); | 4336 __ Set(reg, Immediate(0)); |
4594 | 4337 |
(...skipping 136 matching lines...) |
4731 __ cmp(input_reg, 0x80000000u); | 4474 __ cmp(input_reg, 0x80000000u); |
4732 __ j(not_equal, &done); | 4475 __ j(not_equal, &done); |
4733 // Check if the input was 0x80000000 (kMinInt). | 4476 // Check if the input was 0x80000000 (kMinInt). |
4734 // If not, we have an overflow and deoptimize. | 4477 // If not, we have an overflow and deoptimize. |
4735 ExternalReference min_int = ExternalReference::address_of_min_int(); | 4478 ExternalReference min_int = ExternalReference::address_of_min_int(); |
4736 __ movdbl(xmm_temp, Operand::StaticVariable(min_int)); | 4479 __ movdbl(xmm_temp, Operand::StaticVariable(min_int)); |
4737 __ ucomisd(xmm_temp, xmm0); | 4480 __ ucomisd(xmm_temp, xmm0); |
4738 DeoptimizeIf(not_equal, instr->environment()); | 4481 DeoptimizeIf(not_equal, instr->environment()); |
4739 DeoptimizeIf(parity_even, instr->environment()); // NaN. | 4482 DeoptimizeIf(parity_even, instr->environment()); // NaN. |
4740 } | 4483 } |
4741 } else if (CpuFeatures::IsSupported(SSE2)) { | 4484 } else { |
4742 CpuFeatures::Scope scope(SSE2); | |
4743 // Deoptimize if we don't have a heap number. | 4485 // Deoptimize if we don't have a heap number. |
4744 __ RecordComment("Deferred TaggedToI: not a heap number"); | 4486 __ RecordComment("Deferred TaggedToI: not a heap number"); |
4745 DeoptimizeIf(not_equal, instr->environment()); | 4487 DeoptimizeIf(not_equal, instr->environment()); |
4746 | 4488 |
4747 XMMRegister xmm_temp = ToDoubleRegister(instr->temp()); | 4489 XMMRegister xmm_temp = ToDoubleRegister(instr->temp()); |
4748 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 4490 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
4749 __ cvttsd2si(input_reg, Operand(xmm0)); | 4491 __ cvttsd2si(input_reg, Operand(xmm0)); |
4750 __ cvtsi2sd(xmm_temp, Operand(input_reg)); | 4492 __ cvtsi2sd(xmm_temp, Operand(input_reg)); |
4751 __ ucomisd(xmm0, xmm_temp); | 4493 __ ucomisd(xmm0, xmm_temp); |
4752 __ RecordComment("Deferred TaggedToI: lost precision"); | 4494 __ RecordComment("Deferred TaggedToI: lost precision"); |
4753 DeoptimizeIf(not_equal, instr->environment()); | 4495 DeoptimizeIf(not_equal, instr->environment()); |
4754 __ RecordComment("Deferred TaggedToI: NaN"); | 4496 __ RecordComment("Deferred TaggedToI: NaN"); |
4755 DeoptimizeIf(parity_even, instr->environment()); // NaN. | 4497 DeoptimizeIf(parity_even, instr->environment()); // NaN. |
4756 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4498 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
4757 __ test(input_reg, Operand(input_reg)); | 4499 __ test(input_reg, Operand(input_reg)); |
4758 __ j(not_zero, &done); | 4500 __ j(not_zero, &done); |
4759 __ movmskpd(input_reg, xmm0); | 4501 __ movmskpd(input_reg, xmm0); |
4760 __ and_(input_reg, 1); | 4502 __ and_(input_reg, 1); |
4761 __ RecordComment("Deferred TaggedToI: minus zero"); | 4503 __ RecordComment("Deferred TaggedToI: minus zero"); |
4762 DeoptimizeIf(not_zero, instr->environment()); | 4504 DeoptimizeIf(not_zero, instr->environment()); |
4763 } | 4505 } |
4764 } else { | |
4765 UNREACHABLE(); | |
4766 } | 4506 } |
4767 __ bind(&done); | 4507 __ bind(&done); |
4768 } | 4508 } |
4769 | 4509 |
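The non-truncating path above converts with cvttsd2si, converts back with cvtsi2sd, and deoptimizes unless the round trip compares equal; that single test catches fractional values, out-of-range doubles, and NaN at once. A well-defined C++ rendition of the same check:

```cpp
#include <cstdint>

// Round-trip exactness test behind the cvttsd2si/cvtsi2sd/ucomisd sequence.
// The explicit range guard keeps the cast defined in ISO C++; the hardware
// version instead relies on cvttsd2si's 0x80000000 overflow sentinel.
bool DoubleToInt32Exact(double x, int32_t* out) {
  if (!(x >= -2147483648.0 && x < 2147483648.0)) return false;  // NaN/out of range
  int32_t i = static_cast<int32_t>(x);            // truncates toward zero
  if (static_cast<double>(i) != x) return false;  // fractional: precision lost
  *out = i;
  return true;
}
```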
4770 | 4510 |
4771 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 4511 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
4772 class DeferredTaggedToI: public LDeferredCode { | 4512 class DeferredTaggedToI: public LDeferredCode { |
4773 public: | 4513 public: |
4774 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 4514 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
4775 : LDeferredCode(codegen), instr_(instr) { } | 4515 : LDeferredCode(codegen), instr_(instr) { } |
(...skipping 22 matching lines...) |
4798 | 4538 |
4799 | 4539 |
4800 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { | 4540 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { |
4801 LOperand* input = instr->value(); | 4541 LOperand* input = instr->value(); |
4802 ASSERT(input->IsRegister()); | 4542 ASSERT(input->IsRegister()); |
4803 LOperand* temp = instr->temp(); | 4543 LOperand* temp = instr->temp(); |
4804 ASSERT(temp == NULL || temp->IsRegister()); | 4544 ASSERT(temp == NULL || temp->IsRegister()); |
4805 LOperand* result = instr->result(); | 4545 LOperand* result = instr->result(); |
4806 ASSERT(result->IsDoubleRegister()); | 4546 ASSERT(result->IsDoubleRegister()); |
4807 | 4547 |
4808 if (CpuFeatures::IsSupported(SSE2)) { | 4548 Register input_reg = ToRegister(input); |
4809 CpuFeatures::Scope scope(SSE2); | 4549 XMMRegister result_reg = ToDoubleRegister(result); |
4810 Register input_reg = ToRegister(input); | |
4811 XMMRegister result_reg = ToDoubleRegister(result); | |
4812 | 4550 |
4813 bool deoptimize_on_minus_zero = | 4551 bool deoptimize_on_minus_zero = |
4814 instr->hydrogen()->deoptimize_on_minus_zero(); | 4552 instr->hydrogen()->deoptimize_on_minus_zero(); |
4815 Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg; | 4553 Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg; |
4816 | 4554 |
4817 EmitNumberUntagD(input_reg, | 4555 EmitNumberUntagD(input_reg, |
4818 temp_reg, | 4556 temp_reg, |
4819 result_reg, | 4557 result_reg, |
4820 instr->hydrogen()->deoptimize_on_undefined(), | 4558 instr->hydrogen()->deoptimize_on_undefined(), |
4821 deoptimize_on_minus_zero, | 4559 deoptimize_on_minus_zero, |
4822 instr->environment()); | 4560 instr->environment()); |
4823 } else { | |
4824 UNIMPLEMENTED(); | |
4825 } | |
4826 } | 4561 } |
4827 | 4562 |
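Like most of this diff, DoNumberUntagD loses its IsSupported(SSE2) guard and its non-SSE2 fallback: the rewrite assumes SSE2 wholesale instead of querying per call site. For reference, the feature bit behind such a query comes from CPUID; a minimal sketch of that kind of probe (GCC/Clang on x86, using <cpuid.h>), not V8's actual CpuFeatures implementation:

```cpp
#include <cpuid.h>
#include <cstdio>

// CPUID leaf 1 reports SSE2 support in bit 26 of EDX.
bool HasSSE2() {
  unsigned eax, ebx, ecx, edx;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return false;
  return (edx & (1u << 26)) != 0;
}

int main() { std::printf("SSE2 supported: %d\n", HasSSE2()); }
```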
4828 | 4563 |
4829 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { | 4564 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { |
4830 LOperand* input = instr->value(); | 4565 LOperand* input = instr->value(); |
4831 ASSERT(input->IsDoubleRegister()); | 4566 ASSERT(input->IsDoubleRegister()); |
4832 LOperand* result = instr->result(); | 4567 LOperand* result = instr->result(); |
4833 ASSERT(result->IsRegister()); | 4568 ASSERT(result->IsRegister()); |
4834 CpuFeatures::Scope scope(SSE2); | |
4835 | 4569 |
4836 XMMRegister input_reg = ToDoubleRegister(input); | 4570 XMMRegister input_reg = ToDoubleRegister(input); |
4837 Register result_reg = ToRegister(result); | 4571 Register result_reg = ToRegister(result); |
4838 | 4572 |
4839 if (instr->truncating()) { | 4573 if (instr->truncating()) { |
4840 // Performs a truncating conversion of a floating point number as used by | 4574 // Performs a truncating conversion of a floating point number as used by |
4841 // the JS bitwise operations. | 4575 // the JS bitwise operations. |
4842 __ cvttsd2si(result_reg, Operand(input_reg)); | 4576 __ cvttsd2si(result_reg, Operand(input_reg)); |
4843 __ cmp(result_reg, 0x80000000u); | 4577 __ cmp(result_reg, 0x80000000u); |
4844 if (CpuFeatures::IsSupported(SSE3)) { | 4578 if (CpuFeatures::IsSupported(SSE3)) { |
(...skipping 169 matching lines...) |
5014 Operand operand = ToOperand(instr->value()); | 4748 Operand operand = ToOperand(instr->value()); |
5015 __ cmp(operand, target); | 4749 __ cmp(operand, target); |
5016 } | 4750 } |
5017 DeoptimizeIf(not_equal, instr->environment()); | 4751 DeoptimizeIf(not_equal, instr->environment()); |
5018 } | 4752 } |
5019 | 4753 |
5020 | 4754 |
5021 void LCodeGen::DoCheckMapCommon(Register reg, | 4755 void LCodeGen::DoCheckMapCommon(Register reg, |
5022 Handle<Map> map, | 4756 Handle<Map> map, |
5023 CompareMapMode mode, | 4757 CompareMapMode mode, |
5024 LInstruction* instr) { | 4758 LEnvironment* env) { |
5025 Label success; | 4759 Label success; |
5026 __ CompareMap(reg, map, &success, mode); | 4760 __ CompareMap(reg, map, &success, mode); |
5027 DeoptimizeIf(not_equal, instr->environment()); | 4761 DeoptimizeIf(not_equal, env); |
5028 __ bind(&success); | 4762 __ bind(&success); |
5029 } | 4763 } |
5030 | 4764 |
5031 | 4765 |
5032 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 4766 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
5033 LOperand* input = instr->value(); | 4767 LOperand* input = instr->value(); |
5034 ASSERT(input->IsRegister()); | 4768 ASSERT(input->IsRegister()); |
5035 Register reg = ToRegister(input); | 4769 Register reg = ToRegister(input); |
5036 | 4770 |
5037 Label success; | 4771 Label success; |
5038 SmallMapList* map_set = instr->hydrogen()->map_set(); | 4772 SmallMapList* map_set = instr->hydrogen()->map_set(); |
5039 for (int i = 0; i < map_set->length() - 1; i++) { | 4773 for (int i = 0; i < map_set->length() - 1; i++) { |
5040 Handle<Map> map = map_set->at(i); | 4774 Handle<Map> map = map_set->at(i); |
5041 __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP); | 4775 __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP); |
5042 __ j(equal, &success); | 4776 __ j(equal, &success); |
5043 } | 4777 } |
5044 Handle<Map> map = map_set->last(); | 4778 Handle<Map> map = map_set->last(); |
5045 DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr); | 4779 DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment()); |
5046 __ bind(&success); | 4780 __ bind(&success); |
5047 } | 4781 } |
5048 | 4782 |
5049 | 4783 |
5050 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 4784 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
5051 CpuFeatures::Scope scope(SSE2); | |
5052 XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); | 4785 XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); |
5053 Register result_reg = ToRegister(instr->result()); | 4786 Register result_reg = ToRegister(instr->result()); |
5054 __ ClampDoubleToUint8(value_reg, xmm0, result_reg); | 4787 __ ClampDoubleToUint8(value_reg, xmm0, result_reg); |
5055 } | 4788 } |
5056 | 4789 |
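ClampDoubleToUint8 backs pixel-array (Uint8Clamped) stores: NaN and negative values clamp to 0, values above 255 clamp to 255, and in-range values round to nearest with ties to even, which is what cvtsd2si does under the default MXCSR rounding mode. A portable sketch of the behavior, not the SSE2 sequence itself:

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

// Assumes the default FE_TONEAREST rounding mode, matching cvtsd2si.
uint8_t ClampToUint8(double x) {
  if (!(x > 0.0)) return 0;    // also catches NaN, which fails the compare
  if (x >= 255.0) return 255;
  return static_cast<uint8_t>(std::nearbyint(x));  // ties to even
}

int main() {
  std::printf("%d %d %d\n", ClampToUint8(0.5), ClampToUint8(1.5),
              ClampToUint8(300.0));  // 0 2 255
}
```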
5057 | 4790 |
5058 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { | 4791 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { |
5059 ASSERT(instr->unclamped()->Equals(instr->result())); | 4792 ASSERT(instr->unclamped()->Equals(instr->result())); |
5060 Register value_reg = ToRegister(instr->result()); | 4793 Register value_reg = ToRegister(instr->result()); |
5061 __ ClampUint8(value_reg); | 4794 __ ClampUint8(value_reg); |
5062 } | 4795 } |
5063 | 4796 |
5064 | 4797 |
5065 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { | 4798 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { |
5066 CpuFeatures::Scope scope(SSE2); | |
5067 | |
5068 ASSERT(instr->unclamped()->Equals(instr->result())); | 4799 ASSERT(instr->unclamped()->Equals(instr->result())); |
5069 Register input_reg = ToRegister(instr->unclamped()); | 4800 Register input_reg = ToRegister(instr->unclamped()); |
5070 Label is_smi, done, heap_number; | 4801 Label is_smi, done, heap_number; |
5071 | 4802 |
5072 __ JumpIfSmi(input_reg, &is_smi); | 4803 __ JumpIfSmi(input_reg, &is_smi); |
5073 | 4804 |
5074 // Check for heap number. | 4805 // Check for heap number. |
5075 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), | 4806 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), |
5076 factory()->heap_number_map()); | 4807 factory()->heap_number_map()); |
5077 __ j(equal, &heap_number, Label::kNear); | 4808 __ j(equal, &heap_number, Label::kNear); |
(...skipping 26 matching lines...) |
5104 | 4835 |
5105 Handle<JSObject> holder = instr->holder(); | 4836 Handle<JSObject> holder = instr->holder(); |
5106 Handle<JSObject> current_prototype = instr->prototype(); | 4837 Handle<JSObject> current_prototype = instr->prototype(); |
5107 | 4838 |
5108 // Load prototype object. | 4839 // Load prototype object. |
5109 __ LoadHeapObject(reg, current_prototype); | 4840 __ LoadHeapObject(reg, current_prototype); |
5110 | 4841 |
5111 // Check prototype maps up to the holder. | 4842 // Check prototype maps up to the holder. |
5112 while (!current_prototype.is_identical_to(holder)) { | 4843 while (!current_prototype.is_identical_to(holder)) { |
5113 DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()), | 4844 DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()), |
5114 ALLOW_ELEMENT_TRANSITION_MAPS, instr); | 4845 ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); |
5115 | 4846 |
5116 current_prototype = | 4847 current_prototype = |
5117 Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype())); | 4848 Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype())); |
5118 // Load next prototype object. | 4849 // Load next prototype object. |
5119 __ LoadHeapObject(reg, current_prototype); | 4850 __ LoadHeapObject(reg, current_prototype); |
5120 } | 4851 } |
5121 | 4852 |
5122 // Check the holder map. | 4853 // Check the holder map. |
5123 DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()), | 4854 DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()), |
5124 ALLOW_ELEMENT_TRANSITION_MAPS, instr); | 4855 ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); |
5125 } | 4856 } |
5126 | 4857 |
5127 | 4858 |
5128 void LCodeGen::DoAllocateObject(LAllocateObject* instr) { | 4859 void LCodeGen::DoAllocateObject(LAllocateObject* instr) { |
5129 class DeferredAllocateObject: public LDeferredCode { | 4860 class DeferredAllocateObject: public LDeferredCode { |
5130 public: | 4861 public: |
5131 DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr) | 4862 DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr) |
5132 : LDeferredCode(codegen), instr_(instr) { } | 4863 : LDeferredCode(codegen), instr_(instr) { } |
5133 virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); } | 4864 virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); } |
5134 virtual LInstruction* instr() { return instr_; } | 4865 virtual LInstruction* instr() { return instr_; } |
(...skipping 516 matching lines...) |
5651 __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset)); | 5382 __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset)); |
5652 | 5383 |
5653 // Check the marker in the calling frame. | 5384 // Check the marker in the calling frame. |
5654 __ bind(&check_frame_marker); | 5385 __ bind(&check_frame_marker); |
5655 __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset), | 5386 __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset), |
5656 Immediate(Smi::FromInt(StackFrame::CONSTRUCT))); | 5387 Immediate(Smi::FromInt(StackFrame::CONSTRUCT))); |
5657 } | 5388 } |
5658 | 5389 |
5659 | 5390 |
5660 void LCodeGen::EnsureSpaceForLazyDeopt() { | 5391 void LCodeGen::EnsureSpaceForLazyDeopt() { |
5661 if (!info()->IsStub()) { | 5392 // Ensure that we have enough space after the previous lazy-bailout |
5662 // Ensure that we have enough space after the previous lazy-bailout | 5393 // instruction for patching the code here. |
5663 // instruction for patching the code here. | 5394 int current_pc = masm()->pc_offset(); |
5664 int current_pc = masm()->pc_offset(); | 5395 int patch_size = Deoptimizer::patch_size(); |
5665 int patch_size = Deoptimizer::patch_size(); | 5396 if (current_pc < last_lazy_deopt_pc_ + patch_size) { |
5666 if (current_pc < last_lazy_deopt_pc_ + patch_size) { | 5397 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc; |
5667 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc; | 5398 __ Nop(padding_size); |
5668 __ Nop(padding_size); | |
5669 } | |
5670 } | 5399 } |
5671 last_lazy_deopt_pc_ = masm()->pc_offset(); | 5400 last_lazy_deopt_pc_ = masm()->pc_offset(); |
5672 } | 5401 } |
5673 | 5402 |
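The padding exists because lazy deoptimization later patches a call of Deoptimizer::patch_size() bytes over the bailout site; two sites closer together than that would overlap when patched, so NOPs keep them apart. A sketch of the bookkeeping, mirroring the function above (pc_offset stands in for masm()->pc_offset(), the increment for __ Nop(n)):

```cpp
// Hypothetical standalone model of the spacing invariant between
// consecutive patchable lazy-deopt sites.
struct LazyDeoptSpacer {
  int last_pc = 0;
  void Ensure(int* pc_offset, int patch_size) {
    if (*pc_offset < last_pc + patch_size) {
      int padding = last_pc + patch_size - *pc_offset;
      *pc_offset += padding;  // emit `padding` bytes of NOPs in real codegen
    }
    last_pc = *pc_offset;
  }
};
```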
5674 | 5403 |
5675 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { | 5404 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { |
5676 EnsureSpaceForLazyDeopt(); | 5405 EnsureSpaceForLazyDeopt(); |
5677 ASSERT(instr->HasEnvironment()); | 5406 ASSERT(instr->HasEnvironment()); |
5678 LEnvironment* env = instr->environment(); | 5407 LEnvironment* env = instr->environment(); |
5679 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); | 5408 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); |
(...skipping 203 matching lines...) |
5883 FixedArray::kHeaderSize - kPointerSize)); | 5612 FixedArray::kHeaderSize - kPointerSize)); |
5884 __ bind(&done); | 5613 __ bind(&done); |
5885 } | 5614 } |
5886 | 5615 |
5887 | 5616 |
5888 #undef __ | 5617 #undef __ |
5889 | 5618 |
5890 } } // namespace v8::internal | 5619 } } // namespace v8::internal |
5891 | 5620 |
5892 #endif // V8_TARGET_ARCH_IA32 | 5621 #endif // V8_TARGET_ARCH_IA32 |