Chromium Code Reviews

Side by Side Diff: src/ia32/lithium-codegen-ia32.cc

Issue 11498006: Revert 13157, 13145 and 13140: Crankshaft code stubs. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 8 years ago
OLD | NEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 12 matching lines...)
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #include "v8.h" 28 #include "v8.h"
29 29
30 #if defined(V8_TARGET_ARCH_IA32) 30 #if defined(V8_TARGET_ARCH_IA32)
31 31
32 #include "ia32/lithium-codegen-ia32.h" 32 #include "ia32/lithium-codegen-ia32.h"
33 #include "ic.h"
34 #include "code-stubs.h" 33 #include "code-stubs.h"
35 #include "deoptimizer.h" 34 #include "deoptimizer.h"
36 #include "stub-cache.h" 35 #include "stub-cache.h"
37 #include "codegen.h" 36 #include "codegen.h"
38 37
39 namespace v8 { 38 namespace v8 {
40 namespace internal { 39 namespace internal {
41 40
42 41
43 // When invoking builtins, we need to record the safepoint in the middle of 42 // When invoking builtins, we need to record the safepoint in the middle of
(...skipping 20 matching lines...)
64 Safepoint::DeoptMode deopt_mode_; 63 Safepoint::DeoptMode deopt_mode_;
65 }; 64 };
66 65
67 66
68 #define __ masm()-> 67 #define __ masm()->
69 68
70 bool LCodeGen::GenerateCode() { 69 bool LCodeGen::GenerateCode() {
71 HPhase phase("Z_Code generation", chunk()); 70 HPhase phase("Z_Code generation", chunk());
72 ASSERT(is_unused()); 71 ASSERT(is_unused());
73 status_ = GENERATING; 72 status_ = GENERATING;
73 CpuFeatures::Scope scope(SSE2);
74 74
75 CodeStub::GenerateFPStubs(); 75 CodeStub::GenerateFPStubs();
76 76
77 // Open a frame scope to indicate that there is a frame on the stack. The 77 // Open a frame scope to indicate that there is a frame on the stack. The
78 // MANUAL indicates that the scope shouldn't actually generate code to set up 78 // MANUAL indicates that the scope shouldn't actually generate code to set up
79 // the frame (that is done in GeneratePrologue). 79 // the frame (that is done in GeneratePrologue).
80 FrameScope frame_scope(masm_, StackFrame::MANUAL); 80 FrameScope frame_scope(masm_, StackFrame::MANUAL);
81 81
82 dynamic_frame_alignment_ = info()->IsOptimizing() && 82 dynamic_frame_alignment_ = (chunk()->num_double_slots() > 2 &&
83 ((chunk()->num_double_slots() > 2 && 83 !chunk()->graph()->is_recursive()) ||
84 !chunk()->graph()->is_recursive()) || 84 !info()->osr_ast_id().IsNone();
85 !info()->osr_ast_id().IsNone());
86 85
87 return GeneratePrologue() && 86 return GeneratePrologue() &&
88 GenerateBody() && 87 GenerateBody() &&
89 GenerateDeferredCode() && 88 GenerateDeferredCode() &&
90 GenerateJumpTable() &&
91 GenerateSafepointTable(); 89 GenerateSafepointTable();
92 } 90 }
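[Note] This revert removes the out-of-line deopt jump table from GenerateCode() and reinstates a function-wide SSE2 scope. CpuFeatures::Scope is an RAII guard; the following is only a minimal sketch of that pattern with hypothetical names (the real class also consults what the host CPU actually supports):

  #include <cassert>

  // Hypothetical stand-in for CpuFeatures: a bitmask of instruction set
  // extensions the assembler is currently allowed to emit.
  static unsigned g_enabled_features = 0;

  class FeatureScope {  // sketch of CpuFeatures::Scope
   public:
    explicit FeatureScope(unsigned feature) : old_mask_(g_enabled_features) {
      g_enabled_features |= feature;
    }
    ~FeatureScope() { g_enabled_features = old_mask_; }  // restore on exit
   private:
    unsigned old_mask_;
  };

  int main() {
    const unsigned kSSE2 = 1u << 0;
    {
      FeatureScope scope(kSSE2);
      assert(g_enabled_features & kSSE2);   // SSE2 emission allowed here
    }
    assert(!(g_enabled_features & kSSE2));  // and disallowed again here
  }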
93 91
94 92
95 void LCodeGen::FinishCode(Handle<Code> code) { 93 void LCodeGen::FinishCode(Handle<Code> code) {
96 ASSERT(is_done()); 94 ASSERT(is_done());
97 code->set_stack_slots(GetStackSlotCount()); 95 code->set_stack_slots(GetStackSlotCount());
98 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); 96 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
99 PopulateDeoptimizationData(code); 97 PopulateDeoptimizationData(code);
100 if (!info()->IsStub()) { 98 Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
101 Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
102 }
103 } 99 }
104 100
105 101
106 void LCodeGen::Abort(const char* reason) { 102 void LCodeGen::Abort(const char* reason) {
107 info()->set_bailout_reason(reason); 103 info()->set_bailout_reason(reason);
108 status_ = ABORTED; 104 status_ = ABORTED;
109 } 105 }
110 106
111 107
112 void LCodeGen::Comment(const char* format, ...) { 108 void LCodeGen::Comment(const char* format, ...) {
(...skipping 10 matching lines...)
123 size_t length = builder.position(); 119 size_t length = builder.position();
124 Vector<char> copy = Vector<char>::New(length + 1); 120 Vector<char> copy = Vector<char>::New(length + 1);
125 memcpy(copy.start(), builder.Finalize(), copy.length()); 121 memcpy(copy.start(), builder.Finalize(), copy.length());
126 masm()->RecordComment(copy.start()); 122 masm()->RecordComment(copy.start());
127 } 123 }
128 124
129 125
130 bool LCodeGen::GeneratePrologue() { 126 bool LCodeGen::GeneratePrologue() {
131 ASSERT(is_generating()); 127 ASSERT(is_generating());
132 128
133 if (info()->IsOptimizing()) { 129 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
134 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
135 130
136 #ifdef DEBUG 131 #ifdef DEBUG
137 if (strlen(FLAG_stop_at) > 0 && 132 if (strlen(FLAG_stop_at) > 0 &&
138 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { 133 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
139 __ int3(); 134 __ int3();
140 } 135 }
141 #endif 136 #endif
142 137
143 // Strict mode functions and builtins need to replace the receiver 138 // Strict mode functions and builtins need to replace the receiver
144 // with undefined when called as functions (without an explicit 139 // with undefined when called as functions (without an explicit
145 // receiver object). ecx is zero for method calls and non-zero for 140 // receiver object). ecx is zero for method calls and non-zero for
146 // function calls. 141 // function calls.
147 if (!info_->is_classic_mode() || info_->is_native()) { 142 if (!info_->is_classic_mode() || info_->is_native()) {
148 Label ok; 143 Label ok;
149 __ test(ecx, Operand(ecx)); 144 __ test(ecx, Operand(ecx));
150 __ j(zero, &ok, Label::kNear); 145 __ j(zero, &ok, Label::kNear);
151 // +1 for return address. 146 // +1 for return address.
152 int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize; 147 int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
153 __ mov(Operand(esp, receiver_offset), 148 __ mov(Operand(esp, receiver_offset),
154 Immediate(isolate()->factory()->undefined_value())); 149 Immediate(isolate()->factory()->undefined_value()));
155 __ bind(&ok); 150 __ bind(&ok);
156 } 151 }
157 152
158 if (dynamic_frame_alignment_) {
159 // Move state of dynamic frame alignment into edx.
160 __ mov(edx, Immediate(kNoAlignmentPadding));
161 153
162 Label do_not_pad, align_loop; 154 if (dynamic_frame_alignment_) {
163 STATIC_ASSERT(kDoubleSize == 2 * kPointerSize); 155 // Move state of dynamic frame alignment into edx.
164 // Align esp + 4 to a multiple of 2 * kPointerSize. 156 __ mov(edx, Immediate(kNoAlignmentPadding));
165 __ test(esp, Immediate(kPointerSize));
166 __ j(not_zero, &do_not_pad, Label::kNear);
167 __ push(Immediate(0));
168 __ mov(ebx, esp);
169 __ mov(edx, Immediate(kAlignmentPaddingPushed));
170 // Copy arguments, receiver, and return address.
171 __ mov(ecx, Immediate(scope()->num_parameters() + 2));
172 157
173 __ bind(&align_loop); 158 Label do_not_pad, align_loop;
174 __ mov(eax, Operand(ebx, 1 * kPointerSize)); 159 STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
175 __ mov(Operand(ebx, 0), eax); 160 // Align esp + 4 to a multiple of 2 * kPointerSize.
176 __ add(Operand(ebx), Immediate(kPointerSize)); 161 __ test(esp, Immediate(kPointerSize));
177 __ dec(ecx); 162 __ j(not_zero, &do_not_pad, Label::kNear);
178 __ j(not_zero, &align_loop, Label::kNear); 163 __ push(Immediate(0));
179 __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue)); 164 __ mov(ebx, esp);
180 __ bind(&do_not_pad); 165 __ mov(edx, Immediate(kAlignmentPaddingPushed));
181 } 166 // Copy arguments, receiver, and return address.
167 __ mov(ecx, Immediate(scope()->num_parameters() + 2));
168
169 __ bind(&align_loop);
170 __ mov(eax, Operand(ebx, 1 * kPointerSize));
171 __ mov(Operand(ebx, 0), eax);
172 __ add(Operand(ebx), Immediate(kPointerSize));
173 __ dec(ecx);
174 __ j(not_zero, &align_loop, Label::kNear);
175 __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
176 __ bind(&do_not_pad);
182 } 177 }
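[Note] The padding test reads backwards at first glance: the code pads exactly when the kPointerSize bit of esp is clear, because the goal is for esp + 4 (esp after the return address) to be a multiple of 2 * kPointerSize. A minimal sketch of the decision, assuming 32-bit pointers:

  #include <cstdint>
  #include <iostream>

  // Returns true when the prologue must push one padding word so that
  // esp + 4 becomes a multiple of 8 (kDoubleSize on ia32).
  bool NeedsAlignmentPadding(uint32_t esp) {
    return (esp & 4u) == 0;  // the "test esp, kPointerSize" above
  }

  int main() {
    std::cout << NeedsAlignmentPadding(0xFF08) << "\n";  // 1: push padding
    std::cout << NeedsAlignmentPadding(0xFF0C) << "\n";  // 0: already fine
  }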
183 178
184 info()->set_prologue_offset(masm_->pc_offset()); 179 info()->set_prologue_offset(masm_->pc_offset());
185 if (NeedsEagerFrame()) { 180 __ push(ebp); // Caller's frame pointer.
186 ASSERT(!frame_is_built_); 181 __ mov(ebp, esp);
187 frame_is_built_ = true; 182 __ push(esi); // Callee's context.
188 __ push(ebp); // Caller's frame pointer. 183 __ push(edi); // Callee's JS function.
189 __ mov(ebp, esp);
190 __ push(esi); // Callee's context.
191 if (info()->IsStub()) {
192 __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
193 } else {
194 __ push(edi); // Callee's JS function.
195 }
196 }
197 184
198 if (info()->IsOptimizing() && 185 if (dynamic_frame_alignment_ && FLAG_debug_code) {
199 dynamic_frame_alignment_ &&
200 FLAG_debug_code) {
201 __ test(esp, Immediate(kPointerSize)); 186 __ test(esp, Immediate(kPointerSize));
202 __ Assert(zero, "frame is expected to be aligned"); 187 __ Assert(zero, "frame is expected to be aligned");
203 } 188 }
204 189
205 // Reserve space for the stack slots needed by the code. 190 // Reserve space for the stack slots needed by the code.
206 int slots = GetStackSlotCount(); 191 int slots = GetStackSlotCount();
207 ASSERT(slots != 0 || !info()->IsOptimizing()); 192 ASSERT_GE(slots, 1);
208 if (slots > 0) { 193 if (slots == 1) {
209 if (slots == 1) { 194 if (dynamic_frame_alignment_) {
210 if (dynamic_frame_alignment_) { 195 __ push(edx);
211 __ push(edx); 196 } else {
212 } else { 197 __ push(Immediate(kNoAlignmentPadding));
213 __ push(Immediate(kNoAlignmentPadding)); 198 }
199 } else {
200 if (FLAG_debug_code) {
201 __ mov(Operand(eax), Immediate(slots));
202 Label loop;
203 __ bind(&loop);
204 __ push(Immediate(kSlotsZapValue));
205 __ dec(eax);
206 __ j(not_zero, &loop);
207 } else {
208 __ sub(Operand(esp), Immediate(slots * kPointerSize));
209 #ifdef _MSC_VER
210 // On Windows, you may not access the stack more than one page below
211 // the most recently mapped page. To make the allocated area randomly
212 // accessible, we write to each page in turn (the value is irrelevant).
213 const int kPageSize = 4 * KB;
214 for (int offset = slots * kPointerSize - kPageSize;
215 offset > 0;
216 offset -= kPageSize) {
217 __ mov(Operand(esp, offset), eax);
214 } 218 }
219 #endif
220 }
221
222 // Store dynamic frame alignment state in the first local.
223 if (dynamic_frame_alignment_) {
224 __ mov(Operand(ebp,
225 JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
226 edx);
215 } else { 227 } else {
216 if (FLAG_debug_code) { 228 __ mov(Operand(ebp,
217 __ mov(Operand(eax), Immediate(slots)); 229 JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
218 Label loop; 230 Immediate(kNoAlignmentPadding));
219 __ bind(&loop);
220 __ push(Immediate(kSlotsZapValue));
221 __ dec(eax);
222 __ j(not_zero, &loop);
223 } else {
224 __ sub(Operand(esp), Immediate(slots * kPointerSize));
225 #ifdef _MSC_VER
226 // On Windows, you may not access the stack more than one page below
227 // the most recently mapped page. To make the allocated area randomly
228 // accessible, we write to each page in turn (the value is irrelevant).
229 const int kPageSize = 4 * KB;
230 for (int offset = slots * kPointerSize - kPageSize;
231 offset > 0;
232 offset -= kPageSize) {
233 __ mov(Operand(esp, offset), eax);
234 }
235 #endif
236 }
237
238 // Store dynamic frame alignment state in the first local.
239 if (dynamic_frame_alignment_) {
240 __ mov(Operand(ebp,
241 JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
242 edx);
243 } else {
244 __ mov(Operand(ebp,
245 JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
246 Immediate(kNoAlignmentPadding));
247 }
248 } 231 }
249 } 232 }
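[Note] The _MSC_VER branch exists because Windows commits stack pages through a guard page, so a frame larger than one page must touch each page in order. A sketch of the touch schedule the loop above produces (slot count hypothetical):

  #include <iostream>

  int main() {
    const int kPointerSize = 4, kPageSize = 4 * 1024;
    int slots = 5000;  // hypothetical large frame
    // Mirrors the generated loop: store once per page, far end first.
    for (int offset = slots * kPointerSize - kPageSize; offset > 0;
         offset -= kPageSize) {
      std::cout << "touch [esp + " << offset << "]\n";
    }
  }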
250 233
251 // Possibly allocate a local context. 234 // Possibly allocate a local context.
252 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; 235 int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
253 if (heap_slots > 0) { 236 if (heap_slots > 0) {
254 Comment(";;; Allocate local context"); 237 Comment(";;; Allocate local context");
255 // Argument to NewContext is the function, which is still in edi. 238 // Argument to NewContext is the function, which is still in edi.
256 __ push(edi); 239 __ push(edi);
257 if (heap_slots <= FastNewContextStub::kMaximumSlots) { 240 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
258 FastNewContextStub stub(heap_slots); 241 FastNewContextStub stub(heap_slots);
259 __ CallStub(&stub); 242 __ CallStub(&stub);
260 } else { 243 } else {
261 __ CallRuntime(Runtime::kNewFunctionContext, 1); 244 __ CallRuntime(Runtime::kNewFunctionContext, 1);
262 } 245 }
(...skipping 19 matching lines...)
282 context_offset, 265 context_offset,
283 eax, 266 eax,
284 ebx, 267 ebx,
285 kDontSaveFPRegs); 268 kDontSaveFPRegs);
286 } 269 }
287 } 270 }
288 Comment(";;; End allocate local context"); 271 Comment(";;; End allocate local context");
289 } 272 }
290 273
291 // Trace the call. 274 // Trace the call.
292 if (FLAG_trace && info()->IsOptimizing()) { 275 if (FLAG_trace) {
293 // We have not executed any compiled code yet, so esi still holds the 276 // We have not executed any compiled code yet, so esi still holds the
294 // incoming context. 277 // incoming context.
295 __ CallRuntime(Runtime::kTraceEnter, 0); 278 __ CallRuntime(Runtime::kTraceEnter, 0);
296 } 279 }
297 return !is_aborted(); 280 return !is_aborted();
298 } 281 }
299 282
300 283
301 bool LCodeGen::GenerateBody() { 284 bool LCodeGen::GenerateBody() {
302 ASSERT(is_generating()); 285 ASSERT(is_generating());
(...skipping 33 matching lines...)
336 } 319 }
337 } 320 }
338 instr->CompileToNative(this); 321 instr->CompileToNative(this);
339 } 322 }
340 } 323 }
341 EnsureSpaceForLazyDeopt(); 324 EnsureSpaceForLazyDeopt();
342 return !is_aborted(); 325 return !is_aborted();
343 } 326 }
344 327
345 328
346 bool LCodeGen::GenerateJumpTable() {
347 Label needs_frame_not_call;
348 Label needs_frame_is_call;
349 for (int i = 0; i < jump_table_.length(); i++) {
350 __ bind(&jump_table_[i].label);
351 Address entry = jump_table_[i].address;
352 if (jump_table_[i].needs_frame) {
353 __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
354 if (jump_table_[i].is_lazy_deopt) {
355 if (needs_frame_is_call.is_bound()) {
356 __ jmp(&needs_frame_is_call);
357 } else {
358 __ bind(&needs_frame_is_call);
359 __ push(esi);
360 // This variant of deopt can only be used with stubs. Since we don't
361 // have a function pointer to install in the stack frame that we're
362 // building, install a special marker there instead.
363 ASSERT(info()->IsStub());
364 __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
365 // Push a PC inside the function so that the deopt code can find where
366 // the deopt comes from. It doesn't have to be the precise return
367 // address of a "calling" LAZY deopt, it only has to be somewhere
368 // inside the code body.
369 Label push_approx_pc;
370 __ call(&push_approx_pc);
371 __ bind(&push_approx_pc);
372 // Push the continuation which was stashed where the ebp should
373 // be. Replace it with the saved ebp.
374 __ push(MemOperand(esp, 3 * kPointerSize));
375 __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
376 __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
377 __ ret(0); // Call the continuation without clobbering registers.
378 }
379 } else {
380 if (needs_frame_not_call.is_bound()) {
381 __ jmp(&needs_frame_not_call);
382 } else {
383 __ bind(&needs_frame_not_call);
384 __ push(esi);
385 // This variant of deopt can only be used with stubs. Since we don't
386 // have a function pointer to install in the stack frame that we're
387 // building, install a special marker there instead.
388 ASSERT(info()->IsStub());
389 __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
390 // Push the continuation which was stashed where the ebp should
391 // be. Replace it with the saved ebp.
392 __ push(MemOperand(esp, 2 * kPointerSize));
393 __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
394 __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
395 __ ret(0); // Call the continuation without clobbering registers.
396 }
397 }
398 } else {
399 if (jump_table_[i].is_lazy_deopt) {
400 __ call(entry, RelocInfo::RUNTIME_ENTRY);
401 } else {
402 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
403 }
404 }
405 }
406 return !is_aborted();
407 }
408
409
410 bool LCodeGen::GenerateDeferredCode() { 329 bool LCodeGen::GenerateDeferredCode() {
411 ASSERT(is_generating()); 330 ASSERT(is_generating());
412 if (deferred_.length() > 0) { 331 if (deferred_.length() > 0) {
413 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { 332 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
414 LDeferredCode* code = deferred_[i]; 333 LDeferredCode* code = deferred_[i];
415 __ bind(code->entry()); 334 __ bind(code->entry());
416 if (NeedsDeferredFrame()) {
417 Comment(";;; Deferred build frame",
418 code->instruction_index(),
419 code->instr()->Mnemonic());
420 ASSERT(!frame_is_built_);
421 ASSERT(info()->IsStub());
422 frame_is_built_ = true;
423 // Build the frame in such a way that esi isn't trashed.
424 __ push(ebp); // Caller's frame pointer.
425 __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
426 __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
427 __ lea(ebp, Operand(esp, 2 * kPointerSize));
428 }
429 Comment(";;; Deferred code @%d: %s.", 335 Comment(";;; Deferred code @%d: %s.",
430 code->instruction_index(), 336 code->instruction_index(),
431 code->instr()->Mnemonic()); 337 code->instr()->Mnemonic());
432 code->Generate(); 338 code->Generate();
433 if (NeedsDeferredFrame()) {
434 Comment(";;; Deferred destroy frame",
435 code->instruction_index(),
436 code->instr()->Mnemonic());
437 ASSERT(frame_is_built_);
438 frame_is_built_ = false;
439 __ mov(esp, ebp);
440 __ pop(ebp);
441 }
442 __ jmp(code->exit()); 339 __ jmp(code->exit());
443 } 340 }
444 } 341 }
445 342
446 // Deferred code is the last part of the instruction sequence. Mark 343 // Deferred code is the last part of the instruction sequence. Mark
447 // the generated code as done unless we bailed out. 344 // the generated code as done unless we bailed out.
448 if (!is_aborted()) status_ = DONE; 345 if (!is_aborted()) status_ = DONE;
449 return !is_aborted(); 346 return !is_aborted();
450 } 347 }
451 348
452 349
453 bool LCodeGen::GenerateSafepointTable() { 350 bool LCodeGen::GenerateSafepointTable() {
454 ASSERT(is_done()); 351 ASSERT(is_done());
455 if (!info()->IsStub()) {
456 // For lazy deoptimization we need space to patch a call after every call.
457 // Ensure there is always space for such patching, even if the code ends
458 // in a call.
459 int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
460 while (masm()->pc_offset() < target_offset) {
461 masm()->nop();
462 }
463 }
464 safepoints_.Emit(masm(), GetStackSlotCount()); 352 safepoints_.Emit(masm(), GetStackSlotCount());
465 return !is_aborted(); 353 return !is_aborted();
466 } 354 }
467 355
468 356
469 Register LCodeGen::ToRegister(int index) const { 357 Register LCodeGen::ToRegister(int index) const {
470 return Register::FromAllocationIndex(index); 358 return Register::FromAllocationIndex(index);
471 } 359 }
472 360
473 361
474 XMMRegister LCodeGen::ToDoubleRegister(int index) const { 362 XMMRegister LCodeGen::ToDoubleRegister(int index) const {
475 return XMMRegister::FromAllocationIndex(index); 363 return XMMRegister::FromAllocationIndex(index);
476 } 364 }
477 365
478 366
479 bool LCodeGen::IsX87TopOfStack(LOperand* op) const {
480 return op->IsDoubleRegister();
481 }
482
483
484 Register LCodeGen::ToRegister(LOperand* op) const { 367 Register LCodeGen::ToRegister(LOperand* op) const {
485 ASSERT(op->IsRegister()); 368 ASSERT(op->IsRegister());
486 return ToRegister(op->index()); 369 return ToRegister(op->index());
487 } 370 }
488 371
489 372
490 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { 373 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
491 ASSERT(op->IsDoubleRegister()); 374 ASSERT(op->IsDoubleRegister());
492 return ToDoubleRegister(op->index()); 375 return ToDoubleRegister(op->index());
493 } 376 }
(...skipping 65 matching lines...)
559 // arguments index points to the first element of a sequence of tagged 442 // arguments index points to the first element of a sequence of tagged
560 // values on the stack that represent the arguments. This needs to be 443 // values on the stack that represent the arguments. This needs to be
561 // kept in sync with the LArgumentsElements implementation. 444 // kept in sync with the LArgumentsElements implementation.
562 *arguments_index = -environment->parameter_count(); 445 *arguments_index = -environment->parameter_count();
563 *arguments_count = environment->parameter_count(); 446 *arguments_count = environment->parameter_count();
564 447
565 WriteTranslation(environment->outer(), 448 WriteTranslation(environment->outer(),
566 translation, 449 translation,
567 arguments_index, 450 arguments_index,
568 arguments_count); 451 arguments_count);
569 bool has_closure_id = !info()->closure().is_null() && 452 int closure_id = *info()->closure() != *environment->closure()
570 *info()->closure() != *environment->closure();
571 int closure_id = has_closure_id
572 ? DefineDeoptimizationLiteral(environment->closure()) 453 ? DefineDeoptimizationLiteral(environment->closure())
573 : Translation::kSelfLiteralId; 454 : Translation::kSelfLiteralId;
574 switch (environment->frame_type()) { 455 switch (environment->frame_type()) {
575 case JS_FUNCTION: 456 case JS_FUNCTION:
576 translation->BeginJSFrame(environment->ast_id(), closure_id, height); 457 translation->BeginJSFrame(environment->ast_id(), closure_id, height);
577 break; 458 break;
578 case JS_CONSTRUCT: 459 case JS_CONSTRUCT:
579 translation->BeginConstructStubFrame(closure_id, translation_size); 460 translation->BeginConstructStubFrame(closure_id, translation_size);
580 break; 461 break;
581 case JS_GETTER: 462 case JS_GETTER:
582 ASSERT(translation_size == 1); 463 ASSERT(translation_size == 1);
583 ASSERT(height == 0); 464 ASSERT(height == 0);
584 translation->BeginGetterStubFrame(closure_id); 465 translation->BeginGetterStubFrame(closure_id);
585 break; 466 break;
586 case JS_SETTER: 467 case JS_SETTER:
587 ASSERT(translation_size == 2); 468 ASSERT(translation_size == 2);
588 ASSERT(height == 0); 469 ASSERT(height == 0);
589 translation->BeginSetterStubFrame(closure_id); 470 translation->BeginSetterStubFrame(closure_id);
590 break; 471 break;
591 case ARGUMENTS_ADAPTOR: 472 case ARGUMENTS_ADAPTOR:
592 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); 473 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
593 break; 474 break;
594 case STUB:
595 translation->BeginCompiledStubFrame();
596 break;
597 default:
598 UNREACHABLE();
599 } 475 }
600 476
601 // Inlined frames which push their arguments cause the index to be 477 // Inlined frames which push their arguments cause the index to be
602 // bumped and another stack area to be used for materialization. 478 // bumped and another stack area to be used for materialization.
603 if (environment->entry() != NULL && 479 if (environment->entry() != NULL &&
604 environment->entry()->arguments_pushed()) { 480 environment->entry()->arguments_pushed()) {
605 *arguments_index = *arguments_index < 0 481 *arguments_index = *arguments_index < 0
606 ? GetStackSlotCount() 482 ? GetStackSlotCount()
607 : *arguments_index + *arguments_count; 483 : *arguments_index + *arguments_count;
608 *arguments_count = environment->entry()->arguments_count() + 1; 484 *arguments_count = environment->entry()->arguments_count() + 1;
(...skipping 114 matching lines...)
723 int argc, 599 int argc,
724 LInstruction* instr) { 600 LInstruction* instr) {
725 ASSERT(instr != NULL); 601 ASSERT(instr != NULL);
726 ASSERT(instr->HasPointerMap()); 602 ASSERT(instr->HasPointerMap());
727 LPointerMap* pointers = instr->pointer_map(); 603 LPointerMap* pointers = instr->pointer_map();
728 RecordPosition(pointers->position()); 604 RecordPosition(pointers->position());
729 605
730 __ CallRuntime(fun, argc); 606 __ CallRuntime(fun, argc);
731 607
732 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 608 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
733
734 ASSERT(info()->is_calling());
735 } 609 }
736 610
737 611
738 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, 612 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
739 int argc, 613 int argc,
740 LInstruction* instr, 614 LInstruction* instr,
741 LOperand* context) { 615 LOperand* context) {
742 if (context->IsRegister()) { 616 if (context->IsRegister()) {
743 if (!ToRegister(context).is(esi)) { 617 if (!ToRegister(context).is(esi)) {
744 __ mov(esi, ToRegister(context)); 618 __ mov(esi, ToRegister(context));
745 } 619 }
746 } else if (context->IsStackSlot()) { 620 } else if (context->IsStackSlot()) {
747 __ mov(esi, ToOperand(context)); 621 __ mov(esi, ToOperand(context));
748 } else if (context->IsConstantOperand()) { 622 } else if (context->IsConstantOperand()) {
749 HConstant* constant = 623 HConstant* constant =
750 chunk_->LookupConstant(LConstantOperand::cast(context)); 624 chunk_->LookupConstant(LConstantOperand::cast(context));
751 __ LoadHeapObject(esi, Handle<Context>::cast(constant->handle())); 625 __ LoadHeapObject(esi, Handle<Context>::cast(constant->handle()));
752 } else { 626 } else {
753 UNREACHABLE(); 627 UNREACHABLE();
754 } 628 }
755 629
756 __ CallRuntimeSaveDoubles(id); 630 __ CallRuntimeSaveDoubles(id);
757 RecordSafepointWithRegisters( 631 RecordSafepointWithRegisters(
758 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); 632 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
759
760 ASSERT(info()->is_calling());
761 } 633 }
762 634
763 635
764 void LCodeGen::RegisterEnvironmentForDeoptimization( 636 void LCodeGen::RegisterEnvironmentForDeoptimization(
765 LEnvironment* environment, Safepoint::DeoptMode mode) { 637 LEnvironment* environment, Safepoint::DeoptMode mode) {
766 if (!environment->HasBeenRegistered()) { 638 if (!environment->HasBeenRegistered()) {
767 // Physical stack frame layout: 639 // Physical stack frame layout:
768 // -x ............. -4 0 ..................................... y 640 // -x ............. -4 0 ..................................... y
769 // [incoming arguments] [spill slots] [pushed outgoing arguments] 641 // [incoming arguments] [spill slots] [pushed outgoing arguments]
770 642
(...skipping 25 matching lines...)
796 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); 668 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
797 deoptimizations_.Add(environment, zone()); 669 deoptimizations_.Add(environment, zone());
798 } 670 }
799 } 671 }
800 672
801 673
802 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { 674 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
803 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 675 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
804 ASSERT(environment->HasBeenRegistered()); 676 ASSERT(environment->HasBeenRegistered());
805 int id = environment->deoptimization_index(); 677 int id = environment->deoptimization_index();
806 ASSERT(info()->IsOptimizing() || info()->IsStub()); 678 Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
807 Deoptimizer::BailoutType bailout_type = frame_is_built_
808 ? Deoptimizer::EAGER
809 : Deoptimizer::LAZY;
810 Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
811 if (entry == NULL) { 679 if (entry == NULL) {
812 Abort("bailout was not prepared"); 680 Abort("bailout was not prepared");
813 return; 681 return;
814 } 682 }
815 683
816 if (FLAG_deopt_every_n_times != 0) { 684 if (FLAG_deopt_every_n_times != 0) {
817 Handle<SharedFunctionInfo> shared(info_->shared_info()); 685 Handle<SharedFunctionInfo> shared(info_->shared_info());
818 Label no_deopt; 686 Label no_deopt;
819 __ pushfd(); 687 __ pushfd();
820 __ push(eax); 688 __ push(eax);
(...skipping 13 matching lines...)
834 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); 702 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
835 703
836 __ bind(&no_deopt); 704 __ bind(&no_deopt);
837 __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset), 705 __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
838 eax); 706 eax);
839 __ pop(ebx); 707 __ pop(ebx);
840 __ pop(eax); 708 __ pop(eax);
841 __ popfd(); 709 __ popfd();
842 } 710 }
843 711
844 ASSERT(info()->IsStub() || frame_is_built_);
845 bool lazy_deopt_needed = info()->IsStub();
846 if (cc == no_condition) { 712 if (cc == no_condition) {
847 if (FLAG_trap_on_deopt) __ int3(); 713 if (FLAG_trap_on_deopt) __ int3();
848 if (lazy_deopt_needed) { 714 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
849 __ call(entry, RelocInfo::RUNTIME_ENTRY);
850 } else {
851 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
852 }
853 } else { 715 } else {
854 Label done;
855 if (FLAG_trap_on_deopt) { 716 if (FLAG_trap_on_deopt) {
717 Label done;
856 __ j(NegateCondition(cc), &done, Label::kNear); 718 __ j(NegateCondition(cc), &done, Label::kNear);
857 __ int3(); 719 __ int3();
720 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
721 __ bind(&done);
722 } else {
723 __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
858 } 724 }
859 if (!lazy_deopt_needed && frame_is_built_) {
860 if (FLAG_trap_on_deopt) {
861 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
862 } else {
863 __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
864 }
865 } else {
866 // We often have several deopts to the same entry, reuse the last
867 // jump entry if this is the case.
868 if (jump_table_.is_empty() ||
869 jump_table_.last().address != entry ||
870 jump_table_.last().needs_frame != !frame_is_built_ ||
871 jump_table_.last().is_lazy_deopt != lazy_deopt_needed) {
872 JumpTableEntry table_entry(entry, !frame_is_built_, lazy_deopt_needed);
873 jump_table_.Add(table_entry, zone());
874 }
875 if (FLAG_trap_on_deopt) {
876 __ jmp(&jump_table_.last().label);
877 } else {
878 __ j(cc, &jump_table_.last().label);
879 }
880 }
881 __ bind(&done);
882 } 725 }
883 } 726 }
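[Note] The reverted version of DeoptimizeIf routed frameless deopts through a jump table and reused the last entry when consecutive deopts shared a target and flags. A sketch of that dedup with hypothetical standard containers in place of V8's zone lists:

  #include <vector>

  struct JumpTableEntry {
    const void* address;
    bool needs_frame;
    bool is_lazy_deopt;
    bool Matches(const JumpTableEntry& o) const {
      return address == o.address && needs_frame == o.needs_frame &&
             is_lazy_deopt == o.is_lazy_deopt;
    }
  };

  // Returns the table slot to jump to, adding a new one only if needed.
  int AddDeopt(std::vector<JumpTableEntry>& table, JumpTableEntry e) {
    if (table.empty() || !table.back().Matches(e)) table.push_back(e);
    return static_cast<int>(table.size()) - 1;
  }

  int main() {
    std::vector<JumpTableEntry> table;
    JumpTableEntry e{reinterpret_cast<const void*>(0x1000), true, false};
    return AddDeopt(table, e) == AddDeopt(table, e) ? 0 : 1;  // reused slot
  }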
884 727
885 728
886 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { 729 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
887 int length = deoptimizations_.length(); 730 int length = deoptimizations_.length();
888 if (length == 0) return; 731 if (length == 0) return;
889 Handle<DeoptimizationInputData> data = 732 Handle<DeoptimizationInputData> data =
890 factory()->NewDeoptimizationInputData(length, TENURED); 733 factory()->NewDeoptimizationInputData(length, TENURED);
891 734
(...skipping 680 matching lines...)
1572 // Use xor to produce +0.0 in a fast and compact way, but avoid 1415 // Use xor to produce +0.0 in a fast and compact way, but avoid
1573 // doing so if the constant is -0.0. 1416 // doing so if the constant is -0.0.
1574 if (BitCast<uint64_t, double>(v) == 0) { 1417 if (BitCast<uint64_t, double>(v) == 0) {
1575 __ xorps(res, res); 1418 __ xorps(res, res);
1576 } else { 1419 } else {
1577 Register temp = ToRegister(instr->temp()); 1420 Register temp = ToRegister(instr->temp());
1578 uint64_t int_val = BitCast<uint64_t, double>(v); 1421 uint64_t int_val = BitCast<uint64_t, double>(v);
1579 int32_t lower = static_cast<int32_t>(int_val); 1422 int32_t lower = static_cast<int32_t>(int_val);
1580 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); 1423 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
1581 if (CpuFeatures::IsSupported(SSE4_1)) { 1424 if (CpuFeatures::IsSupported(SSE4_1)) {
1582 CpuFeatures::Scope scope1(SSE2); 1425 CpuFeatures::Scope scope(SSE4_1);
1583 CpuFeatures::Scope scope2(SSE4_1);
1584 if (lower != 0) { 1426 if (lower != 0) {
1585 __ Set(temp, Immediate(lower)); 1427 __ Set(temp, Immediate(lower));
1586 __ movd(res, Operand(temp)); 1428 __ movd(res, Operand(temp));
1587 __ Set(temp, Immediate(upper)); 1429 __ Set(temp, Immediate(upper));
1588 __ pinsrd(res, Operand(temp), 1); 1430 __ pinsrd(res, Operand(temp), 1);
1589 } else { 1431 } else {
1590 __ xorps(res, res); 1432 __ xorps(res, res);
1591 __ Set(temp, Immediate(upper)); 1433 __ Set(temp, Immediate(upper));
1592 __ pinsrd(res, Operand(temp), 1); 1434 __ pinsrd(res, Operand(temp), 1);
1593 } 1435 }
1594 } else { 1436 } else {
1595 CpuFeatures::Scope scope(SSE2);
1596 __ Set(temp, Immediate(upper)); 1437 __ Set(temp, Immediate(upper));
1597 __ movd(res, Operand(temp)); 1438 __ movd(res, Operand(temp));
1598 __ psllq(res, 32); 1439 __ psllq(res, 32);
1599 if (lower != 0) { 1440 if (lower != 0) {
1600 __ Set(temp, Immediate(lower)); 1441 __ Set(temp, Immediate(lower));
1601 __ movd(xmm0, Operand(temp)); 1442 __ movd(xmm0, Operand(temp));
1602 __ por(res, xmm0); 1443 __ por(res, xmm0);
1603 } 1444 }
1604 } 1445 }
1605 } 1446 }
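[Note] DoConstantD materializes a double constant from its two 32-bit halves (movd + pinsrd under SSE4.1, movd + psllq + por otherwise; note the old code's stray SSE2 scope under the SSE4_1 check). The same bit manipulation in portable C++:

  #include <cstdint>
  #include <cstring>
  #include <cstdio>

  int main() {
    double v = 1.5;
    uint64_t bits;
    std::memcpy(&bits, &v, sizeof bits);  // BitCast<uint64_t, double>(v)
    uint32_t lower = static_cast<uint32_t>(bits);
    uint32_t upper = static_cast<uint32_t>(bits >> 32);
    std::printf("lower=0x%08x upper=0x%08x\n", lower, upper);
    // Reassemble the halves the way psllq + por would:
    uint64_t re = (static_cast<uint64_t>(upper) << 32) | lower;
    double back;
    std::memcpy(&back, &re, sizeof back);
    std::printf("roundtrip=%g\n", back);  // 1.5
  }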
(...skipping 142 matching lines...)
1748 __ add(ToRegister(left), ToOperand(right)); 1589 __ add(ToRegister(left), ToOperand(right));
1749 } 1590 }
1750 1591
1751 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 1592 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1752 DeoptimizeIf(overflow, instr->environment()); 1593 DeoptimizeIf(overflow, instr->environment());
1753 } 1594 }
1754 } 1595 }
1755 1596
1756 1597
1757 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { 1598 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1758 CpuFeatures::Scope scope(SSE2);
1759 LOperand* left = instr->left(); 1599 LOperand* left = instr->left();
1760 LOperand* right = instr->right(); 1600 LOperand* right = instr->right();
1761 ASSERT(left->Equals(instr->result())); 1601 ASSERT(left->Equals(instr->result()));
1762 HMathMinMax::Operation operation = instr->hydrogen()->operation(); 1602 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1763 if (instr->hydrogen()->representation().IsInteger32()) { 1603 if (instr->hydrogen()->representation().IsInteger32()) {
1764 Label return_left; 1604 Label return_left;
1765 Condition condition = (operation == HMathMinMax::kMathMin) 1605 Condition condition = (operation == HMathMinMax::kMathMin)
1766 ? less_equal 1606 ? less_equal
1767 : greater_equal; 1607 : greater_equal;
1768 if (right->IsConstantOperand()) { 1608 if (right->IsConstantOperand()) {
(...skipping 41 matching lines...)
1810 __ j(parity_even, &return_left, Label::kNear); // left == NaN. 1650 __ j(parity_even, &return_left, Label::kNear); // left == NaN.
1811 __ bind(&return_right); 1651 __ bind(&return_right);
1812 __ movsd(left_reg, right_reg); 1652 __ movsd(left_reg, right_reg);
1813 1653
1814 __ bind(&return_left); 1654 __ bind(&return_left);
1815 } 1655 }
1816 } 1656 }
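[Note] The double path cannot simply use minsd/maxsd because JavaScript semantics require NaN propagation (the parity_even branches) and -0 ordered below +0 (the left == right special case). A sketch of the min semantics being implemented:

  #include <cmath>
  #include <cstdio>

  double JsMin(double l, double r) {
    if (std::isnan(l) || std::isnan(r)) return NAN;  // NaN propagates
    if (l == r) return std::signbit(l) ? l : r;      // prefer -0 over +0
    return l < r ? l : r;
  }

  int main() {
    std::printf("%g\n", JsMin(-0.0, 0.0));  // -0
    std::printf("%g\n", JsMin(1.0, NAN));   // nan
  }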
1817 1657
1818 1658
1819 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { 1659 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1820 CpuFeatures::Scope scope(SSE2);
1821 XMMRegister left = ToDoubleRegister(instr->left()); 1660 XMMRegister left = ToDoubleRegister(instr->left());
1822 XMMRegister right = ToDoubleRegister(instr->right()); 1661 XMMRegister right = ToDoubleRegister(instr->right());
1823 XMMRegister result = ToDoubleRegister(instr->result()); 1662 XMMRegister result = ToDoubleRegister(instr->result());
1824 // Modulo uses a fixed result register. 1663 // Modulo uses a fixed result register.
1825 ASSERT(instr->op() == Token::MOD || left.is(result)); 1664 ASSERT(instr->op() == Token::MOD || left.is(result));
1826 switch (instr->op()) { 1665 switch (instr->op()) {
1827 case Token::ADD: 1666 case Token::ADD:
1828 __ addsd(left, right); 1667 __ addsd(left, right);
1829 break; 1668 break;
1830 case Token::SUB: 1669 case Token::SUB:
1831 __ subsd(left, right); 1670 __ subsd(left, right);
1832 break; 1671 break;
1833 case Token::MUL: 1672 case Token::MUL:
1834 __ mulsd(left, right); 1673 __ mulsd(left, right);
1835 break; 1674 break;
1836 case Token::DIV: 1675 case Token::DIV:
1837 __ divsd(left, right); 1676 __ divsd(left, right);
1838 break; 1677 break;
1839 case Token::MOD: { 1678 case Token::MOD: {
1840 // Pass two doubles as arguments on the stack. 1679 // Pass two doubles as arguments on the stack.
1841 __ PrepareCallCFunction(4, eax); 1680 __ PrepareCallCFunction(4, eax);
1842 __ movdbl(Operand(esp, 0 * kDoubleSize), left); 1681 __ movdbl(Operand(esp, 0 * kDoubleSize), left);
(...skipping 52 matching lines...)
1895 } else { 1734 } else {
1896 __ j(cc, chunk_->GetAssemblyLabel(left_block)); 1735 __ j(cc, chunk_->GetAssemblyLabel(left_block));
1897 __ jmp(chunk_->GetAssemblyLabel(right_block)); 1736 __ jmp(chunk_->GetAssemblyLabel(right_block));
1898 } 1737 }
1899 } 1738 }
1900 1739
1901 1740
1902 void LCodeGen::DoBranch(LBranch* instr) { 1741 void LCodeGen::DoBranch(LBranch* instr) {
1903 int true_block = chunk_->LookupDestination(instr->true_block_id()); 1742 int true_block = chunk_->LookupDestination(instr->true_block_id());
1904 int false_block = chunk_->LookupDestination(instr->false_block_id()); 1743 int false_block = chunk_->LookupDestination(instr->false_block_id());
1905 CpuFeatures::Scope scope(SSE2);
1906 1744
1907 Representation r = instr->hydrogen()->value()->representation(); 1745 Representation r = instr->hydrogen()->value()->representation();
1908 if (r.IsInteger32()) { 1746 if (r.IsInteger32()) {
1909 Register reg = ToRegister(instr->value()); 1747 Register reg = ToRegister(instr->value());
1910 __ test(reg, Operand(reg)); 1748 __ test(reg, Operand(reg));
1911 EmitBranch(true_block, false_block, not_zero); 1749 EmitBranch(true_block, false_block, not_zero);
1912 } else if (r.IsDouble()) { 1750 } else if (r.IsDouble()) {
1913 XMMRegister reg = ToDoubleRegister(instr->value()); 1751 XMMRegister reg = ToDoubleRegister(instr->value());
1914 __ xorps(xmm0, xmm0); 1752 __ xorps(xmm0, xmm0);
1915 __ ucomisd(reg, xmm0); 1753 __ ucomisd(reg, xmm0);
(...skipping 139 matching lines...)
2055 return cond; 1893 return cond;
2056 } 1894 }
2057 1895
2058 1896
2059 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { 1897 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
2060 LOperand* left = instr->left(); 1898 LOperand* left = instr->left();
2061 LOperand* right = instr->right(); 1899 LOperand* right = instr->right();
2062 int false_block = chunk_->LookupDestination(instr->false_block_id()); 1900 int false_block = chunk_->LookupDestination(instr->false_block_id());
2063 int true_block = chunk_->LookupDestination(instr->true_block_id()); 1901 int true_block = chunk_->LookupDestination(instr->true_block_id());
2064 Condition cc = TokenToCondition(instr->op(), instr->is_double()); 1902 Condition cc = TokenToCondition(instr->op(), instr->is_double());
2065 CpuFeatures::Scope scope(SSE2);
2066 1903
2067 if (left->IsConstantOperand() && right->IsConstantOperand()) { 1904 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2068 // We can statically evaluate the comparison. 1905 // We can statically evaluate the comparison.
2069 double left_val = ToDouble(LConstantOperand::cast(left)); 1906 double left_val = ToDouble(LConstantOperand::cast(left));
2070 double right_val = ToDouble(LConstantOperand::cast(right)); 1907 double right_val = ToDouble(LConstantOperand::cast(right));
2071 int next_block = 1908 int next_block =
2072 EvalComparison(instr->op(), left_val, right_val) ? true_block 1909 EvalComparison(instr->op(), left_val, right_val) ? true_block
2073 : false_block; 1910 : false_block;
2074 EmitGoto(next_block); 1911 EmitGoto(next_block);
2075 } else { 1912 } else {
(...skipping 489 matching lines...)
2565 __ j(condition, &true_value, Label::kNear); 2402 __ j(condition, &true_value, Label::kNear);
2566 __ mov(ToRegister(instr->result()), factory()->false_value()); 2403 __ mov(ToRegister(instr->result()), factory()->false_value());
2567 __ jmp(&done, Label::kNear); 2404 __ jmp(&done, Label::kNear);
2568 __ bind(&true_value); 2405 __ bind(&true_value);
2569 __ mov(ToRegister(instr->result()), factory()->true_value()); 2406 __ mov(ToRegister(instr->result()), factory()->true_value());
2570 __ bind(&done); 2407 __ bind(&done);
2571 } 2408 }
2572 2409
2573 2410
2574 void LCodeGen::DoReturn(LReturn* instr) { 2411 void LCodeGen::DoReturn(LReturn* instr) {
2575 if (FLAG_trace && info()->IsOptimizing()) { 2412 if (FLAG_trace) {
2576 // Preserve the return value on the stack and rely on the runtime call 2413 // Preserve the return value on the stack and rely on the runtime call
2577 // to return the value in the same register. We're leaving the code 2414 // to return the value in the same register. We're leaving the code
2578 // managed by the register allocator and tearing down the frame, it's 2415 // managed by the register allocator and tearing down the frame, it's
2579 // safe to write to the context register. 2416 // safe to write to the context register.
2580 __ push(eax); 2417 __ push(eax);
2581 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); 2418 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2582 __ CallRuntime(Runtime::kTraceExit, 1); 2419 __ CallRuntime(Runtime::kTraceExit, 1);
2583 } 2420 }
2584 if (dynamic_frame_alignment_) { 2421 if (dynamic_frame_alignment_) {
2585 // Fetch the state of the dynamic frame alignment. 2422 // Fetch the state of the dynamic frame alignment.
2586 __ mov(edx, Operand(ebp, 2423 __ mov(edx, Operand(ebp,
2587 JavaScriptFrameConstants::kDynamicAlignmentStateOffset)); 2424 JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
2588 } 2425 }
2589 if (NeedsEagerFrame()) { 2426 __ mov(esp, ebp);
2590 __ mov(esp, ebp); 2427 __ pop(ebp);
2591 __ pop(ebp);
2592 }
2593 if (dynamic_frame_alignment_) { 2428 if (dynamic_frame_alignment_) {
2594 Label no_padding; 2429 Label no_padding;
2595 __ cmp(edx, Immediate(kNoAlignmentPadding)); 2430 __ cmp(edx, Immediate(kNoAlignmentPadding));
2596 __ j(equal, &no_padding); 2431 __ j(equal, &no_padding);
2597 if (FLAG_debug_code) { 2432 if (FLAG_debug_code) {
2598 __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize), 2433 __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
2599 Immediate(kAlignmentZapValue)); 2434 Immediate(kAlignmentZapValue));
2600 __ Assert(equal, "expected alignment marker"); 2435 __ Assert(equal, "expected alignment marker");
2601 } 2436 }
2602 __ Ret((GetParameterCount() + 2) * kPointerSize, ecx); 2437 __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
2603 __ bind(&no_padding); 2438 __ bind(&no_padding);
2604 } 2439 }
2605 if (info()->IsStub()) { 2440 __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
2606 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2607 __ Ret();
2608 } else {
2609 __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
2610 }
2611 } 2441 }
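[Note] The two Ret variants differ only in how many slots the callee pops: the receiver plus parameters, plus one extra when an alignment word was pushed by the prologue. A sketch of that arithmetic:

  #include <cstdio>

  int BytesPoppedOnReturn(int parameter_count, bool has_alignment_padding) {
    const int kPointerSize = 4;  // ia32
    int slots = parameter_count + 1 + (has_alignment_padding ? 1 : 0);
    return slots * kPointerSize;
  }

  int main() {
    std::printf("%d\n", BytesPoppedOnReturn(2, false));  // 12
    std::printf("%d\n", BytesPoppedOnReturn(2, true));   // 16
  }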
2612 2442
2613 2443
2614 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { 2444 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2615 Register result = ToRegister(instr->result()); 2445 Register result = ToRegister(instr->result());
2616 __ mov(result, Operand::Cell(instr->hydrogen()->cell())); 2446 __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
2617 if (instr->hydrogen()->RequiresHoleCheck()) { 2447 if (instr->hydrogen()->RequiresHoleCheck()) {
2618 __ cmp(result, factory()->the_hole_value()); 2448 __ cmp(result, factory()->the_hole_value());
2619 DeoptimizeIf(equal, instr->environment()); 2449 DeoptimizeIf(equal, instr->environment());
2620 } 2450 }
(...skipping 355 matching lines...)
2976 __ SmiUntag(ToRegister(key)); 2806 __ SmiUntag(ToRegister(key));
2977 } 2807 }
2978 Operand operand(BuildFastArrayOperand( 2808 Operand operand(BuildFastArrayOperand(
2979 instr->elements(), 2809 instr->elements(),
2980 key, 2810 key,
2981 instr->hydrogen()->key()->representation(), 2811 instr->hydrogen()->key()->representation(),
2982 elements_kind, 2812 elements_kind,
2983 0, 2813 0,
2984 instr->additional_index())); 2814 instr->additional_index()));
2985 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { 2815 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
2986 if (CpuFeatures::IsSupported(SSE2)) { 2816 XMMRegister result(ToDoubleRegister(instr->result()));
2987 CpuFeatures::Scope scope(SSE2); 2817 __ movss(result, operand);
2988 XMMRegister result(ToDoubleRegister(instr->result())); 2818 __ cvtss2sd(result, result);
2989 __ movss(result, operand);
2990 __ cvtss2sd(result, result);
2991 } else {
2992 __ fld_s(operand);
2993 HandleX87FPReturnValue(instr);
2994 }
2995 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { 2819 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
2996 if (CpuFeatures::IsSupported(SSE2)) { 2820 __ movdbl(ToDoubleRegister(instr->result()), operand);
2997 CpuFeatures::Scope scope(SSE2);
2998 __ movdbl(ToDoubleRegister(instr->result()), operand);
2999 } else {
3000 __ fld_d(operand);
3001 HandleX87FPReturnValue(instr);
3002 }
3003 } else { 2821 } else {
3004 Register result(ToRegister(instr->result())); 2822 Register result(ToRegister(instr->result()));
3005 switch (elements_kind) { 2823 switch (elements_kind) {
3006 case EXTERNAL_BYTE_ELEMENTS: 2824 case EXTERNAL_BYTE_ELEMENTS:
3007 __ movsx_b(result, operand); 2825 __ movsx_b(result, operand);
3008 break; 2826 break;
3009 case EXTERNAL_PIXEL_ELEMENTS: 2827 case EXTERNAL_PIXEL_ELEMENTS:
3010 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: 2828 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3011 __ movzx_b(result, operand); 2829 __ movzx_b(result, operand);
3012 break; 2830 break;
(...skipping 23 matching lines...)
3036 case FAST_HOLEY_DOUBLE_ELEMENTS: 2854 case FAST_HOLEY_DOUBLE_ELEMENTS:
3037 case DICTIONARY_ELEMENTS: 2855 case DICTIONARY_ELEMENTS:
3038 case NON_STRICT_ARGUMENTS_ELEMENTS: 2856 case NON_STRICT_ARGUMENTS_ELEMENTS:
3039 UNREACHABLE(); 2857 UNREACHABLE();
3040 break; 2858 break;
3041 } 2859 }
3042 } 2860 }
3043 } 2861 }
3044 2862
3045 2863
3046 void LCodeGen::HandleX87FPReturnValue(LInstruction* instr) {
3047 if (IsX87TopOfStack(instr->result())) {
3048 // Return value is already on stack. If the value has no uses, then
3049 // pop it off the FP stack. Otherwise, make sure that there are enough
3050 // copies of the value on the stack to feed all of the usages, e.g.
3051 // when the following instruction uses the return value in multiple
3052 // inputs.
3053 int count = instr->hydrogen_value()->UseCount();
3054 if (count == 0) {
3055 __ fstp(0);
3056 } else {
3057 count--;
3058 ASSERT(count <= 7);
3059 while (count-- > 0) {
3060 __ fld(0);
3061 }
3062 }
3063 } else {
3064 __ fstp_d(ToOperand(instr->result()));
3065 }
3066 }
3067
3068
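[Note] HandleX87FPReturnValue keeps the x87 register stack balanced: the value at st(0) must exist once per pending use, so zero uses means a pop (fstp) and N uses means N - 1 duplications (fld st(0)), bounded by the 8-deep stack, hence the ASSERT. A sketch of the count:

  #include <cassert>
  #include <cstdio>

  // Returns how many fld(0) duplications are needed, or -1 for fstp(0).
  int X87CopiesToPush(int use_count) {
    if (use_count == 0) return -1;  // unused: pop the value
    int copies = use_count - 1;     // st(0) already supplies one use
    assert(copies <= 7);            // the x87 stack holds at most 8 values
    return copies;
  }

  int main() {
    std::printf("%d %d\n", X87CopiesToPush(0), X87CopiesToPush(3));  // -1 2
  }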
3069 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { 2864 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
2865 XMMRegister result = ToDoubleRegister(instr->result());
2866
3070 if (instr->hydrogen()->RequiresHoleCheck()) { 2867 if (instr->hydrogen()->RequiresHoleCheck()) {
3071 int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + 2868 int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
3072 sizeof(kHoleNanLower32); 2869 sizeof(kHoleNanLower32);
3073 Operand hole_check_operand = BuildFastArrayOperand( 2870 Operand hole_check_operand = BuildFastArrayOperand(
3074 instr->elements(), instr->key(), 2871 instr->elements(), instr->key(),
3075 instr->hydrogen()->key()->representation(), 2872 instr->hydrogen()->key()->representation(),
3076 FAST_DOUBLE_ELEMENTS, 2873 FAST_DOUBLE_ELEMENTS,
3077 offset, 2874 offset,
3078 instr->additional_index()); 2875 instr->additional_index());
3079 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); 2876 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
3080 DeoptimizeIf(equal, instr->environment()); 2877 DeoptimizeIf(equal, instr->environment());
3081 } 2878 }
3082 2879
3083 Operand double_load_operand = BuildFastArrayOperand( 2880 Operand double_load_operand = BuildFastArrayOperand(
3084 instr->elements(), 2881 instr->elements(),
3085 instr->key(), 2882 instr->key(),
3086 instr->hydrogen()->key()->representation(), 2883 instr->hydrogen()->key()->representation(),
3087 FAST_DOUBLE_ELEMENTS, 2884 FAST_DOUBLE_ELEMENTS,
3088 FixedDoubleArray::kHeaderSize - kHeapObjectTag, 2885 FixedDoubleArray::kHeaderSize - kHeapObjectTag,
3089 instr->additional_index()); 2886 instr->additional_index());
3090 if (CpuFeatures::IsSupported(SSE2)) { 2887 __ movdbl(result, double_load_operand);
3091 CpuFeatures::Scope scope(SSE2);
3092 XMMRegister result = ToDoubleRegister(instr->result());
3093 __ movdbl(result, double_load_operand);
3094 } else {
3095 __ fld_d(double_load_operand);
3096 HandleX87FPReturnValue(instr);
3097 }
3098 } 2888 }
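[Note] The hole check works because the hole in a FixedDoubleArray is a distinguished NaN, so only the upper 32 bits need comparing; that is why the operand offset skips sizeof(kHoleNanLower32). A sketch with an illustrative encoding (the exact constant lives in V8's headers):

  #include <cstdint>
  #include <cstring>
  #include <cstdio>

  const uint32_t kHoleNanUpper32 = 0x7FF7FFFF;  // illustrative NaN pattern

  bool IsHole(double d) {
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);
    return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
  }

  int main() {
    uint64_t hole_bits = static_cast<uint64_t>(kHoleNanUpper32) << 32;
    double hole;
    std::memcpy(&hole, &hole_bits, sizeof hole);
    std::printf("%d %d\n", IsHole(hole), IsHole(1.0));  // 1 0
  }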
3099 2889
3100 2890
3101 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { 2891 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3102 Register result = ToRegister(instr->result()); 2892 Register result = ToRegister(instr->result());
3103 2893
3104 // Load the result. 2894 // Load the result.
3105 __ mov(result, 2895 __ mov(result,
3106 BuildFastArrayOperand(instr->elements(), 2896 BuildFastArrayOperand(instr->elements(),
3107 instr->key(), 2897 instr->key(),
(...skipping 395 matching lines...)
3503 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); 3293 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3504 } 3294 }
3505 virtual LInstruction* instr() { return instr_; } 3295 virtual LInstruction* instr() { return instr_; }
3506 private: 3296 private:
3507 LUnaryMathOperation* instr_; 3297 LUnaryMathOperation* instr_;
3508 }; 3298 };
3509 3299
3510 ASSERT(instr->value()->Equals(instr->result())); 3300 ASSERT(instr->value()->Equals(instr->result()));
3511 Representation r = instr->hydrogen()->value()->representation(); 3301 Representation r = instr->hydrogen()->value()->representation();
3512 3302
3513 CpuFeatures::Scope scope(SSE2);
3514 if (r.IsDouble()) { 3303 if (r.IsDouble()) {
3515 XMMRegister scratch = xmm0; 3304 XMMRegister scratch = xmm0;
3516 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3305 XMMRegister input_reg = ToDoubleRegister(instr->value());
3517 __ xorps(scratch, scratch); 3306 __ xorps(scratch, scratch);
3518 __ subsd(scratch, input_reg); 3307 __ subsd(scratch, input_reg);
3519 __ pand(input_reg, scratch); 3308 __ pand(input_reg, scratch);
3520 } else if (r.IsInteger32()) { 3309 } else if (r.IsInteger32()) {
3521 EmitIntegerMathAbs(instr); 3310 EmitIntegerMathAbs(instr);
3522 } else { // Tagged case. 3311 } else { // Tagged case.
3523 DeferredMathAbsTaggedHeapNumber* deferred = 3312 DeferredMathAbsTaggedHeapNumber* deferred =
3524 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); 3313 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3525 Register input_reg = ToRegister(instr->value()); 3314 Register input_reg = ToRegister(instr->value());
3526 // Smi check. 3315 // Smi check.
3527 __ JumpIfNotSmi(input_reg, deferred->entry()); 3316 __ JumpIfNotSmi(input_reg, deferred->entry());
3528 EmitIntegerMathAbs(instr); 3317 EmitIntegerMathAbs(instr);
3529 __ bind(deferred->exit()); 3318 __ bind(deferred->exit());
3530 } 3319 }
3531 } 3320 }
3532 3321
3533 3322
3534 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { 3323 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
3535 CpuFeatures::Scope scope(SSE2);
3536 XMMRegister xmm_scratch = xmm0; 3324 XMMRegister xmm_scratch = xmm0;
3537 Register output_reg = ToRegister(instr->result()); 3325 Register output_reg = ToRegister(instr->result());
3538 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3326 XMMRegister input_reg = ToDoubleRegister(instr->value());
3539 3327
3540 if (CpuFeatures::IsSupported(SSE4_1)) { 3328 if (CpuFeatures::IsSupported(SSE4_1)) {
3541 CpuFeatures::Scope scope(SSE4_1); 3329 CpuFeatures::Scope scope(SSE4_1);
3542 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3330 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3543 // Deoptimize on negative zero. 3331 // Deoptimize on negative zero.
3544 Label non_zero; 3332 Label non_zero;
3545 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. 3333 __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
(...skipping 44 matching lines...)
3590 __ ucomisd(input_reg, xmm_scratch); 3378 __ ucomisd(input_reg, xmm_scratch);
3591 __ j(equal, &done, Label::kNear); 3379 __ j(equal, &done, Label::kNear);
3592 __ sub(output_reg, Immediate(1)); 3380 __ sub(output_reg, Immediate(1));
3593 DeoptimizeIf(overflow, instr->environment()); 3381 DeoptimizeIf(overflow, instr->environment());
3594 3382
3595 __ bind(&done); 3383 __ bind(&done);
3596 } 3384 }
3597 } 3385 }
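[Note] On the non-SSE4.1 path, floor is built from cvttsd2si, which truncates toward zero, plus the fix-up above that subtracts one for negative inputs with a fractional part. A sketch of the effective computation (the overflow and -0 deopt cases are omitted):

  #include <cstdio>

  int FloorViaTruncation(double x) {
    int t = static_cast<int>(x);                 // cvttsd2si: round to zero
    if (x < 0 && static_cast<double>(t) != x) {  // negative, fractional
      --t;                                       // the "sub output_reg, 1"
    }
    return t;
  }

  int main() {
    std::printf("%d %d %d\n", FloorViaTruncation(2.7),
                FloorViaTruncation(-2.7), FloorViaTruncation(-3.0));
    // 2 -3 -3
  }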
3598 3386
3599 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { 3387 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3600 CpuFeatures::Scope scope(SSE2);
3601 XMMRegister xmm_scratch = xmm0; 3388 XMMRegister xmm_scratch = xmm0;
3602 Register output_reg = ToRegister(instr->result()); 3389 Register output_reg = ToRegister(instr->result());
3603 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3390 XMMRegister input_reg = ToDoubleRegister(instr->value());
3604 3391
3605 Label below_half, done; 3392 Label below_half, done;
3606 // xmm_scratch = 0.5 3393 // xmm_scratch = 0.5
3607 ExternalReference one_half = ExternalReference::address_of_one_half(); 3394 ExternalReference one_half = ExternalReference::address_of_one_half();
3608 __ movdbl(xmm_scratch, Operand::StaticVariable(one_half)); 3395 __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
3609 __ ucomisd(xmm_scratch, input_reg); 3396 __ ucomisd(xmm_scratch, input_reg);
3610 __ j(above, &below_half); 3397 __ j(above, &below_half);
(...skipping 25 matching lines...)
3636 __ cvtss2sd(xmm_scratch, xmm_scratch); 3423 __ cvtss2sd(xmm_scratch, xmm_scratch);
3637 __ ucomisd(input_reg, xmm_scratch); 3424 __ ucomisd(input_reg, xmm_scratch);
3638 DeoptimizeIf(below, instr->environment()); 3425 DeoptimizeIf(below, instr->environment());
3639 } 3426 }
3640 __ Set(output_reg, Immediate(0)); 3427 __ Set(output_reg, Immediate(0));
3641 __ bind(&done); 3428 __ bind(&done);
3642 } 3429 }
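[Note] For inputs at or above 0.5, Math.round is implemented by adding 0.5 and truncating, which makes ties round toward +Infinity as ES5.1 15.8.2.15 requires. A sketch of that fast path only (the below_half branch handles everything else):

  #include <cstdio>

  int RoundAboveHalf(double x) {       // assumes x >= 0.5
    return static_cast<int>(x + 0.5);  // addsd + cvttsd2si
  }

  int main() {
    std::printf("%d %d\n", RoundAboveHalf(2.5), RoundAboveHalf(2.49));  // 3 2
  }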
3643 3430
3644 3431
3645 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { 3432 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3646 CpuFeatures::Scope scope(SSE2);
3647 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3433 XMMRegister input_reg = ToDoubleRegister(instr->value());
3648 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); 3434 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3649 __ sqrtsd(input_reg, input_reg); 3435 __ sqrtsd(input_reg, input_reg);
3650 } 3436 }
3651 3437
3652 3438
3653 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { 3439 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3654 CpuFeatures::Scope scope(SSE2);
3655 XMMRegister xmm_scratch = xmm0; 3440 XMMRegister xmm_scratch = xmm0;
3656 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3441 XMMRegister input_reg = ToDoubleRegister(instr->value());
3657 Register scratch = ToRegister(instr->temp()); 3442 Register scratch = ToRegister(instr->temp());
3658 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); 3443 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3659 3444
3660 // Note that according to ECMA-262 15.8.2.13: 3445 // Note that according to ECMA-262 15.8.2.13:
3661 // Math.pow(-Infinity, 0.5) == Infinity 3446 // Math.pow(-Infinity, 0.5) == Infinity
3662 // Math.sqrt(-Infinity) == NaN 3447 // Math.sqrt(-Infinity) == NaN
3663 Label done, sqrt; 3448 Label done, sqrt;
3664 // Check base for -Infinity. According to IEEE-754, single-precision 3449 // Check base for -Infinity. According to IEEE-754, single-precision
(...skipping 56 matching lines...)
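
Note on DoMathPowHalf: per the ECMA-262 15.8.2.13 comment above, Math.pow(-Infinity, 0.5) is +Infinity while sqrtsd of -Infinity would yield NaN, so the code tests the base for -Infinity before falling through to sqrtsd. A small C++ sketch of that special case (function name illustrative):

    #include <cmath>
    #include <limits>

    double PowHalf(double base) {
      if (base == -std::numeric_limits<double>::infinity())
        return std::numeric_limits<double>::infinity();  // special case
      return std::sqrt(base);                            // sqrtsd
    }
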
3721 DeferredDoRandom(LCodeGen* codegen, LRandom* instr) 3506 DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
3722 : LDeferredCode(codegen), instr_(instr) { } 3507 : LDeferredCode(codegen), instr_(instr) { }
3723 virtual void Generate() { codegen()->DoDeferredRandom(instr_); } 3508 virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
3724 virtual LInstruction* instr() { return instr_; } 3509 virtual LInstruction* instr() { return instr_; }
3725 private: 3510 private:
3726 LRandom* instr_; 3511 LRandom* instr_;
3727 }; 3512 };
3728 3513
3729 DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr); 3514 DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
3730 3515
3731 CpuFeatures::Scope scope(SSE2);
3732 // Having marked this instruction as a call, we can use any 3516 // Having marked this instruction as a call, we can use any
3733 // registers. 3517 // registers.
3734 ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); 3518 ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3735 ASSERT(ToRegister(instr->global_object()).is(eax)); 3519 ASSERT(ToRegister(instr->global_object()).is(eax));
3736 // Assert that the register size is indeed the size of each seed. 3520 // Assert that the register size is indeed the size of each seed.
3737 static const int kSeedSize = sizeof(uint32_t); 3521 static const int kSeedSize = sizeof(uint32_t);
3738 STATIC_ASSERT(kPointerSize == kSeedSize); 3522 STATIC_ASSERT(kPointerSize == kSeedSize);
3739 3523
3740 __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset)); 3524 __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
3741 static const int kRandomSeedOffset = 3525 static const int kRandomSeedOffset =
(...skipping 47 matching lines...)
3789 3573
3790 void LCodeGen::DoDeferredRandom(LRandom* instr) { 3574 void LCodeGen::DoDeferredRandom(LRandom* instr) {
3791 __ PrepareCallCFunction(1, ebx); 3575 __ PrepareCallCFunction(1, ebx);
3792 __ mov(Operand(esp, 0), eax); 3576 __ mov(Operand(esp, 0), eax);
3793 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); 3577 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
3794 // Return value is in eax. 3578 // Return value is in eax.
3795 } 3579 }
3796 3580
3797 3581
3798 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { 3582 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
3799 CpuFeatures::Scope scope(SSE2);
3800 ASSERT(instr->value()->Equals(instr->result())); 3583 ASSERT(instr->value()->Equals(instr->result()));
3801 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3584 XMMRegister input_reg = ToDoubleRegister(instr->value());
3802 Label positive, done, zero; 3585 Label positive, done, zero;
3803 __ xorps(xmm0, xmm0); 3586 __ xorps(xmm0, xmm0);
3804 __ ucomisd(input_reg, xmm0); 3587 __ ucomisd(input_reg, xmm0);
3805 __ j(above, &positive, Label::kNear); 3588 __ j(above, &positive, Label::kNear);
3806 __ j(equal, &zero, Label::kNear); 3589 __ j(equal, &zero, Label::kNear);
3807 ExternalReference nan = 3590 ExternalReference nan =
3808 ExternalReference::address_of_canonical_non_hole_nan(); 3591 ExternalReference::address_of_canonical_non_hole_nan();
3809 __ movdbl(input_reg, Operand::StaticVariable(nan)); 3592 __ movdbl(input_reg, Operand::StaticVariable(nan));
(...skipping 11 matching lines...)
3821 __ fld_d(Operand(esp, 0)); 3604 __ fld_d(Operand(esp, 0));
3822 __ fyl2x(); 3605 __ fyl2x();
3823 __ fstp_d(Operand(esp, 0)); 3606 __ fstp_d(Operand(esp, 0));
3824 __ movdbl(input_reg, Operand(esp, 0)); 3607 __ movdbl(input_reg, Operand(esp, 0));
3825 __ add(Operand(esp), Immediate(kDoubleSize)); 3608 __ add(Operand(esp), Immediate(kDoubleSize));
3826 __ bind(&done); 3609 __ bind(&done);
3827 } 3610 }
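
Note on the x87 slow path in DoMathLog above: fldln2 pushes ln 2 and fyl2x computes ST(1) * log2(ST(0)), so the pair evaluates ln 2 * log2(x) = ln(x). A one-line C++ statement of the identity (nothing V8-specific):

    #include <cmath>

    // fldln2 / fyl2x: ln(x) == ln(2) * log2(x).
    double LogViaFyl2x(double x) {
      return std::log(2.0) * std::log2(x);
    }
    // Zero and negative inputs never reach the FPU sequence: zero is
    // mapped to -Infinity and negatives to a canonical NaN beforehand.
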
3828 3611
3829 3612
3830 void LCodeGen::DoMathExp(LMathExp* instr) { 3613 void LCodeGen::DoMathExp(LMathExp* instr) {
3831 CpuFeatures::Scope scope(SSE2);
3832 XMMRegister input = ToDoubleRegister(instr->value()); 3614 XMMRegister input = ToDoubleRegister(instr->value());
3833 XMMRegister result = ToDoubleRegister(instr->result()); 3615 XMMRegister result = ToDoubleRegister(instr->result());
3834 Register temp1 = ToRegister(instr->temp1()); 3616 Register temp1 = ToRegister(instr->temp1());
3835 Register temp2 = ToRegister(instr->temp2()); 3617 Register temp2 = ToRegister(instr->temp2());
3836 3618
3837 MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2); 3619 MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
3838 } 3620 }
3839 3621
3840 3622
3841 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) { 3623 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
(...skipping 248 matching lines...)
4090 int constant_index = 3872 int constant_index =
4091 ToInteger32(LConstantOperand::cast(instr->index())); 3873 ToInteger32(LConstantOperand::cast(instr->index()));
4092 if (instr->hydrogen()->length()->representation().IsTagged()) { 3874 if (instr->hydrogen()->length()->representation().IsTagged()) {
4093 __ cmp(ToOperand(instr->length()), 3875 __ cmp(ToOperand(instr->length()),
4094 Immediate(Smi::FromInt(constant_index))); 3876 Immediate(Smi::FromInt(constant_index)));
4095 } else { 3877 } else {
4096 __ cmp(ToOperand(instr->length()), Immediate(constant_index)); 3878 __ cmp(ToOperand(instr->length()), Immediate(constant_index));
4097 } 3879 }
4098 DeoptimizeIf(below_equal, instr->environment()); 3880 DeoptimizeIf(below_equal, instr->environment());
4099 } else { 3881 } else {
4100 if (instr->hydrogen()->index()->representation().IsTagged() &&
4101 !instr->hydrogen()->index()->type().IsSmi()) {
4102 __ test(ToRegister(instr->index()), Immediate(kSmiTagMask));
4103 DeoptimizeIf(not_zero, instr->environment());
4104 }
4105 __ cmp(ToRegister(instr->index()), ToOperand(instr->length())); 3882 __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
4106 DeoptimizeIf(above_equal, instr->environment()); 3883 DeoptimizeIf(above_equal, instr->environment());
4107 } 3884 }
4108 } 3885 }
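
Note on DoBoundsCheck above: a constant index is compared against the length (as a smi immediate when the length is tagged); otherwise the index register is compared against the length and above_equal deoptimizes. Because the jump condition is unsigned, a negative index also fails the check. A sketch with an exception standing in for DeoptimizeIf:

    #include <cstdint>
    #include <stdexcept>

    void BoundsCheckOrDeopt(int32_t index, int32_t length) {
      // Unsigned compare: negative indices become huge and also fail.
      if (static_cast<uint32_t>(index) >= static_cast<uint32_t>(length))
        throw std::runtime_error("deopt: index out of bounds");
    }
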
4109 3886
4110 3887
4111 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { 3888 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4112 ElementsKind elements_kind = instr->elements_kind(); 3889 ElementsKind elements_kind = instr->elements_kind();
4113 LOperand* key = instr->key(); 3890 LOperand* key = instr->key();
4114 if (!key->IsConstantOperand() && 3891 if (!key->IsConstantOperand() &&
4115 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(), 3892 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
4116 elements_kind)) { 3893 elements_kind)) {
4117 __ SmiUntag(ToRegister(key)); 3894 __ SmiUntag(ToRegister(key));
4118 } 3895 }
4119 Operand operand(BuildFastArrayOperand( 3896 Operand operand(BuildFastArrayOperand(
4120 instr->elements(), 3897 instr->elements(),
4121 key, 3898 key,
4122 instr->hydrogen()->key()->representation(), 3899 instr->hydrogen()->key()->representation(),
4123 elements_kind, 3900 elements_kind,
4124 0, 3901 0,
4125 instr->additional_index())); 3902 instr->additional_index()));
4126 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { 3903 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
4127 CpuFeatures::Scope scope(SSE2);
4128 __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value())); 3904 __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
4129 __ movss(operand, xmm0); 3905 __ movss(operand, xmm0);
4130 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { 3906 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
4131 CpuFeatures::Scope scope(SSE2);
4132 __ movdbl(operand, ToDoubleRegister(instr->value())); 3907 __ movdbl(operand, ToDoubleRegister(instr->value()));
4133 } else { 3908 } else {
4134 Register value = ToRegister(instr->value()); 3909 Register value = ToRegister(instr->value());
4135 switch (elements_kind) { 3910 switch (elements_kind) {
4136 case EXTERNAL_PIXEL_ELEMENTS: 3911 case EXTERNAL_PIXEL_ELEMENTS:
4137 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: 3912 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
4138 case EXTERNAL_BYTE_ELEMENTS: 3913 case EXTERNAL_BYTE_ELEMENTS:
4139 __ mov_b(operand, value); 3914 __ mov_b(operand, value);
4140 break; 3915 break;
4141 case EXTERNAL_SHORT_ELEMENTS: 3916 case EXTERNAL_SHORT_ELEMENTS:
(...skipping 15 matching lines...)
4157 case DICTIONARY_ELEMENTS: 3932 case DICTIONARY_ELEMENTS:
4158 case NON_STRICT_ARGUMENTS_ELEMENTS: 3933 case NON_STRICT_ARGUMENTS_ELEMENTS:
4159 UNREACHABLE(); 3934 UNREACHABLE();
4160 break; 3935 break;
4161 } 3936 }
4162 } 3937 }
4163 } 3938 }
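
Note on the stores above: float arrays narrow the double with cvtsd2ss before the movss, double arrays store the value unchanged, and the integer kinds write the low 8/16/32 bits of the untagged value (pixel values were already clamped before reaching the store). A C++ sketch of the two conversions that actually change the value (helper names illustrative):

    #include <cstdint>

    void StoreFloat32(float* slot, double value) {
      *slot = static_cast<float>(value);    // cvtsd2ss + movss
    }
    void StoreByte(uint8_t* slot, int32_t value) {
      *slot = static_cast<uint8_t>(value);  // mov_b: low byte only
    }
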
4164 3939
4165 3940
4166 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { 3941 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4167 CpuFeatures::Scope scope(SSE2);
4168 XMMRegister value = ToDoubleRegister(instr->value()); 3942 XMMRegister value = ToDoubleRegister(instr->value());
4169 3943
4170 if (instr->NeedsCanonicalization()) { 3944 if (instr->NeedsCanonicalization()) {
4171 Label have_value; 3945 Label have_value;
4172 3946
4173 __ ucomisd(value, value); 3947 __ ucomisd(value, value);
4174 __ j(parity_odd, &have_value); // Not NaN. 3948 __ j(parity_odd, &have_value); // Not NaN.
4175 3949
4176 ExternalReference canonical_nan_reference = 3950 ExternalReference canonical_nan_reference =
4177 ExternalReference::address_of_canonical_non_hole_nan(); 3951 ExternalReference::address_of_canonical_non_hole_nan();
(...skipping 230 matching lines...)
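
Note on NeedsCanonicalization above: fixed-double backing stores reserve one NaN bit pattern to represent the hole, so any NaN being stored is first replaced by a single canonical non-hole NaN; ucomisd(value, value) is ordered exactly when the value is not NaN, hence the parity branch. A sketch of the idea, with an illustrative bit pattern (the real canonical and hole patterns are V8 internals):

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    double CanonicalizeForStore(double value) {
      if (std::isnan(value)) {                              // parity check
        const uint64_t kCanonical = 0x7FF8000000000000ULL;  // illustrative
        std::memcpy(&value, &kCanonical, sizeof value);
      }
      return value;
    }
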
4408 4182
4409 void LCodeGen::DoStringAdd(LStringAdd* instr) { 4183 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4410 EmitPushTaggedOperand(instr->left()); 4184 EmitPushTaggedOperand(instr->left());
4411 EmitPushTaggedOperand(instr->right()); 4185 EmitPushTaggedOperand(instr->right());
4412 StringAddStub stub(NO_STRING_CHECK_IN_STUB); 4186 StringAddStub stub(NO_STRING_CHECK_IN_STUB);
4413 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 4187 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4414 } 4188 }
4415 4189
4416 4190
4417 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { 4191 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4418 if (CpuFeatures::IsSupported(SSE2)) { 4192 LOperand* input = instr->value();
4419 CpuFeatures::Scope scope(SSE2); 4193 ASSERT(input->IsRegister() || input->IsStackSlot());
4420 LOperand* input = instr->value(); 4194 LOperand* output = instr->result();
4421 ASSERT(input->IsRegister() || input->IsStackSlot()); 4195 ASSERT(output->IsDoubleRegister());
4422 LOperand* output = instr->result(); 4196 __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
4423 ASSERT(output->IsDoubleRegister());
4424 __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
4425 } else {
4426 UNREACHABLE();
4427 }
4428 } 4197 }
4429 4198
4430 4199
4431 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { 4200 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4432 CpuFeatures::Scope scope(SSE2);
4433 LOperand* input = instr->value(); 4201 LOperand* input = instr->value();
4434 LOperand* output = instr->result(); 4202 LOperand* output = instr->result();
4435 LOperand* temp = instr->temp(); 4203 LOperand* temp = instr->temp();
4436 4204
4437 __ LoadUint32(ToDoubleRegister(output), 4205 __ LoadUint32(ToDoubleRegister(output),
4438 ToRegister(input), 4206 ToRegister(input),
4439 ToDoubleRegister(temp)); 4207 ToDoubleRegister(temp));
4440 } 4208 }
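
Note on LoadUint32 above: ia32 has no unsigned 32-bit-to-double conversion, and cvtsi2sd treats its input as signed, so values with the top bit set come out 2^32 too low and must be corrected. A C++ sketch of the correction (helper name illustrative):

    #include <cstdint>

    double Uint32ToDouble(uint32_t value) {
      double d = static_cast<double>(static_cast<int32_t>(value));
      if (static_cast<int32_t>(value) < 0) d += 4294967296.0;  // + 2^32
      return d;
    }
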
4441 4209
4442 4210
(...skipping 57 matching lines...)
4500 PushSafepointRegistersScope scope(this); 4268 PushSafepointRegistersScope scope(this);
4501 4269
4502 Label done; 4270 Label done;
4503 4271
4504 if (signedness == SIGNED_INT32) { 4272 if (signedness == SIGNED_INT32) {
4505 // There was overflow, so bits 30 and 31 of the original integer 4273 // There was overflow, so bits 30 and 31 of the original integer
4506 // disagree. Try to allocate a heap number in new space and store 4274 // disagree. Try to allocate a heap number in new space and store
4507 // the value in there. If that fails, call the runtime system. 4275 // the value in there. If that fails, call the runtime system.
4508 __ SmiUntag(reg); 4276 __ SmiUntag(reg);
4509 __ xor_(reg, 0x80000000); 4277 __ xor_(reg, 0x80000000);
4510 if (CpuFeatures::IsSupported(SSE2)) { 4278 __ cvtsi2sd(xmm0, Operand(reg));
4511 CpuFeatures::Scope feature_scope(SSE2);
4512 __ cvtsi2sd(xmm0, Operand(reg));
4513 } else {
4514 __ push(reg);
4515 __ fild_s(Operand(esp, 0));
4516 __ pop(reg);
4517 }
4518 } else { 4279 } else {
4519 if (CpuFeatures::IsSupported(SSE2)) { 4280 __ LoadUint32(xmm0, reg, xmm1);
4520 CpuFeatures::Scope feature_scope(SSE2);
4521 __ LoadUint32(xmm0, reg, xmm1);
4522 } else {
4523 UNREACHABLE();
4524 }
4525 } 4281 }
4526 4282
4527 if (FLAG_inline_new) { 4283 if (FLAG_inline_new) {
4528 __ AllocateHeapNumber(reg, tmp, no_reg, &slow); 4284 __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
4529 __ jmp(&done, Label::kNear); 4285 __ jmp(&done, Label::kNear);
4530 } 4286 }
4531 4287
4532 // Slow case: Call the runtime system to do the number allocation. 4288 // Slow case: Call the runtime system to do the number allocation.
4533 __ bind(&slow); 4289 __ bind(&slow);
4534 4290
4535 // TODO(3095996): Put a valid pointer value in the stack slot where the result 4291 // TODO(3095996): Put a valid pointer value in the stack slot where the result
4536 // register is stored, as this register is in the pointer map, but contains an 4292 // register is stored, as this register is in the pointer map, but contains an
4537 // integer value. 4293 // integer value.
4538 __ StoreToSafepointRegisterSlot(reg, Immediate(0)); 4294 __ StoreToSafepointRegisterSlot(reg, Immediate(0));
4539 // NumberTagI and NumberTagD use the context from the frame, rather than 4295 // NumberTagI and NumberTagD use the context from the frame, rather than
4540 // the environment's HContext or HInlinedContext value. 4296 // the environment's HContext or HInlinedContext value.
4541 // They only call Runtime::kAllocateHeapNumber. 4297 // They only call Runtime::kAllocateHeapNumber.
4542 // The corresponding HChange instructions are added in a phase that does 4298 // The corresponding HChange instructions are added in a phase that does
4543 // not have easy access to the local context. 4299 // not have easy access to the local context.
4544 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); 4300 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4545 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 4301 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4546 RecordSafepointWithRegisters( 4302 RecordSafepointWithRegisters(
4547 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); 4303 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4548 if (!reg.is(eax)) __ mov(reg, eax); 4304 if (!reg.is(eax)) __ mov(reg, eax);
4549 4305
4550 // Done. Put the value in xmm0 into the allocated heap 4306 // Done. Put the value in xmm0 into the allocated heap
4551 // number. 4307 // number.
4552 __ bind(&done); 4308 __ bind(&done);
4553 if (CpuFeatures::IsSupported(SSE2)) { 4309 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
4554 CpuFeatures::Scope feature_scope(SSE2);
4555 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
4556 } else {
4557 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
4558 }
4559 __ StoreToSafepointRegisterSlot(reg, reg); 4310 __ StoreToSafepointRegisterSlot(reg, reg);
4560 } 4311 }
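
Note on the SIGNED_INT32 path above: smi tagging shifts left by one, so tagging overflowed exactly when bits 30 and 31 of the original value disagreed. SmiUntag (an arithmetic right shift) restores every bit except bit 31, and the xor with 0x80000000 flips that bit back before the value is converted with cvtsi2sd. A sketch, assuming arithmetic right shift as on ia32:

    #include <cstdint>

    int32_t RecoverAfterSmiTagOverflow(int32_t overflowed_tagged) {
      int32_t untagged = overflowed_tagged >> 1;            // SmiUntag (sar)
      return untagged ^ static_cast<int32_t>(0x80000000u);  // fix bit 31
    }
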
4561 4312
4562 4313
4563 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { 4314 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4564 class DeferredNumberTagD: public LDeferredCode { 4315 class DeferredNumberTagD: public LDeferredCode {
4565 public: 4316 public:
4566 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) 4317 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4567 : LDeferredCode(codegen), instr_(instr) { } 4318 : LDeferredCode(codegen), instr_(instr) { }
4568 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } 4319 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
4569 virtual LInstruction* instr() { return instr_; } 4320 virtual LInstruction* instr() { return instr_; }
4570 private: 4321 private:
4571 LNumberTagD* instr_; 4322 LNumberTagD* instr_;
4572 }; 4323 };
4573 4324
4325 XMMRegister input_reg = ToDoubleRegister(instr->value());
4574 Register reg = ToRegister(instr->result()); 4326 Register reg = ToRegister(instr->result());
4575 Register tmp = ToRegister(instr->temp()); 4327 Register tmp = ToRegister(instr->temp());
4576 4328
4577 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); 4329 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4578 if (FLAG_inline_new) { 4330 if (FLAG_inline_new) {
4579 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); 4331 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
4580 } else { 4332 } else {
4581 __ jmp(deferred->entry()); 4333 __ jmp(deferred->entry());
4582 } 4334 }
4583 __ bind(deferred->exit()); 4335 __ bind(deferred->exit());
4584 if (CpuFeatures::IsSupported(SSE2)) { 4336 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
4585 CpuFeatures::Scope scope(SSE2);
4586 XMMRegister input_reg = ToDoubleRegister(instr->value());
4587 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
4588 } else {
4589 if (!IsX87TopOfStack(instr->value())) {
4590 __ fld_d(ToOperand(instr->value()));
4591 }
4592 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
4593 }
4594 } 4337 }
4595 4338
4596 4339
4597 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { 4340 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4598 // TODO(3095996): Get rid of this. For now, we need to make the 4341 // TODO(3095996): Get rid of this. For now, we need to make the
4599 // result register contain a valid pointer because it is already 4342 // result register contain a valid pointer because it is already
4600 // contained in the register pointer map. 4343 // contained in the register pointer map.
4601 Register reg = ToRegister(instr->result()); 4344 Register reg = ToRegister(instr->result());
4602 __ Set(reg, Immediate(0)); 4345 __ Set(reg, Immediate(0));
4603 4346
(...skipping 123 matching lines...)
4727 DeoptimizeIf(no_condition, instr->environment()); 4470 DeoptimizeIf(no_condition, instr->environment());
4728 4471
4729 // Reserve space for a 64-bit answer. 4472 // Reserve space for a 64-bit answer.
4730 __ bind(&convert); 4473 __ bind(&convert);
4731 __ sub(Operand(esp), Immediate(kDoubleSize)); 4474 __ sub(Operand(esp), Immediate(kDoubleSize));
4732 // Do conversion, which cannot fail because we checked the exponent. 4475 // Do conversion, which cannot fail because we checked the exponent.
4733 __ fisttp_d(Operand(esp, 0)); 4476 __ fisttp_d(Operand(esp, 0));
4734 __ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result. 4477 __ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result.
4735 __ add(Operand(esp), Immediate(kDoubleSize)); 4478 __ add(Operand(esp), Immediate(kDoubleSize));
4736 } else { 4479 } else {
4737 CpuFeatures::Scope scope(SSE2);
4738 XMMRegister xmm_temp = ToDoubleRegister(instr->temp()); 4480 XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
4739 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); 4481 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
4740 __ cvttsd2si(input_reg, Operand(xmm0)); 4482 __ cvttsd2si(input_reg, Operand(xmm0));
4741 __ cmp(input_reg, 0x80000000u); 4483 __ cmp(input_reg, 0x80000000u);
4742 __ j(not_equal, &done); 4484 __ j(not_equal, &done);
4743 // Check if the input was 0x80000000 (kMinInt). 4485 // Check if the input was 0x80000000 (kMinInt).
4744 // If not, then we got an overflow and we deoptimize. 4486 // If not, then we got an overflow and we deoptimize.
4745 ExternalReference min_int = ExternalReference::address_of_min_int(); 4487 ExternalReference min_int = ExternalReference::address_of_min_int();
4746 __ movdbl(xmm_temp, Operand::StaticVariable(min_int)); 4488 __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
4747 __ ucomisd(xmm_temp, xmm0); 4489 __ ucomisd(xmm_temp, xmm0);
4748 DeoptimizeIf(not_equal, instr->environment()); 4490 DeoptimizeIf(not_equal, instr->environment());
4749 DeoptimizeIf(parity_even, instr->environment()); // NaN. 4491 DeoptimizeIf(parity_even, instr->environment()); // NaN.
4750 } 4492 }
4751 } else if (CpuFeatures::IsSupported(SSE2)) { 4493 } else {
4752 CpuFeatures::Scope scope(SSE2);
4753 // Deoptimize if we don't have a heap number. 4494 // Deoptimize if we don't have a heap number.
4754 __ RecordComment("Deferred TaggedToI: not a heap number"); 4495 __ RecordComment("Deferred TaggedToI: not a heap number");
4755 DeoptimizeIf(not_equal, instr->environment()); 4496 DeoptimizeIf(not_equal, instr->environment());
4756 4497
4757 XMMRegister xmm_temp = ToDoubleRegister(instr->temp()); 4498 XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
4758 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); 4499 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
4759 __ cvttsd2si(input_reg, Operand(xmm0)); 4500 __ cvttsd2si(input_reg, Operand(xmm0));
4760 __ cvtsi2sd(xmm_temp, Operand(input_reg)); 4501 __ cvtsi2sd(xmm_temp, Operand(input_reg));
4761 __ ucomisd(xmm0, xmm_temp); 4502 __ ucomisd(xmm0, xmm_temp);
4762 __ RecordComment("Deferred TaggedToI: lost precision"); 4503 __ RecordComment("Deferred TaggedToI: lost precision");
4763 DeoptimizeIf(not_equal, instr->environment()); 4504 DeoptimizeIf(not_equal, instr->environment());
4764 __ RecordComment("Deferred TaggedToI: NaN"); 4505 __ RecordComment("Deferred TaggedToI: NaN");
4765 DeoptimizeIf(parity_even, instr->environment()); // NaN. 4506 DeoptimizeIf(parity_even, instr->environment()); // NaN.
4766 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4507 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4767 __ test(input_reg, Operand(input_reg)); 4508 __ test(input_reg, Operand(input_reg));
4768 __ j(not_zero, &done); 4509 __ j(not_zero, &done);
4769 __ movmskpd(input_reg, xmm0); 4510 __ movmskpd(input_reg, xmm0);
4770 __ and_(input_reg, 1); 4511 __ and_(input_reg, 1);
4771 __ RecordComment("Deferred TaggedToI: minus zero"); 4512 __ RecordComment("Deferred TaggedToI: minus zero");
4772 DeoptimizeIf(not_zero, instr->environment()); 4513 DeoptimizeIf(not_zero, instr->environment());
4773 } 4514 }
4774 } else {
4775 UNREACHABLE();
4776 } 4515 }
4777 __ bind(&done); 4516 __ bind(&done);
4778 } 4517 }
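
Note on the non-truncating path above: the code converts with cvttsd2si, converts back with cvtsi2sd, and deoptimizes unless the round trip reproduces the input exactly; NaN fails the ucomisd parity check, and a zero result still deoptimizes when the input was -0 (sign bit read via movmskpd). A C++ sketch with exceptions standing in for DeoptimizeIf:

    #include <cmath>
    #include <cstdint>
    #include <stdexcept>

    int32_t DoubleToInt32OrDeopt(double input, bool bailout_on_minus_zero) {
      if (std::isnan(input) || input < INT32_MIN || input > 2147483647.0)
        throw std::runtime_error("deopt: NaN or out of range");
      int32_t result = static_cast<int32_t>(input);    // cvttsd2si
      if (static_cast<double>(result) != input)        // cvtsi2sd + ucomisd
        throw std::runtime_error("deopt: lost precision");
      if (bailout_on_minus_zero && result == 0 && std::signbit(input))
        throw std::runtime_error("deopt: minus zero"); // movmskpd
      return result;
    }
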
4779 4518
4780 4519
4781 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { 4520 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4782 class DeferredTaggedToI: public LDeferredCode { 4521 class DeferredTaggedToI: public LDeferredCode {
4783 public: 4522 public:
4784 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) 4523 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4785 : LDeferredCode(codegen), instr_(instr) { } 4524 : LDeferredCode(codegen), instr_(instr) { }
(...skipping 22 matching lines...)
4808 4547
4809 4548
4810 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { 4549 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4811 LOperand* input = instr->value(); 4550 LOperand* input = instr->value();
4812 ASSERT(input->IsRegister()); 4551 ASSERT(input->IsRegister());
4813 LOperand* temp = instr->temp(); 4552 LOperand* temp = instr->temp();
4814 ASSERT(temp == NULL || temp->IsRegister()); 4553 ASSERT(temp == NULL || temp->IsRegister());
4815 LOperand* result = instr->result(); 4554 LOperand* result = instr->result();
4816 ASSERT(result->IsDoubleRegister()); 4555 ASSERT(result->IsDoubleRegister());
4817 4556
4818 if (CpuFeatures::IsSupported(SSE2)) { 4557 Register input_reg = ToRegister(input);
4819 CpuFeatures::Scope scope(SSE2); 4558 XMMRegister result_reg = ToDoubleRegister(result);
4820 Register input_reg = ToRegister(input);
4821 XMMRegister result_reg = ToDoubleRegister(result);
4822 4559
4823 bool deoptimize_on_minus_zero = 4560 bool deoptimize_on_minus_zero =
4824 instr->hydrogen()->deoptimize_on_minus_zero(); 4561 instr->hydrogen()->deoptimize_on_minus_zero();
4825 Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg; 4562 Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
4826 4563
4827 EmitNumberUntagD(input_reg, 4564 EmitNumberUntagD(input_reg,
4828 temp_reg, 4565 temp_reg,
4829 result_reg, 4566 result_reg,
4830 instr->hydrogen()->deoptimize_on_undefined(), 4567 instr->hydrogen()->deoptimize_on_undefined(),
4831 deoptimize_on_minus_zero, 4568 deoptimize_on_minus_zero,
4832 instr->environment()); 4569 instr->environment());
4833 } else {
4834 UNIMPLEMENTED();
4835 }
4836 } 4570 }
4837 4571
4838 4572
4839 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { 4573 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4840 LOperand* input = instr->value(); 4574 LOperand* input = instr->value();
4841 ASSERT(input->IsDoubleRegister()); 4575 ASSERT(input->IsDoubleRegister());
4842 LOperand* result = instr->result(); 4576 LOperand* result = instr->result();
4843 ASSERT(result->IsRegister()); 4577 ASSERT(result->IsRegister());
4844 CpuFeatures::Scope scope(SSE2);
4845 4578
4846 XMMRegister input_reg = ToDoubleRegister(input); 4579 XMMRegister input_reg = ToDoubleRegister(input);
4847 Register result_reg = ToRegister(result); 4580 Register result_reg = ToRegister(result);
4848 4581
4849 if (instr->truncating()) { 4582 if (instr->truncating()) {
4850 // Performs a truncating conversion of a floating point number as used by 4583 // Performs a truncating conversion of a floating point number as used by
4851 // the JS bitwise operations. 4584 // the JS bitwise operations.
4852 __ cvttsd2si(result_reg, Operand(input_reg)); 4585 __ cvttsd2si(result_reg, Operand(input_reg));
4853 __ cmp(result_reg, 0x80000000u); 4586 __ cmp(result_reg, 0x80000000u);
4854 if (CpuFeatures::IsSupported(SSE3)) { 4587 if (CpuFeatures::IsSupported(SSE3)) {
(...skipping 169 matching lines...)
5024 Operand operand = ToOperand(instr->value()); 4757 Operand operand = ToOperand(instr->value());
5025 __ cmp(operand, target); 4758 __ cmp(operand, target);
5026 } 4759 }
5027 DeoptimizeIf(not_equal, instr->environment()); 4760 DeoptimizeIf(not_equal, instr->environment());
5028 } 4761 }
5029 4762
5030 4763
5031 void LCodeGen::DoCheckMapCommon(Register reg, 4764 void LCodeGen::DoCheckMapCommon(Register reg,
5032 Handle<Map> map, 4765 Handle<Map> map,
5033 CompareMapMode mode, 4766 CompareMapMode mode,
5034 LInstruction* instr) { 4767 LEnvironment* env) {
5035 Label success; 4768 Label success;
5036 __ CompareMap(reg, map, &success, mode); 4769 __ CompareMap(reg, map, &success, mode);
5037 DeoptimizeIf(not_equal, instr->environment()); 4770 DeoptimizeIf(not_equal, env);
5038 __ bind(&success); 4771 __ bind(&success);
5039 } 4772 }
5040 4773
5041 4774
5042 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { 4775 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5043 LOperand* input = instr->value(); 4776 LOperand* input = instr->value();
5044 ASSERT(input->IsRegister()); 4777 ASSERT(input->IsRegister());
5045 Register reg = ToRegister(input); 4778 Register reg = ToRegister(input);
5046 4779
5047 Label success; 4780 Label success;
5048 SmallMapList* map_set = instr->hydrogen()->map_set(); 4781 SmallMapList* map_set = instr->hydrogen()->map_set();
5049 for (int i = 0; i < map_set->length() - 1; i++) { 4782 for (int i = 0; i < map_set->length() - 1; i++) {
5050 Handle<Map> map = map_set->at(i); 4783 Handle<Map> map = map_set->at(i);
5051 __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP); 4784 __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
5052 __ j(equal, &success); 4785 __ j(equal, &success);
5053 } 4786 }
5054 Handle<Map> map = map_set->last(); 4787 Handle<Map> map = map_set->last();
5055 DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr); 4788 DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
5056 __ bind(&success); 4789 __ bind(&success);
5057 } 4790 }
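
Note on DoCheckMaps above: the object's map is compared against each map in the set, branching to success on the first match; only the final comparison deoptimizes. A sketch of the control flow (names illustrative, assuming a non-empty map set):

    #include <cstddef>
    #include <stdexcept>

    void CheckMapsOrDeopt(const void* object_map,
                          const void* const* map_set, size_t count) {
      // Assumes count >= 1; the compiled map set is never empty.
      for (size_t i = 0; i + 1 < count; ++i)
        if (object_map == map_set[i]) return;   // j(equal, &success)
      if (object_map != map_set[count - 1])     // last map deoptimizes
        throw std::runtime_error("deopt: unexpected map");
    }
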
5058 4791
5059 4792
5060 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { 4793 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5061 CpuFeatures::Scope scope(SSE2);
5062 XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); 4794 XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
5063 Register result_reg = ToRegister(instr->result()); 4795 Register result_reg = ToRegister(instr->result());
5064 __ ClampDoubleToUint8(value_reg, xmm0, result_reg); 4796 __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
5065 } 4797 }
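
Note on ClampDoubleToUint8 above: pixel stores clamp to [0, 255], send NaN to 0, and round ties to even (the default SSE rounding mode used by cvtsd2si). A semantic C++ sketch, assuming the default round-to-nearest-even mode is in effect:

    #include <cmath>
    #include <cstdint>

    uint8_t ClampDoubleToUint8(double value) {
      if (!(value > 0.0)) return 0;       // NaN and non-positive -> 0
      if (value >= 255.0) return 255;
      return static_cast<uint8_t>(std::nearbyint(value));  // ties to even
    }
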
5066 4798
5067 4799
5068 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { 4800 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5069 ASSERT(instr->unclamped()->Equals(instr->result())); 4801 ASSERT(instr->unclamped()->Equals(instr->result()));
5070 Register value_reg = ToRegister(instr->result()); 4802 Register value_reg = ToRegister(instr->result());
5071 __ ClampUint8(value_reg); 4803 __ ClampUint8(value_reg);
5072 } 4804 }
5073 4805
5074 4806
5075 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { 4807 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5076 CpuFeatures::Scope scope(SSE2);
5077
5078 ASSERT(instr->unclamped()->Equals(instr->result())); 4808 ASSERT(instr->unclamped()->Equals(instr->result()));
5079 Register input_reg = ToRegister(instr->unclamped()); 4809 Register input_reg = ToRegister(instr->unclamped());
5080 Label is_smi, done, heap_number; 4810 Label is_smi, done, heap_number;
5081 4811
5082 __ JumpIfSmi(input_reg, &is_smi); 4812 __ JumpIfSmi(input_reg, &is_smi);
5083 4813
5084 // Check for heap number 4814 // Check for heap number
5085 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), 4815 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5086 factory()->heap_number_map()); 4816 factory()->heap_number_map());
5087 __ j(equal, &heap_number, Label::kNear); 4817 __ j(equal, &heap_number, Label::kNear);
(...skipping 26 matching lines...)
5114 4844
5115 Handle<JSObject> holder = instr->holder(); 4845 Handle<JSObject> holder = instr->holder();
5116 Handle<JSObject> current_prototype = instr->prototype(); 4846 Handle<JSObject> current_prototype = instr->prototype();
5117 4847
5118 // Load prototype object. 4848 // Load prototype object.
5119 __ LoadHeapObject(reg, current_prototype); 4849 __ LoadHeapObject(reg, current_prototype);
5120 4850
5121 // Check prototype maps up to the holder. 4851 // Check prototype maps up to the holder.
5122 while (!current_prototype.is_identical_to(holder)) { 4852 while (!current_prototype.is_identical_to(holder)) {
5123 DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()), 4853 DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
5124 ALLOW_ELEMENT_TRANSITION_MAPS, instr); 4854 ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
5125 4855
5126 current_prototype = 4856 current_prototype =
5127 Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype())); 4857 Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
5128 // Load next prototype object. 4858 // Load next prototype object.
5129 __ LoadHeapObject(reg, current_prototype); 4859 __ LoadHeapObject(reg, current_prototype);
5130 } 4860 }
5131 4861
5132 // Check the holder map. 4862 // Check the holder map.
5133 DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()), 4863 DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
5134 ALLOW_ELEMENT_TRANSITION_MAPS, instr); 4864 ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
5135 } 4865 }
5136 4866
5137 4867
5138 void LCodeGen::DoAllocateObject(LAllocateObject* instr) { 4868 void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
5139 class DeferredAllocateObject: public LDeferredCode { 4869 class DeferredAllocateObject: public LDeferredCode {
5140 public: 4870 public:
5141 DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr) 4871 DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
5142 : LDeferredCode(codegen), instr_(instr) { } 4872 : LDeferredCode(codegen), instr_(instr) { }
5143 virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); } 4873 virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
5144 virtual LInstruction* instr() { return instr_; } 4874 virtual LInstruction* instr() { return instr_; }
(...skipping 516 matching lines...)
5661 __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset)); 5391 __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
5662 5392
5663 // Check the marker in the calling frame. 5393 // Check the marker in the calling frame.
5664 __ bind(&check_frame_marker); 5394 __ bind(&check_frame_marker);
5665 __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset), 5395 __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
5666 Immediate(Smi::FromInt(StackFrame::CONSTRUCT))); 5396 Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
5667 } 5397 }
5668 5398
5669 5399
5670 void LCodeGen::EnsureSpaceForLazyDeopt() { 5400 void LCodeGen::EnsureSpaceForLazyDeopt() {
5671 if (!info()->IsStub()) { 5401 // Ensure that we have enough space after the previous lazy-bailout
5672 // Ensure that we have enough space after the previous lazy-bailout 5402 // instruction for patching the code here.
5673 // instruction for patching the code here. 5403 int current_pc = masm()->pc_offset();
5674 int current_pc = masm()->pc_offset(); 5404 int patch_size = Deoptimizer::patch_size();
5675 int patch_size = Deoptimizer::patch_size(); 5405 if (current_pc < last_lazy_deopt_pc_ + patch_size) {
5676 if (current_pc < last_lazy_deopt_pc_ + patch_size) { 5406 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
5677 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc; 5407 __ Nop(padding_size);
5678 __ Nop(padding_size);
5679 }
5680 } 5408 }
5681 last_lazy_deopt_pc_ = masm()->pc_offset(); 5409 last_lazy_deopt_pc_ = masm()->pc_offset();
5682 } 5410 }
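
Note on EnsureSpaceForLazyDeopt above: lazy deoptimization later patches a call over the bailout site, so successive sites must be at least Deoptimizer::patch_size() bytes apart; the generator inserts Nops when the gap is too small. A sketch of the padding arithmetic only:

    #include <algorithm>

    int PaddingNeeded(int current_pc, int last_lazy_deopt_pc,
                      int patch_size) {
      return std::max(0, last_lazy_deopt_pc + patch_size - current_pc);
    }
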
5683 5411
5684 5412
5685 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { 5413 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5686 EnsureSpaceForLazyDeopt(); 5414 EnsureSpaceForLazyDeopt();
5687 ASSERT(instr->HasEnvironment()); 5415 ASSERT(instr->HasEnvironment());
5688 LEnvironment* env = instr->environment(); 5416 LEnvironment* env = instr->environment();
5689 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); 5417 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
(...skipping 203 matching lines...)
5893 FixedArray::kHeaderSize - kPointerSize)); 5621 FixedArray::kHeaderSize - kPointerSize));
5894 __ bind(&done); 5622 __ bind(&done);
5895 } 5623 }
5896 5624
5897 5625
5898 #undef __ 5626 #undef __
5899 5627
5900 } } // namespace v8::internal 5628 } } // namespace v8::internal
5901 5629
5902 #endif // V8_TARGET_ARCH_IA32 5630 #endif // V8_TARGET_ARCH_IA32