Chromium Code Reviews

Side by Side Diff: src/ia32/lithium-codegen-ia32.cc

Issue 10701054: Enable stub generation using Hydrogen/Lithium (again) (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Merge with latest (created 8 years ago)
OLD | NEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 12 matching lines...)
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #include "v8.h" 28 #include "v8.h"
29 29
30 #if defined(V8_TARGET_ARCH_IA32) 30 #if defined(V8_TARGET_ARCH_IA32)
31 31
32 #include "ia32/lithium-codegen-ia32.h" 32 #include "ia32/lithium-codegen-ia32.h"
33 #include "ic.h"
33 #include "code-stubs.h" 34 #include "code-stubs.h"
34 #include "deoptimizer.h" 35 #include "deoptimizer.h"
35 #include "stub-cache.h" 36 #include "stub-cache.h"
36 #include "codegen.h" 37 #include "codegen.h"
37 38
38 namespace v8 { 39 namespace v8 {
39 namespace internal { 40 namespace internal {
40 41
41 42
42 // When invoking builtins, we need to record the safepoint in the middle of 43 // When invoking builtins, we need to record the safepoint in the middle of
(...skipping 20 matching lines...)
63 Safepoint::DeoptMode deopt_mode_; 64 Safepoint::DeoptMode deopt_mode_;
64 }; 65 };
65 66
66 67
67 #define __ masm()-> 68 #define __ masm()->
68 69
69 bool LCodeGen::GenerateCode() { 70 bool LCodeGen::GenerateCode() {
70 HPhase phase("Z_Code generation", chunk()); 71 HPhase phase("Z_Code generation", chunk());
71 ASSERT(is_unused()); 72 ASSERT(is_unused());
72 status_ = GENERATING; 73 status_ = GENERATING;
73 CpuFeatures::Scope scope(SSE2);
74 74
75 CodeStub::GenerateFPStubs(); 75 CodeStub::GenerateFPStubs();
76 76
77 // Open a frame scope to indicate that there is a frame on the stack. The 77 // Open a frame scope to indicate that there is a frame on the stack. The
78 // MANUAL indicates that the scope shouldn't actually generate code to set up 78 // MANUAL indicates that the scope shouldn't actually generate code to set up
79 // the frame (that is done in GeneratePrologue). 79 // the frame (that is done in GeneratePrologue).
80 FrameScope frame_scope(masm_, StackFrame::MANUAL); 80 FrameScope frame_scope(masm_, StackFrame::MANUAL);
81 81
82 dynamic_frame_alignment_ = (chunk()->num_double_slots() > 2 && 82 dynamic_frame_alignment_ = info()->IsOptimizing() &&
83 !chunk()->graph()->is_recursive()) || 83 ((chunk()->num_double_slots() > 2 &&
84 !info()->osr_ast_id().IsNone(); 84 !chunk()->graph()->is_recursive()) ||
85 !info()->osr_ast_id().IsNone());
85 86
86 return GeneratePrologue() && 87 return GeneratePrologue() &&
87 GenerateBody() && 88 GenerateBody() &&
88 GenerateDeferredCode() && 89 GenerateDeferredCode() &&
90 GenerateJumpTable() &&
89 GenerateSafepointTable(); 91 GenerateSafepointTable();
90 } 92 }
91 93
92 94
93 void LCodeGen::FinishCode(Handle<Code> code) { 95 void LCodeGen::FinishCode(Handle<Code> code) {
94 ASSERT(is_done()); 96 ASSERT(is_done());
95 code->set_stack_slots(GetStackSlotCount()); 97 code->set_stack_slots(GetStackSlotCount());
96 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); 98 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
97 PopulateDeoptimizationData(code); 99 PopulateDeoptimizationData(code);
98 Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code); 100 if (!info()->IsStub()) {
101 Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
102 }
99 } 103 }
100 104
101 105
102 void LCodeGen::Abort(const char* reason) { 106 void LCodeGen::Abort(const char* reason) {
103 info()->set_bailout_reason(reason); 107 info()->set_bailout_reason(reason);
104 status_ = ABORTED; 108 status_ = ABORTED;
105 } 109 }
106 110
107 111
108 void LCodeGen::Comment(const char* format, ...) { 112 void LCodeGen::Comment(const char* format, ...) {
(...skipping 10 matching lines...)
119 size_t length = builder.position(); 123 size_t length = builder.position();
120 Vector<char> copy = Vector<char>::New(length + 1); 124 Vector<char> copy = Vector<char>::New(length + 1);
121 memcpy(copy.start(), builder.Finalize(), copy.length()); 125 memcpy(copy.start(), builder.Finalize(), copy.length());
122 masm()->RecordComment(copy.start()); 126 masm()->RecordComment(copy.start());
123 } 127 }
124 128
125 129
126 bool LCodeGen::GeneratePrologue() { 130 bool LCodeGen::GeneratePrologue() {
127 ASSERT(is_generating()); 131 ASSERT(is_generating());
128 132
129 ProfileEntryHookStub::MaybeCallEntryHook(masm_); 133 if (info()->IsOptimizing()) {
134 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
130 135
131 #ifdef DEBUG 136 #ifdef DEBUG
132 if (strlen(FLAG_stop_at) > 0 && 137 if (strlen(FLAG_stop_at) > 0 &&
133 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { 138 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
134 __ int3(); 139 __ int3();
135 } 140 }
136 #endif 141 #endif
137 142
138 // Strict mode functions and builtins need to replace the receiver 143 // Strict mode functions and builtins need to replace the receiver
139 // with undefined when called as functions (without an explicit 144 // with undefined when called as functions (without an explicit
140 // receiver object). ecx is zero for method calls and non-zero for 145 // receiver object). ecx is zero for method calls and non-zero for
141 // function calls. 146 // function calls.
142 if (!info_->is_classic_mode() || info_->is_native()) { 147 if (!info_->is_classic_mode() || info_->is_native()) {
143 Label ok; 148 Label ok;
144 __ test(ecx, Operand(ecx)); 149 __ test(ecx, Operand(ecx));
145 __ j(zero, &ok, Label::kNear); 150 __ j(zero, &ok, Label::kNear);
146 // +1 for return address. 151 // +1 for return address.
147 int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize; 152 int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
148 __ mov(Operand(esp, receiver_offset), 153 __ mov(Operand(esp, receiver_offset),
149 Immediate(isolate()->factory()->undefined_value())); 154 Immediate(isolate()->factory()->undefined_value()));
150 __ bind(&ok); 155 __ bind(&ok);
151 } 156 }
152 157
158 if (dynamic_frame_alignment_) {
159 // Move state of dynamic frame alignment into edx.
160 __ mov(edx, Immediate(kNoAlignmentPadding));
153 161
154 if (dynamic_frame_alignment_) { 162 Label do_not_pad, align_loop;
155 // Move state of dynamic frame alignment into edx. 163 STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
156 __ mov(edx, Immediate(kNoAlignmentPadding)); 164 // Align esp + 4 to a multiple of 2 * kPointerSize.
165 __ test(esp, Immediate(kPointerSize));
166 __ j(not_zero, &do_not_pad, Label::kNear);
167 __ push(Immediate(0));
168 __ mov(ebx, esp);
169 __ mov(edx, Immediate(kAlignmentPaddingPushed));
170 // Copy arguments, receiver, and return address.
171 __ mov(ecx, Immediate(scope()->num_parameters() + 2));
157 172
158 Label do_not_pad, align_loop; 173 __ bind(&align_loop);
159 STATIC_ASSERT(kDoubleSize == 2 * kPointerSize); 174 __ mov(eax, Operand(ebx, 1 * kPointerSize));
160 // Align esp + 4 to a multiple of 2 * kPointerSize. 175 __ mov(Operand(ebx, 0), eax);
161 __ test(esp, Immediate(kPointerSize)); 176 __ add(Operand(ebx), Immediate(kPointerSize));
162 __ j(not_zero, &do_not_pad, Label::kNear); 177 __ dec(ecx);
163 __ push(Immediate(0)); 178 __ j(not_zero, &align_loop, Label::kNear);
164 __ mov(ebx, esp); 179 __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
165 __ mov(edx, Immediate(kAlignmentPaddingPushed)); 180 __ bind(&do_not_pad);
166 // Copy arguments, receiver, and return address. 181 }
167 __ mov(ecx, Immediate(scope()->num_parameters() + 2));
168
169 __ bind(&align_loop);
170 __ mov(eax, Operand(ebx, 1 * kPointerSize));
171 __ mov(Operand(ebx, 0), eax);
172 __ add(Operand(ebx), Immediate(kPointerSize));
173 __ dec(ecx);
174 __ j(not_zero, &align_loop, Label::kNear);
175 __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
176 __ bind(&do_not_pad);
177 } 182 }
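
The align_loop above slides the return address, receiver, and all arguments one slot toward lower addresses (into the freshly pushed padding word), then zaps the vacated top slot. A minimal C++ simulation of that slide, as a sketch on a plain array rather than the real stack (0xbadbadba stands in for kAlignmentZapValue, whose actual value isn't shown in this diff):

  #include <cstdint>
  #include <vector>

  // Index 0 models the lowest address (esp right after "push 0").
  void SlideDownOneSlot(std::vector<uint32_t>& slots, int word_count) {
    // word_count == scope()->num_parameters() + 2, i.e. the parameters,
    // the receiver, and the return address, exactly as in align_loop.
    for (int i = 0; i < word_count; ++i) {
      slots[i] = slots[i + 1];        // copy the next-higher word down
    }
    slots[word_count] = 0xbadbadba;   // zap the vacated slot
  }
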
178 183
179 info()->set_prologue_offset(masm_->pc_offset()); 184 info()->set_prologue_offset(masm_->pc_offset());
180 __ push(ebp); // Caller's frame pointer. 185 if (NeedsEagerFrame()) {
181 __ mov(ebp, esp); 186 ASSERT(!frame_is_built_);
182 __ push(esi); // Callee's context. 187 frame_is_built_ = true;
183 __ push(edi); // Callee's JS function. 188 __ push(ebp); // Caller's frame pointer.
189 __ mov(ebp, esp);
190 __ push(esi); // Callee's context.
191 if (info()->IsStub()) {
192 __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
193 } else {
194 __ push(edi); // Callee's JS function.
195 }
196 }
184 197
185 if (dynamic_frame_alignment_ && FLAG_debug_code) { 198 if (info()->IsOptimizing() &&
199 dynamic_frame_alignment_ &&
200 FLAG_debug_code) {
186 __ test(esp, Immediate(kPointerSize)); 201 __ test(esp, Immediate(kPointerSize));
187 __ Assert(zero, "frame is expected to be aligned"); 202 __ Assert(zero, "frame is expected to be aligned");
188 } 203 }
189 204
190 // Reserve space for the stack slots needed by the code. 205 // Reserve space for the stack slots needed by the code.
191 int slots = GetStackSlotCount(); 206 int slots = GetStackSlotCount();
192 ASSERT_GE(slots, 1); 207 ASSERT(slots != 0 || !info()->IsOptimizing());
193 if (slots == 1) { 208 if (slots > 0) {
194 if (dynamic_frame_alignment_) { 209 if (slots == 1) {
195 __ push(edx); 210 if (dynamic_frame_alignment_) {
211 __ push(edx);
212 } else {
213 __ push(Immediate(kNoAlignmentPadding));
214 }
196 } else { 215 } else {
197 __ push(Immediate(kNoAlignmentPadding)); 216 if (FLAG_debug_code) {
198 } 217 __ mov(Operand(eax), Immediate(slots));
199 } else { 218 Label loop;
200 if (FLAG_debug_code) { 219 __ bind(&loop);
201 __ mov(Operand(eax), Immediate(slots)); 220 __ push(Immediate(kSlotsZapValue));
202 Label loop; 221 __ dec(eax);
203 __ bind(&loop); 222 __ j(not_zero, &loop);
204 __ push(Immediate(kSlotsZapValue)); 223 } else {
205 __ dec(eax); 224 __ sub(Operand(esp), Immediate(slots * kPointerSize));
206 __ j(not_zero, &loop); 225 #ifdef _MSC_VER
207 } else { 226 // On Windows, you may not access the stack more than one page below
208 __ sub(Operand(esp), Immediate(slots * kPointerSize)); 227 // the most recently mapped page. To make the allocated area randomly
209 #ifdef _MSC_VER 228 // accessible, we write to each page in turn (the value is irrelevant).
210 // On Windows, you may not access the stack more than one page below 229 const int kPageSize = 4 * KB;
211 // the most recently mapped page. To make the allocated area randomly 230 for (int offset = slots * kPointerSize - kPageSize;
212 // accessible, we write to each page in turn (the value is irrelevant). 231 offset > 0;
213 const int kPageSize = 4 * KB; 232 offset -= kPageSize) {
214 for (int offset = slots * kPointerSize - kPageSize; 233 __ mov(Operand(esp, offset), eax);
215 offset > 0; 234 }
216 offset -= kPageSize) { 235 #endif
217 __ mov(Operand(esp, offset), eax);
218 } 236 }
219 #endif
220 }
221 237
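
Worked example for the _MSC_VER probe loop above, assuming slots = 3000 (a 12000-byte frame, since kPointerSize is 4 on ia32) and kPageSize = 4 KB: the loop writes to esp + 7904 and then esp + 3808, one touch per page from high addresses to low, so Windows commits each guard page in order. A standalone sketch, not V8 code:

  #include <cstdio>

  int main() {
    const int kPointerSize = 4;          // ia32
    const int kPageSize = 4 * 1024;
    const int slots = 3000;              // hypothetical frame size
    for (int offset = slots * kPointerSize - kPageSize;
         offset > 0;
         offset -= kPageSize) {
      std::printf("touch esp+%d\n", offset);  // prints 7904, then 3808
    }
    return 0;
  }
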
222 // Store dynamic frame alignment state in the first local. 238 // Store dynamic frame alignment state in the first local.
223 if (dynamic_frame_alignment_) { 239 if (dynamic_frame_alignment_) {
224 __ mov(Operand(ebp, 240 __ mov(Operand(ebp,
225 JavaScriptFrameConstants::kDynamicAlignmentStateOffset), 241 JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
226 edx); 242 edx);
227 } else { 243 } else {
228 __ mov(Operand(ebp, 244 __ mov(Operand(ebp,
229 JavaScriptFrameConstants::kDynamicAlignmentStateOffset), 245 JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
230 Immediate(kNoAlignmentPadding)); 246 Immediate(kNoAlignmentPadding));
247 }
231 } 248 }
232 } 249 }
233 250
234 // Possibly allocate a local context. 251 // Possibly allocate a local context.
235 int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; 252 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
236 if (heap_slots > 0) { 253 if (heap_slots > 0) {
237 Comment(";;; Allocate local context"); 254 Comment(";;; Allocate local context");
238 // Argument to NewContext is the function, which is still in edi. 255 // Argument to NewContext is the function, which is still in edi.
239 __ push(edi); 256 __ push(edi);
240 if (heap_slots <= FastNewContextStub::kMaximumSlots) { 257 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
241 FastNewContextStub stub(heap_slots); 258 FastNewContextStub stub(heap_slots);
242 __ CallStub(&stub); 259 __ CallStub(&stub);
243 } else { 260 } else {
244 __ CallRuntime(Runtime::kNewFunctionContext, 1); 261 __ CallRuntime(Runtime::kNewFunctionContext, 1);
245 } 262 }
(...skipping 19 matching lines...)
265 context_offset, 282 context_offset,
266 eax, 283 eax,
267 ebx, 284 ebx,
268 kDontSaveFPRegs); 285 kDontSaveFPRegs);
269 } 286 }
270 } 287 }
271 Comment(";;; End allocate local context"); 288 Comment(";;; End allocate local context");
272 } 289 }
273 290
274 // Trace the call. 291 // Trace the call.
275 if (FLAG_trace) { 292 if (FLAG_trace && info()->IsOptimizing()) {
276 // We have not executed any compiled code yet, so esi still holds the 293 // We have not executed any compiled code yet, so esi still holds the
277 // incoming context. 294 // incoming context.
278 __ CallRuntime(Runtime::kTraceEnter, 0); 295 __ CallRuntime(Runtime::kTraceEnter, 0);
279 } 296 }
280 return !is_aborted(); 297 return !is_aborted();
281 } 298 }
282 299
283 300
284 bool LCodeGen::GenerateBody() { 301 bool LCodeGen::GenerateBody() {
285 ASSERT(is_generating()); 302 ASSERT(is_generating());
(...skipping 33 matching lines...)
319 } 336 }
320 } 337 }
321 instr->CompileToNative(this); 338 instr->CompileToNative(this);
322 } 339 }
323 } 340 }
324 EnsureSpaceForLazyDeopt(); 341 EnsureSpaceForLazyDeopt();
325 return !is_aborted(); 342 return !is_aborted();
326 } 343 }
327 344
328 345
346 bool LCodeGen::GenerateJumpTable() {
347 Label needs_frame_not_call;
348 Label needs_frame_is_call;
349 for (int i = 0; i < jump_table_.length(); i++) {
350 __ bind(&jump_table_[i].label);
351 Address entry = jump_table_[i].address;
352 if (jump_table_[i].needs_frame) {
353 __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
354 if (jump_table_[i].is_lazy_deopt) {
355 if (needs_frame_is_call.is_bound()) {
356 __ jmp(&needs_frame_is_call);
357 } else {
358 __ bind(&needs_frame_is_call);
359 __ push(esi);
360 // This variant of deopt can only be used with stubs. Since we don't
361 // have a function pointer to install in the stack frame that we're
362 // building, install a special marker there instead.
363 ASSERT(info()->IsStub());
364 __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
365 // Push a PC inside the function so that the deopt code can find where
366 // the deopt comes from. It doesn't have to be the precise return
367 // address of a "calling" LAZY deopt; it only has to be somewhere
368 // inside the code body.
369 Label push_approx_pc;
370 __ call(&push_approx_pc);
371 __ bind(&push_approx_pc);
372 // Push the continuation which was stashed where the ebp should
373 // be. Replace it with the saved ebp.
374 __ push(MemOperand(esp, 3 * kPointerSize));
375 __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
376 __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
377 __ ret(0); // Call the continuation without clobbering registers.
378 }
379 } else {
380 if (needs_frame_not_call.is_bound()) {
381 __ jmp(&needs_frame_not_call);
382 } else {
383 __ bind(&needs_frame_not_call);
384 __ push(esi);
385 // This variant of deopt can only be used with stubs. Since we don't
386 // have a function pointer to install in the stack frame that we're
387 // building, install a special marker there instead.
388 ASSERT(info()->IsStub());
389 __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
390 // Push the continuation which was stashed where the ebp should
391 // be. Replace it with the saved ebp.
392 __ push(MemOperand(esp, 2 * kPointerSize));
393 __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
394 __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
395 __ ret(0); // Call the continuation without clobbering registers.
396 }
397 }
398 } else {
399 if (jump_table_[i].is_lazy_deopt) {
400 __ call(entry, RelocInfo::RUNTIME_ENTRY);
401 } else {
402 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
403 }
404 }
405 }
406 return !is_aborted();
407 }
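
For reference, a sketch of the stack that the needs_frame_is_call path assembles just before its final ret, assuming 32-bit slots (lowest address first):

  // esp + 0  : deopt entry address    <- ret(0) pops this and jumps to it
  // esp + 4  : approximate pc         <- pushed by call(&push_approx_pc)
  // esp + 8  : Smi(StackFrame::STUB)  <- marker instead of a JS function
  // esp + 12 : esi (saved context)
  // esp + 16 : saved ebp              <- overwrote the stashed entry; ebp points here

The needs_frame_not_call path is identical except that no approximate pc is pushed, which is why its stashed entry sits one slot closer to esp (2 * kPointerSize instead of 3).
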
408
409
329 bool LCodeGen::GenerateDeferredCode() { 410 bool LCodeGen::GenerateDeferredCode() {
330 ASSERT(is_generating()); 411 ASSERT(is_generating());
331 if (deferred_.length() > 0) { 412 if (deferred_.length() > 0) {
332 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { 413 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
333 LDeferredCode* code = deferred_[i]; 414 LDeferredCode* code = deferred_[i];
334 __ bind(code->entry()); 415 __ bind(code->entry());
416 if (NeedsDeferredFrame()) {
417 Comment(";;; Deferred build frame @%d: %s.",
418 code->instruction_index(),
419 code->instr()->Mnemonic());
420 ASSERT(!frame_is_built_);
421 ASSERT(info()->IsStub());
422 frame_is_built_ = true;
423 // Build the frame in such a way that esi isn't trashed.
424 __ push(ebp); // Caller's frame pointer.
425 __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
426 __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
427 __ lea(ebp, Operand(esp, 2 * kPointerSize));
428 }
335 Comment(";;; Deferred code @%d: %s.", 429 Comment(";;; Deferred code @%d: %s.",
336 code->instruction_index(), 430 code->instruction_index(),
337 code->instr()->Mnemonic()); 431 code->instr()->Mnemonic());
338 code->Generate(); 432 code->Generate();
433 if (NeedsDeferredFrame()) {
434 Comment(";;; Deferred destroy frame @%d: %s.",
435 code->instruction_index(),
436 code->instr()->Mnemonic());
437 ASSERT(frame_is_built_);
438 frame_is_built_ = false;
439 __ mov(esp, ebp);
440 __ pop(ebp);
441 }
339 __ jmp(code->exit()); 442 __ jmp(code->exit());
340 } 443 }
341 } 444 }
342 445
343 // Deferred code is the last part of the instruction sequence. Mark 446 // Deferred code is the last part of the instruction sequence. Mark
344 // the generated code as done unless we bailed out. 447 // the generated code as done unless we bailed out.
345 if (!is_aborted()) status_ = DONE; 448 if (!is_aborted()) status_ = DONE;
346 return !is_aborted(); 449 return !is_aborted();
347 } 450 }
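
A sketch of the deferred stub frame built above, assuming StandardFrameConstants::kContextOffset addresses the caller's context slot; the context is read out of the caller's frame so that esi itself is never clobbered:

  // [ebp + 0] : caller's ebp           <- ebp after lea ebp, [esp + 2 * kPointerSize]
  // [ebp - 4] : caller's context       (loaded from the caller frame, not from esi)
  // [ebp - 8] : Smi(StackFrame::STUB)  <- esp
  // Teardown: mov esp, ebp / pop ebp undoes all three pushes at once.
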
348 451
349 452
350 bool LCodeGen::GenerateSafepointTable() { 453 bool LCodeGen::GenerateSafepointTable() {
351 ASSERT(is_done()); 454 ASSERT(is_done());
455 if (!info()->IsStub()) {
456 // For lazy deoptimization we need space to patch a call after every call.
457 // Ensure there is always space for such patching, even if the code ends
458 // in a call.
459 int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
460 while (masm()->pc_offset() < target_offset) {
461 masm()->nop();
462 }
463 }
352 safepoints_.Emit(masm(), GetStackSlotCount()); 464 safepoints_.Emit(masm(), GetStackSlotCount());
353 return !is_aborted(); 465 return !is_aborted();
354 } 466 }
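
Worked example of the padding above, with hypothetical numbers: if the code ends at pc_offset() == 102 and Deoptimizer::patch_size() is 5 bytes (the size of an ia32 call rel32; the constant's real value isn't shown in this diff), then target_offset == 107 and five one-byte nops are emitted:

  // pc: 102 nop, 103 nop, 104 nop, 105 nop, 106 nop  -> pc_offset() == 107
  // A lazy deopt can later overwrite those bytes with a call without
  // clobbering the safepoint table that follows.
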
355 467
356 468
357 Register LCodeGen::ToRegister(int index) const { 469 Register LCodeGen::ToRegister(int index) const {
358 return Register::FromAllocationIndex(index); 470 return Register::FromAllocationIndex(index);
359 } 471 }
360 472
361 473
362 XMMRegister LCodeGen::ToDoubleRegister(int index) const { 474 XMMRegister LCodeGen::ToDoubleRegister(int index) const {
363 return XMMRegister::FromAllocationIndex(index); 475 return XMMRegister::FromAllocationIndex(index);
364 } 476 }
365 477
366 478
479 bool LCodeGen::IsX87TopOfStack(LOperand* op) const {
480 return op->IsDoubleRegister();
481 }
482
483
367 Register LCodeGen::ToRegister(LOperand* op) const { 484 Register LCodeGen::ToRegister(LOperand* op) const {
368 ASSERT(op->IsRegister()); 485 ASSERT(op->IsRegister());
369 return ToRegister(op->index()); 486 return ToRegister(op->index());
370 } 487 }
371 488
372 489
373 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { 490 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
374 ASSERT(op->IsDoubleRegister()); 491 ASSERT(op->IsDoubleRegister());
375 return ToDoubleRegister(op->index()); 492 return ToDoubleRegister(op->index());
376 } 493 }
(...skipping 65 matching lines...)
442 // arguments index points to the first element of a sequence of tagged 559 // arguments index points to the first element of a sequence of tagged
443 // values on the stack that represent the arguments. This needs to be 560 // values on the stack that represent the arguments. This needs to be
444 // kept in sync with the LArgumentsElements implementation. 561 // kept in sync with the LArgumentsElements implementation.
445 *arguments_index = -environment->parameter_count(); 562 *arguments_index = -environment->parameter_count();
446 *arguments_count = environment->parameter_count(); 563 *arguments_count = environment->parameter_count();
447 564
448 WriteTranslation(environment->outer(), 565 WriteTranslation(environment->outer(),
449 translation, 566 translation,
450 arguments_index, 567 arguments_index,
451 arguments_count); 568 arguments_count);
452 int closure_id = *info()->closure() != *environment->closure() 569 bool has_closure_id = !info()->closure().is_null() &&
570 *info()->closure() != *environment->closure();
571 int closure_id = has_closure_id
453 ? DefineDeoptimizationLiteral(environment->closure()) 572 ? DefineDeoptimizationLiteral(environment->closure())
454 : Translation::kSelfLiteralId; 573 : Translation::kSelfLiteralId;
455 switch (environment->frame_type()) { 574 switch (environment->frame_type()) {
456 case JS_FUNCTION: 575 case JS_FUNCTION:
457 translation->BeginJSFrame(environment->ast_id(), closure_id, height); 576 translation->BeginJSFrame(environment->ast_id(), closure_id, height);
458 break; 577 break;
459 case JS_CONSTRUCT: 578 case JS_CONSTRUCT:
460 translation->BeginConstructStubFrame(closure_id, translation_size); 579 translation->BeginConstructStubFrame(closure_id, translation_size);
461 break; 580 break;
462 case JS_GETTER: 581 case JS_GETTER:
463 ASSERT(translation_size == 1); 582 ASSERT(translation_size == 1);
464 ASSERT(height == 0); 583 ASSERT(height == 0);
465 translation->BeginGetterStubFrame(closure_id); 584 translation->BeginGetterStubFrame(closure_id);
466 break; 585 break;
467 case JS_SETTER: 586 case JS_SETTER:
468 ASSERT(translation_size == 2); 587 ASSERT(translation_size == 2);
469 ASSERT(height == 0); 588 ASSERT(height == 0);
470 translation->BeginSetterStubFrame(closure_id); 589 translation->BeginSetterStubFrame(closure_id);
471 break; 590 break;
472 case ARGUMENTS_ADAPTOR: 591 case ARGUMENTS_ADAPTOR:
473 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); 592 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
474 break; 593 break;
594 case STUB:
595 translation->BeginCompiledStubFrame();
596 break;
597 default:
598 UNREACHABLE();
475 } 599 }
476 600
477 // Inlined frames which push their arguments cause the index to be 601 // Inlined frames which push their arguments cause the index to be
478 // bumped and another stack area to be used for materialization. 602 // bumped and another stack area to be used for materialization.
479 if (environment->entry() != NULL && 603 if (environment->entry() != NULL &&
480 environment->entry()->arguments_pushed()) { 604 environment->entry()->arguments_pushed()) {
481 *arguments_index = *arguments_index < 0 605 *arguments_index = *arguments_index < 0
482 ? GetStackSlotCount() 606 ? GetStackSlotCount()
483 : *arguments_index + *arguments_count; 607 : *arguments_index + *arguments_count;
484 *arguments_count = environment->entry()->arguments_count() + 1; 608 *arguments_count = environment->entry()->arguments_count() + 1;
(...skipping 114 matching lines...)
599 int argc, 723 int argc,
600 LInstruction* instr) { 724 LInstruction* instr) {
601 ASSERT(instr != NULL); 725 ASSERT(instr != NULL);
602 ASSERT(instr->HasPointerMap()); 726 ASSERT(instr->HasPointerMap());
603 LPointerMap* pointers = instr->pointer_map(); 727 LPointerMap* pointers = instr->pointer_map();
604 RecordPosition(pointers->position()); 728 RecordPosition(pointers->position());
605 729
606 __ CallRuntime(fun, argc); 730 __ CallRuntime(fun, argc);
607 731
608 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 732 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
733
734 ASSERT(info()->is_calling());
609 } 735 }
610 736
611 737
612 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, 738 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
613 int argc, 739 int argc,
614 LInstruction* instr, 740 LInstruction* instr,
615 LOperand* context) { 741 LOperand* context) {
616 if (context->IsRegister()) { 742 if (context->IsRegister()) {
617 if (!ToRegister(context).is(esi)) { 743 if (!ToRegister(context).is(esi)) {
618 __ mov(esi, ToRegister(context)); 744 __ mov(esi, ToRegister(context));
619 } 745 }
620 } else if (context->IsStackSlot()) { 746 } else if (context->IsStackSlot()) {
621 __ mov(esi, ToOperand(context)); 747 __ mov(esi, ToOperand(context));
622 } else if (context->IsConstantOperand()) { 748 } else if (context->IsConstantOperand()) {
623 HConstant* constant = 749 HConstant* constant =
624 chunk_->LookupConstant(LConstantOperand::cast(context)); 750 chunk_->LookupConstant(LConstantOperand::cast(context));
625 __ LoadHeapObject(esi, Handle<Context>::cast(constant->handle())); 751 __ LoadHeapObject(esi, Handle<Context>::cast(constant->handle()));
626 } else { 752 } else {
627 UNREACHABLE(); 753 UNREACHABLE();
628 } 754 }
629 755
630 __ CallRuntimeSaveDoubles(id); 756 __ CallRuntimeSaveDoubles(id);
631 RecordSafepointWithRegisters( 757 RecordSafepointWithRegisters(
632 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); 758 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
759
760 ASSERT(info()->is_calling());
633 } 761 }
634 762
635 763
636 void LCodeGen::RegisterEnvironmentForDeoptimization( 764 void LCodeGen::RegisterEnvironmentForDeoptimization(
637 LEnvironment* environment, Safepoint::DeoptMode mode) { 765 LEnvironment* environment, Safepoint::DeoptMode mode) {
638 if (!environment->HasBeenRegistered()) { 766 if (!environment->HasBeenRegistered()) {
639 // Physical stack frame layout: 767 // Physical stack frame layout:
640 // -x ............. -4 0 ..................................... y 768 // -x ............. -4 0 ..................................... y
641 // [incoming arguments] [spill slots] [pushed outgoing arguments] 769 // [incoming arguments] [spill slots] [pushed outgoing arguments]
642 770
(...skipping 25 matching lines...)
668 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); 796 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
669 deoptimizations_.Add(environment, zone()); 797 deoptimizations_.Add(environment, zone());
670 } 798 }
671 } 799 }
672 800
673 801
674 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { 802 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
675 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 803 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
676 ASSERT(environment->HasBeenRegistered()); 804 ASSERT(environment->HasBeenRegistered());
677 int id = environment->deoptimization_index(); 805 int id = environment->deoptimization_index();
678 Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER); 806 ASSERT(info()->IsOptimizing() || info()->IsStub());
807 Deoptimizer::BailoutType bailout_type = frame_is_built_
808 ? Deoptimizer::EAGER
809 : Deoptimizer::LAZY;
810 Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
679 if (entry == NULL) { 811 if (entry == NULL) {
680 Abort("bailout was not prepared"); 812 Abort("bailout was not prepared");
681 return; 813 return;
682 } 814 }
683 815
684 if (FLAG_deopt_every_n_times != 0) { 816 if (FLAG_deopt_every_n_times != 0) {
685 Handle<SharedFunctionInfo> shared(info_->shared_info()); 817 Handle<SharedFunctionInfo> shared(info_->shared_info());
686 Label no_deopt; 818 Label no_deopt;
687 __ pushfd(); 819 __ pushfd();
688 __ push(eax); 820 __ push(eax);
(...skipping 13 matching lines...)
702 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); 834 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
703 835
704 __ bind(&no_deopt); 836 __ bind(&no_deopt);
705 __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset), 837 __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
706 eax); 838 eax);
707 __ pop(ebx); 839 __ pop(ebx);
708 __ pop(eax); 840 __ pop(eax);
709 __ popfd(); 841 __ popfd();
710 } 842 }
711 843
844 ASSERT(info()->IsStub() || frame_is_built_);
845 bool lazy_deopt_needed = info()->IsStub();
712 if (cc == no_condition) { 846 if (cc == no_condition) {
713 if (FLAG_trap_on_deopt) __ int3(); 847 if (FLAG_trap_on_deopt) __ int3();
714 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); 848 if (lazy_deopt_needed) {
849 __ call(entry, RelocInfo::RUNTIME_ENTRY);
850 } else {
851 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
852 }
715 } else { 853 } else {
854 Label done;
716 if (FLAG_trap_on_deopt) { 855 if (FLAG_trap_on_deopt) {
717 Label done;
718 __ j(NegateCondition(cc), &done, Label::kNear); 856 __ j(NegateCondition(cc), &done, Label::kNear);
719 __ int3(); 857 __ int3();
720 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); 858 }
721 __ bind(&done); 859 if (!lazy_deopt_needed && frame_is_built_) {
860 if (FLAG_trap_on_deopt) {
861 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
862 } else {
863 __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
864 }
722 } else { 865 } else {
723 __ j(cc, entry, RelocInfo::RUNTIME_ENTRY); 866 // We often have several deopts to the same entry, reuse the last
867 // jump entry if this is the case.
868 if (jump_table_.is_empty() ||
869 jump_table_.last().address != entry ||
870 jump_table_.last().needs_frame != !frame_is_built_ ||
871 jump_table_.last().is_lazy_deopt != lazy_deopt_needed) {
872 JumpTableEntry table_entry(entry, !frame_is_built_, lazy_deopt_needed);
873 jump_table_.Add(table_entry, zone());
874 }
875 if (FLAG_trap_on_deopt) {
876 __ jmp(&jump_table_.last().label);
877 } else {
878 __ j(cc, &jump_table_.last().label);
879 }
724 } 880 }
881 __ bind(&done);
725 } 882 }
726 } 883 }
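
The reuse check above compares three fields of the last jump_table_ entry. A hypothetical declaration matching that usage (the real struct lives in lithium-codegen-ia32.h, not in this file):

  struct JumpTableEntry {
    JumpTableEntry(Address entry, bool frame, bool is_lazy)
        : label(),
          address(entry),
          needs_frame(frame),
          is_lazy_deopt(is_lazy) { }
    Label label;         // bound in GenerateJumpTable()
    Address address;     // deoptimization entry to jump or call to
    bool needs_frame;    // no frame at the deopt site; build one in the table
    bool is_lazy_deopt;  // reach the entry with call (lazy) rather than jmp
  };
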
727 884
728 885
729 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { 886 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
730 int length = deoptimizations_.length(); 887 int length = deoptimizations_.length();
731 if (length == 0) return; 888 if (length == 0) return;
732 Handle<DeoptimizationInputData> data = 889 Handle<DeoptimizationInputData> data =
733 factory()->NewDeoptimizationInputData(length, TENURED); 890 factory()->NewDeoptimizationInputData(length, TENURED);
734 891
(...skipping 680 matching lines...)
1415 // Use xor to produce +0.0 in a fast and compact way, but avoid 1572 // Use xor to produce +0.0 in a fast and compact way, but avoid
1416 // doing so if the constant is -0.0. 1573 // doing so if the constant is -0.0.
1417 if (BitCast<uint64_t, double>(v) == 0) { 1574 if (BitCast<uint64_t, double>(v) == 0) {
1418 __ xorps(res, res); 1575 __ xorps(res, res);
1419 } else { 1576 } else {
1420 Register temp = ToRegister(instr->temp()); 1577 Register temp = ToRegister(instr->temp());
1421 uint64_t int_val = BitCast<uint64_t, double>(v); 1578 uint64_t int_val = BitCast<uint64_t, double>(v);
1422 int32_t lower = static_cast<int32_t>(int_val); 1579 int32_t lower = static_cast<int32_t>(int_val);
1423 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); 1580 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
1424 if (CpuFeatures::IsSupported(SSE4_1)) { 1581 if (CpuFeatures::IsSupported(SSE4_1)) {
1425 CpuFeatures::Scope scope(SSE4_1); 1582 CpuFeatures::Scope scope1(SSE2);
1583 CpuFeatures::Scope scope2(SSE4_1);
1426 if (lower != 0) { 1584 if (lower != 0) {
1427 __ Set(temp, Immediate(lower)); 1585 __ Set(temp, Immediate(lower));
1428 __ movd(res, Operand(temp)); 1586 __ movd(res, Operand(temp));
1429 __ Set(temp, Immediate(upper)); 1587 __ Set(temp, Immediate(upper));
1430 __ pinsrd(res, Operand(temp), 1); 1588 __ pinsrd(res, Operand(temp), 1);
1431 } else { 1589 } else {
1432 __ xorps(res, res); 1590 __ xorps(res, res);
1433 __ Set(temp, Immediate(upper)); 1591 __ Set(temp, Immediate(upper));
1434 __ pinsrd(res, Operand(temp), 1); 1592 __ pinsrd(res, Operand(temp), 1);
1435 } 1593 }
1436 } else { 1594 } else {
1595 CpuFeatures::Scope scope(SSE2);
1437 __ Set(temp, Immediate(upper)); 1596 __ Set(temp, Immediate(upper));
1438 __ movd(res, Operand(temp)); 1597 __ movd(res, Operand(temp));
1439 __ psllq(res, 32); 1598 __ psllq(res, 32);
1440 if (lower != 0) { 1599 if (lower != 0) {
1441 __ Set(temp, Immediate(lower)); 1600 __ Set(temp, Immediate(lower));
1442 __ movd(xmm0, Operand(temp)); 1601 __ movd(xmm0, Operand(temp));
1443 __ por(res, xmm0); 1602 __ por(res, xmm0);
1444 } 1603 }
1445 } 1604 }
1446 } 1605 }
(...skipping 133 matching lines...)
1580 __ add(ToRegister(left), ToOperand(right)); 1739 __ add(ToRegister(left), ToOperand(right));
1581 } 1740 }
1582 1741
1583 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 1742 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1584 DeoptimizeIf(overflow, instr->environment()); 1743 DeoptimizeIf(overflow, instr->environment());
1585 } 1744 }
1586 } 1745 }
1587 1746
1588 1747
1589 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { 1748 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1749 CpuFeatures::Scope scope(SSE2);
1590 LOperand* left = instr->left(); 1750 LOperand* left = instr->left();
1591 LOperand* right = instr->right(); 1751 LOperand* right = instr->right();
1592 ASSERT(left->Equals(instr->result())); 1752 ASSERT(left->Equals(instr->result()));
1593 HMathMinMax::Operation operation = instr->hydrogen()->operation(); 1753 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1594 if (instr->hydrogen()->representation().IsInteger32()) { 1754 if (instr->hydrogen()->representation().IsInteger32()) {
1595 Label return_left; 1755 Label return_left;
1596 Condition condition = (operation == HMathMinMax::kMathMin) 1756 Condition condition = (operation == HMathMinMax::kMathMin)
1597 ? less_equal 1757 ? less_equal
1598 : greater_equal; 1758 : greater_equal;
1599 if (right->IsConstantOperand()) { 1759 if (right->IsConstantOperand()) {
(...skipping 41 matching lines...)
1641 __ j(parity_even, &return_left, Label::kNear); // left == NaN. 1801 __ j(parity_even, &return_left, Label::kNear); // left == NaN.
1642 __ bind(&return_right); 1802 __ bind(&return_right);
1643 __ movsd(left_reg, right_reg); 1803 __ movsd(left_reg, right_reg);
1644 1804
1645 __ bind(&return_left); 1805 __ bind(&return_left);
1646 } 1806 }
1647 } 1807 }
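
Now that GenerateCode() no longer opens a function-wide SSE2 scope, every block that emits XMM instructions declares its own, as DoMathMinMax does above. A minimal sketch of the recurring pattern in this patch, assuming CpuFeatures::Scope marks the feature usable by the assembler only within the enclosing C++ scope:

  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatures::Scope scope(SSE2);   // SSE2 instructions are legal here
    __ movdbl(xmm0, operand);         // fast path
  } else {
    __ fld_d(operand);                // x87 fallback when SSE2 is absent
  }
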
1648 1808
1649 1809
1650 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { 1810 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1811 CpuFeatures::Scope scope(SSE2);
1651 XMMRegister left = ToDoubleRegister(instr->left()); 1812 XMMRegister left = ToDoubleRegister(instr->left());
1652 XMMRegister right = ToDoubleRegister(instr->right()); 1813 XMMRegister right = ToDoubleRegister(instr->right());
1653 XMMRegister result = ToDoubleRegister(instr->result()); 1814 XMMRegister result = ToDoubleRegister(instr->result());
1654 // Modulo uses a fixed result register. 1815 // Modulo uses a fixed result register.
1655 ASSERT(instr->op() == Token::MOD || left.is(result)); 1816 ASSERT(instr->op() == Token::MOD || left.is(result));
1656 switch (instr->op()) { 1817 switch (instr->op()) {
1657 case Token::ADD: 1818 case Token::ADD:
1658 __ addsd(left, right); 1819 __ addsd(left, right);
1659 break; 1820 break;
1660 case Token::SUB: 1821 case Token::SUB:
1661 __ subsd(left, right); 1822 __ subsd(left, right);
1662 break; 1823 break;
1663 case Token::MUL: 1824 case Token::MUL:
1664 __ mulsd(left, right); 1825 __ mulsd(left, right);
1665 break; 1826 break;
1666 case Token::DIV: 1827 case Token::DIV:
1667 __ divsd(left, right); 1828 __ divsd(left, right);
1668 break; 1829 break;
1669 case Token::MOD: { 1830 case Token::MOD: {
1670 // Pass two doubles as arguments on the stack. 1831 // Pass two doubles as arguments on the stack.
1671 __ PrepareCallCFunction(4, eax); 1832 __ PrepareCallCFunction(4, eax);
1672 __ movdbl(Operand(esp, 0 * kDoubleSize), left); 1833 __ movdbl(Operand(esp, 0 * kDoubleSize), left);
(...skipping 52 matching lines...)
1725 } else { 1886 } else {
1726 __ j(cc, chunk_->GetAssemblyLabel(left_block)); 1887 __ j(cc, chunk_->GetAssemblyLabel(left_block));
1727 __ jmp(chunk_->GetAssemblyLabel(right_block)); 1888 __ jmp(chunk_->GetAssemblyLabel(right_block));
1728 } 1889 }
1729 } 1890 }
1730 1891
1731 1892
1732 void LCodeGen::DoBranch(LBranch* instr) { 1893 void LCodeGen::DoBranch(LBranch* instr) {
1733 int true_block = chunk_->LookupDestination(instr->true_block_id()); 1894 int true_block = chunk_->LookupDestination(instr->true_block_id());
1734 int false_block = chunk_->LookupDestination(instr->false_block_id()); 1895 int false_block = chunk_->LookupDestination(instr->false_block_id());
1896 CpuFeatures::Scope scope(SSE2);
1735 1897
1736 Representation r = instr->hydrogen()->value()->representation(); 1898 Representation r = instr->hydrogen()->value()->representation();
1737 if (r.IsInteger32()) { 1899 if (r.IsInteger32()) {
1738 Register reg = ToRegister(instr->value()); 1900 Register reg = ToRegister(instr->value());
1739 __ test(reg, Operand(reg)); 1901 __ test(reg, Operand(reg));
1740 EmitBranch(true_block, false_block, not_zero); 1902 EmitBranch(true_block, false_block, not_zero);
1741 } else if (r.IsDouble()) { 1903 } else if (r.IsDouble()) {
1742 XMMRegister reg = ToDoubleRegister(instr->value()); 1904 XMMRegister reg = ToDoubleRegister(instr->value());
1743 __ xorps(xmm0, xmm0); 1905 __ xorps(xmm0, xmm0);
1744 __ ucomisd(reg, xmm0); 1906 __ ucomisd(reg, xmm0);
(...skipping 139 matching lines...)
1884 return cond; 2046 return cond;
1885 } 2047 }
1886 2048
1887 2049
1888 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { 2050 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1889 LOperand* left = instr->left(); 2051 LOperand* left = instr->left();
1890 LOperand* right = instr->right(); 2052 LOperand* right = instr->right();
1891 int false_block = chunk_->LookupDestination(instr->false_block_id()); 2053 int false_block = chunk_->LookupDestination(instr->false_block_id());
1892 int true_block = chunk_->LookupDestination(instr->true_block_id()); 2054 int true_block = chunk_->LookupDestination(instr->true_block_id());
1893 Condition cc = TokenToCondition(instr->op(), instr->is_double()); 2055 Condition cc = TokenToCondition(instr->op(), instr->is_double());
2056 CpuFeatures::Scope scope(SSE2);
1894 2057
1895 if (left->IsConstantOperand() && right->IsConstantOperand()) { 2058 if (left->IsConstantOperand() && right->IsConstantOperand()) {
1896 // We can statically evaluate the comparison. 2059 // We can statically evaluate the comparison.
1897 double left_val = ToDouble(LConstantOperand::cast(left)); 2060 double left_val = ToDouble(LConstantOperand::cast(left));
1898 double right_val = ToDouble(LConstantOperand::cast(right)); 2061 double right_val = ToDouble(LConstantOperand::cast(right));
1899 int next_block = 2062 int next_block =
1900 EvalComparison(instr->op(), left_val, right_val) ? true_block 2063 EvalComparison(instr->op(), left_val, right_val) ? true_block
1901 : false_block; 2064 : false_block;
1902 EmitGoto(next_block); 2065 EmitGoto(next_block);
1903 } else { 2066 } else {
(...skipping 489 matching lines...)
2393 __ j(condition, &true_value, Label::kNear); 2556 __ j(condition, &true_value, Label::kNear);
2394 __ mov(ToRegister(instr->result()), factory()->false_value()); 2557 __ mov(ToRegister(instr->result()), factory()->false_value());
2395 __ jmp(&done, Label::kNear); 2558 __ jmp(&done, Label::kNear);
2396 __ bind(&true_value); 2559 __ bind(&true_value);
2397 __ mov(ToRegister(instr->result()), factory()->true_value()); 2560 __ mov(ToRegister(instr->result()), factory()->true_value());
2398 __ bind(&done); 2561 __ bind(&done);
2399 } 2562 }
2400 2563
2401 2564
2402 void LCodeGen::DoReturn(LReturn* instr) { 2565 void LCodeGen::DoReturn(LReturn* instr) {
2403 if (FLAG_trace) { 2566 if (FLAG_trace && info()->IsOptimizing()) {
2404 // Preserve the return value on the stack and rely on the runtime call 2567 // Preserve the return value on the stack and rely on the runtime call
2405 // to return the value in the same register. We're leaving the code 2568 // to return the value in the same register. We're leaving the code
2406 // managed by the register allocator and tearing down the frame, so it's 2569 // managed by the register allocator and tearing down the frame, so it's
2407 // safe to write to the context register. 2570 // safe to write to the context register.
2408 __ push(eax); 2571 __ push(eax);
2409 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); 2572 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2410 __ CallRuntime(Runtime::kTraceExit, 1); 2573 __ CallRuntime(Runtime::kTraceExit, 1);
2411 } 2574 }
2412 if (dynamic_frame_alignment_) { 2575 if (dynamic_frame_alignment_) {
2413 // Fetch the state of the dynamic frame alignment. 2576 // Fetch the state of the dynamic frame alignment.
2414 __ mov(edx, Operand(ebp, 2577 __ mov(edx, Operand(ebp,
2415 JavaScriptFrameConstants::kDynamicAlignmentStateOffset)); 2578 JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
2416 } 2579 }
2417 __ mov(esp, ebp); 2580 if (NeedsEagerFrame()) {
2418 __ pop(ebp); 2581 __ mov(esp, ebp);
2582 __ pop(ebp);
2583 }
2419 if (dynamic_frame_alignment_) { 2584 if (dynamic_frame_alignment_) {
2420 Label no_padding; 2585 Label no_padding;
2421 __ cmp(edx, Immediate(kNoAlignmentPadding)); 2586 __ cmp(edx, Immediate(kNoAlignmentPadding));
2422 __ j(equal, &no_padding); 2587 __ j(equal, &no_padding);
2423 if (FLAG_debug_code) { 2588 if (FLAG_debug_code) {
2424 __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize), 2589 __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
2425 Immediate(kAlignmentZapValue)); 2590 Immediate(kAlignmentZapValue));
2426 __ Assert(equal, "expected alignment marker"); 2591 __ Assert(equal, "expected alignment marker");
2427 } 2592 }
2428 __ Ret((GetParameterCount() + 2) * kPointerSize, ecx); 2593 __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
2429 __ bind(&no_padding); 2594 __ bind(&no_padding);
2430 } 2595 }
2431 __ Ret((GetParameterCount() + 1) * kPointerSize, ecx); 2596 if (info()->IsStub()) {
2597 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2598 __ Ret();
2599 } else {
2600 __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
2601 }
2432 } 2602 }
2433 2603
2434 2604
2435 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { 2605 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2436 Register result = ToRegister(instr->result()); 2606 Register result = ToRegister(instr->result());
2437 __ mov(result, Operand::Cell(instr->hydrogen()->cell())); 2607 __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
2438 if (instr->hydrogen()->RequiresHoleCheck()) { 2608 if (instr->hydrogen()->RequiresHoleCheck()) {
2439 __ cmp(result, factory()->the_hole_value()); 2609 __ cmp(result, factory()->the_hole_value());
2440 DeoptimizeIf(equal, instr->environment()); 2610 DeoptimizeIf(equal, instr->environment());
2441 } 2611 }
(...skipping 355 matching lines...)
2797 __ SmiUntag(ToRegister(key)); 2967 __ SmiUntag(ToRegister(key));
2798 } 2968 }
2799 Operand operand(BuildFastArrayOperand( 2969 Operand operand(BuildFastArrayOperand(
2800 instr->elements(), 2970 instr->elements(),
2801 key, 2971 key,
2802 instr->hydrogen()->key()->representation(), 2972 instr->hydrogen()->key()->representation(),
2803 elements_kind, 2973 elements_kind,
2804 0, 2974 0,
2805 instr->additional_index())); 2975 instr->additional_index()));
2806 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { 2976 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
2807 XMMRegister result(ToDoubleRegister(instr->result())); 2977 if (CpuFeatures::IsSupported(SSE2)) {
2808 __ movss(result, operand); 2978 CpuFeatures::Scope scope(SSE2);
2809 __ cvtss2sd(result, result); 2979 XMMRegister result(ToDoubleRegister(instr->result()));
2980 __ movss(result, operand);
2981 __ cvtss2sd(result, result);
2982 } else {
2983 __ fld_s(operand);
2984 HandleX87FPReturnValue(instr);
2985 }
2810 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { 2986 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
2811 __ movdbl(ToDoubleRegister(instr->result()), operand); 2987 if (CpuFeatures::IsSupported(SSE2)) {
2988 CpuFeatures::Scope scope(SSE2);
2989 __ movdbl(ToDoubleRegister(instr->result()), operand);
2990 } else {
2991 __ fld_d(operand);
2992 HandleX87FPReturnValue(instr);
2993 }
2812 } else { 2994 } else {
2813 Register result(ToRegister(instr->result())); 2995 Register result(ToRegister(instr->result()));
2814 switch (elements_kind) { 2996 switch (elements_kind) {
2815 case EXTERNAL_BYTE_ELEMENTS: 2997 case EXTERNAL_BYTE_ELEMENTS:
2816 __ movsx_b(result, operand); 2998 __ movsx_b(result, operand);
2817 break; 2999 break;
2818 case EXTERNAL_PIXEL_ELEMENTS: 3000 case EXTERNAL_PIXEL_ELEMENTS:
2819 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: 3001 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
2820 __ movzx_b(result, operand); 3002 __ movzx_b(result, operand);
2821 break; 3003 break;
(...skipping 23 matching lines...)
2845 case FAST_HOLEY_DOUBLE_ELEMENTS: 3027 case FAST_HOLEY_DOUBLE_ELEMENTS:
2846 case DICTIONARY_ELEMENTS: 3028 case DICTIONARY_ELEMENTS:
2847 case NON_STRICT_ARGUMENTS_ELEMENTS: 3029 case NON_STRICT_ARGUMENTS_ELEMENTS:
2848 UNREACHABLE(); 3030 UNREACHABLE();
2849 break; 3031 break;
2850 } 3032 }
2851 } 3033 }
2852 } 3034 }
2853 3035
2854 3036
3037 void LCodeGen::HandleX87FPReturnValue(LInstruction* instr) {
3038 if (IsX87TopOfStack(instr->result())) {
3039 // Return value is already on stack. If the value has no uses, then
3040 // pop it off the FP stack. Otherwise, make sure that there are enough
3041 // copies of the value on the stack to feed all of the usages, e.g.
3042 // when the following instruction uses the return value in multiple
3043 // inputs.
3044 int count = instr->hydrogen_value()->UseCount();
3045 if (count == 0) {
3046 __ fstp(0);
3047 } else {
3048 count--;
3049 ASSERT(count <= 7);
3050 while (count-- > 0) {
3051 __ fld(0);
3052 }
3053 }
3054 } else {
3055 __ fstp_d(ToOperand(instr->result()));
3056 }
3057 }
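
Worked example for the use-count handling above, assuming the loaded value sits in x87 st(0) and instr->hydrogen_value()->UseCount() == 3: one copy is already on the FP stack, so after count-- the loop emits two duplications (the ASSERT(count <= 7) reflects the eight-slot x87 register stack):

  // count = 3; count--;   // one copy already in st(0), two more needed
  // fld(0);               // FP stack: v, v
  // fld(0);               // FP stack: v, v, v -- one copy per remaining use
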
3058
3059
2855 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { 3060 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
2856 XMMRegister result = ToDoubleRegister(instr->result());
2857
2858 if (instr->hydrogen()->RequiresHoleCheck()) { 3061 if (instr->hydrogen()->RequiresHoleCheck()) {
2859 int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + 3062 int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
2860 sizeof(kHoleNanLower32); 3063 sizeof(kHoleNanLower32);
2861 Operand hole_check_operand = BuildFastArrayOperand( 3064 Operand hole_check_operand = BuildFastArrayOperand(
2862 instr->elements(), instr->key(), 3065 instr->elements(), instr->key(),
2863 instr->hydrogen()->key()->representation(), 3066 instr->hydrogen()->key()->representation(),
2864 FAST_DOUBLE_ELEMENTS, 3067 FAST_DOUBLE_ELEMENTS,
2865 offset, 3068 offset,
2866 instr->additional_index()); 3069 instr->additional_index());
2867 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); 3070 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
2868 DeoptimizeIf(equal, instr->environment()); 3071 DeoptimizeIf(equal, instr->environment());
2869 } 3072 }
2870 3073
2871 Operand double_load_operand = BuildFastArrayOperand( 3074 Operand double_load_operand = BuildFastArrayOperand(
2872 instr->elements(), 3075 instr->elements(),
2873 instr->key(), 3076 instr->key(),
2874 instr->hydrogen()->key()->representation(), 3077 instr->hydrogen()->key()->representation(),
2875 FAST_DOUBLE_ELEMENTS, 3078 FAST_DOUBLE_ELEMENTS,
2876 FixedDoubleArray::kHeaderSize - kHeapObjectTag, 3079 FixedDoubleArray::kHeaderSize - kHeapObjectTag,
2877 instr->additional_index()); 3080 instr->additional_index());
2878 __ movdbl(result, double_load_operand); 3081 if (CpuFeatures::IsSupported(SSE2)) {
3082 CpuFeatures::Scope scope(SSE2);
3083 XMMRegister result = ToDoubleRegister(instr->result());
3084 __ movdbl(result, double_load_operand);
3085 } else {
3086 __ fld_d(double_load_operand);
3087 HandleX87FPReturnValue(instr);
3088 }
2879 } 3089 }
2880 3090
2881 3091
2882 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { 3092 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
2883 Register result = ToRegister(instr->result()); 3093 Register result = ToRegister(instr->result());
2884 3094
2885 // Load the result. 3095 // Load the result.
2886 __ mov(result, 3096 __ mov(result,
2887 BuildFastArrayOperand(instr->elements(), 3097 BuildFastArrayOperand(instr->elements(),
2888 instr->key(), 3098 instr->key(),
(...skipping 395 matching lines...)
3284 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); 3494 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3285 } 3495 }
3286 virtual LInstruction* instr() { return instr_; } 3496 virtual LInstruction* instr() { return instr_; }
3287 private: 3497 private:
3288 LUnaryMathOperation* instr_; 3498 LUnaryMathOperation* instr_;
3289 }; 3499 };
3290 3500
3291 ASSERT(instr->value()->Equals(instr->result())); 3501 ASSERT(instr->value()->Equals(instr->result()));
3292 Representation r = instr->hydrogen()->value()->representation(); 3502 Representation r = instr->hydrogen()->value()->representation();
3293 3503
3504 CpuFeatures::Scope scope(SSE2);
3294 if (r.IsDouble()) { 3505 if (r.IsDouble()) {
3295 XMMRegister scratch = xmm0; 3506 XMMRegister scratch = xmm0;
3296 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3507 XMMRegister input_reg = ToDoubleRegister(instr->value());
3297 __ xorps(scratch, scratch); 3508 __ xorps(scratch, scratch);
3298 __ subsd(scratch, input_reg); 3509 __ subsd(scratch, input_reg);
3299 __ pand(input_reg, scratch); 3510 __ pand(input_reg, scratch);
3300 } else if (r.IsInteger32()) { 3511 } else if (r.IsInteger32()) {
3301 EmitIntegerMathAbs(instr); 3512 EmitIntegerMathAbs(instr);
3302 } else { // Tagged case. 3513 } else { // Tagged case.
3303 DeferredMathAbsTaggedHeapNumber* deferred = 3514 DeferredMathAbsTaggedHeapNumber* deferred =
3304 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); 3515 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3305 Register input_reg = ToRegister(instr->value()); 3516 Register input_reg = ToRegister(instr->value());
3306 // Smi check. 3517 // Smi check.
3307 __ JumpIfNotSmi(input_reg, deferred->entry()); 3518 __ JumpIfNotSmi(input_reg, deferred->entry());
3308 EmitIntegerMathAbs(instr); 3519 EmitIntegerMathAbs(instr);
3309 __ bind(deferred->exit()); 3520 __ bind(deferred->exit());
3310 } 3521 }
3311 } 3522 }
3312 3523
3313 3524
3314 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { 3525 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
3526 CpuFeatures::Scope scope(SSE2);
3315 XMMRegister xmm_scratch = xmm0; 3527 XMMRegister xmm_scratch = xmm0;
3316 Register output_reg = ToRegister(instr->result()); 3528 Register output_reg = ToRegister(instr->result());
3317 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3529 XMMRegister input_reg = ToDoubleRegister(instr->value());
3318 3530
3319 if (CpuFeatures::IsSupported(SSE4_1)) { 3531 if (CpuFeatures::IsSupported(SSE4_1)) {
3320 CpuFeatures::Scope scope(SSE4_1); 3532 CpuFeatures::Scope scope(SSE4_1);
3321 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3533 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3322 // Deoptimize on negative zero. 3534 // Deoptimize on negative zero.
3323 Label non_zero; 3535 Label non_zero;
3324 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. 3536 __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
(...skipping 44 matching lines...)
3369 __ ucomisd(input_reg, xmm_scratch); 3581 __ ucomisd(input_reg, xmm_scratch);
3370 __ j(equal, &done, Label::kNear); 3582 __ j(equal, &done, Label::kNear);
3371 __ sub(output_reg, Immediate(1)); 3583 __ sub(output_reg, Immediate(1));
3372 DeoptimizeIf(overflow, instr->environment()); 3584 DeoptimizeIf(overflow, instr->environment());
3373 3585
3374 __ bind(&done); 3586 __ bind(&done);
3375 } 3587 }
3376 } 3588 }
3377 3589
3378 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { 3590 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3591 CpuFeatures::Scope scope(SSE2);
3379 XMMRegister xmm_scratch = xmm0; 3592 XMMRegister xmm_scratch = xmm0;
3380 Register output_reg = ToRegister(instr->result()); 3593 Register output_reg = ToRegister(instr->result());
3381 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3594 XMMRegister input_reg = ToDoubleRegister(instr->value());
3382 3595
3383 Label below_half, done; 3596 Label below_half, done;
3384 // xmm_scratch = 0.5 3597 // xmm_scratch = 0.5
3385 ExternalReference one_half = ExternalReference::address_of_one_half(); 3598 ExternalReference one_half = ExternalReference::address_of_one_half();
3386 __ movdbl(xmm_scratch, Operand::StaticVariable(one_half)); 3599 __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
3387 __ ucomisd(xmm_scratch, input_reg); 3600 __ ucomisd(xmm_scratch, input_reg);
3388 __ j(above, &below_half); 3601 __ j(above, &below_half);
(...skipping 25 matching lines...)
3414 __ cvtss2sd(xmm_scratch, xmm_scratch); 3627 __ cvtss2sd(xmm_scratch, xmm_scratch);
3415 __ ucomisd(input_reg, xmm_scratch); 3628 __ ucomisd(input_reg, xmm_scratch);
3416 DeoptimizeIf(below, instr->environment()); 3629 DeoptimizeIf(below, instr->environment());
3417 } 3630 }
3418 __ Set(output_reg, Immediate(0)); 3631 __ Set(output_reg, Immediate(0));
3419 __ bind(&done); 3632 __ bind(&done);
3420 } 3633 }
3421 3634
3422 3635
3423 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { 3636 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3637 CpuFeatures::Scope scope(SSE2);
3424 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3638 XMMRegister input_reg = ToDoubleRegister(instr->value());
3425 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); 3639 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3426 __ sqrtsd(input_reg, input_reg); 3640 __ sqrtsd(input_reg, input_reg);
3427 } 3641 }
3428 3642
3429 3643
3430 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { 3644 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3645 CpuFeatures::Scope scope(SSE2);
3431 XMMRegister xmm_scratch = xmm0; 3646 XMMRegister xmm_scratch = xmm0;
3432 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3647 XMMRegister input_reg = ToDoubleRegister(instr->value());
3433 Register scratch = ToRegister(instr->temp()); 3648 Register scratch = ToRegister(instr->temp());
3434 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); 3649 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3435 3650
3436 // Note that according to ECMA-262 15.8.2.13: 3651 // Note that according to ECMA-262 15.8.2.13:
3437 // Math.pow(-Infinity, 0.5) == Infinity 3652 // Math.pow(-Infinity, 0.5) == Infinity
3438 // Math.sqrt(-Infinity) == NaN 3653 // Math.sqrt(-Infinity) == NaN
3439 Label done, sqrt; 3654 Label done, sqrt;
3440 // Check base for -Infinity. According to IEEE-754, single-precision 3655 // Check base for -Infinity. According to IEEE-754, single-precision
(...skipping 56 matching lines...)
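
The ECMA-262 15.8.2.13 note above is the whole reason DoMathPowHalf is not a bare sqrtsd; in portable C++ the required special case is simply:

#include <cmath>
#include <limits>

// Math.pow(x, 0.5) must map -Infinity to +Infinity even though
// sqrt(-Infinity) is NaN, so the base is tested before the sqrt.
double PowHalf(double base) {
  if (base == -std::numeric_limits<double>::infinity())
    return std::numeric_limits<double>::infinity();  // the special case
  return std::sqrt(base);                            // the &sqrt path
}
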
3497 DeferredDoRandom(LCodeGen* codegen, LRandom* instr) 3712 DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
3498 : LDeferredCode(codegen), instr_(instr) { } 3713 : LDeferredCode(codegen), instr_(instr) { }
3499 virtual void Generate() { codegen()->DoDeferredRandom(instr_); } 3714 virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
3500 virtual LInstruction* instr() { return instr_; } 3715 virtual LInstruction* instr() { return instr_; }
3501 private: 3716 private:
3502 LRandom* instr_; 3717 LRandom* instr_;
3503 }; 3718 };
3504 3719
3505 DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr); 3720 DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
3506 3721
3722 CpuFeatures::Scope scope(SSE2);
3507 // Having marked this instruction as a call we can use any 3723 // Having marked this instruction as a call we can use any
3508 // registers. 3724 // registers.
3509 ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); 3725 ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3510 ASSERT(ToRegister(instr->global_object()).is(eax)); 3726 ASSERT(ToRegister(instr->global_object()).is(eax));
3511 // Assert that the register size is indeed the size of each seed. 3727 // Assert that the register size is indeed the size of each seed.
3512 static const int kSeedSize = sizeof(uint32_t); 3728 static const int kSeedSize = sizeof(uint32_t);
3513 STATIC_ASSERT(kPointerSize == kSeedSize); 3729 STATIC_ASSERT(kPointerSize == kSeedSize);
3514 3730
3515 __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset)); 3731 __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
3516 static const int kRandomSeedOffset = 3732 static const int kRandomSeedOffset =
(...skipping 47 matching lines...)
3564 3780
3565 void LCodeGen::DoDeferredRandom(LRandom* instr) { 3781 void LCodeGen::DoDeferredRandom(LRandom* instr) {
3566 __ PrepareCallCFunction(1, ebx); 3782 __ PrepareCallCFunction(1, ebx);
3567 __ mov(Operand(esp, 0), eax); 3783 __ mov(Operand(esp, 0), eax);
3568 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); 3784 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
3569 // Return value is in eax. 3785 // Return value is in eax.
3570 } 3786 }
3571 3787
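
The elided body of DoRandom turns the 32 fresh random bits into a double in [0, 1) with an exponent/mantissa bit trick rather than an integer-to-double conversion. One standard form of that trick, sketched here for illustration (the inlined sequence differs in detail):

#include <cstdint>
#include <cstring>

// Install the 0x3FF biased exponent above the 32 random bits placed in
// the top of the 52-bit mantissa, giving a double in [1, 2), then
// subtract 1.0; the result is random_bits * 2^-32 in [0, 1).
double RandomBitsToDouble(uint32_t random_bits) {
  uint64_t bits = (UINT64_C(0x3FF) << 52) |
                  (static_cast<uint64_t>(random_bits) << 20);
  double d;
  std::memcpy(&d, &bits, sizeof d);  // reinterpret as an IEEE-754 double
  return d - 1.0;
}
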
3572 3788
3573 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { 3789 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
3790 CpuFeatures::Scope scope(SSE2);
3574 ASSERT(instr->value()->Equals(instr->result())); 3791 ASSERT(instr->value()->Equals(instr->result()));
3575 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3792 XMMRegister input_reg = ToDoubleRegister(instr->value());
3576 Label positive, done, zero; 3793 Label positive, done, zero;
3577 __ xorps(xmm0, xmm0); 3794 __ xorps(xmm0, xmm0);
3578 __ ucomisd(input_reg, xmm0); 3795 __ ucomisd(input_reg, xmm0);
3579 __ j(above, &positive, Label::kNear); 3796 __ j(above, &positive, Label::kNear);
3580 __ j(equal, &zero, Label::kNear); 3797 __ j(equal, &zero, Label::kNear);
3581 ExternalReference nan = 3798 ExternalReference nan =
3582 ExternalReference::address_of_canonical_non_hole_nan(); 3799 ExternalReference::address_of_canonical_non_hole_nan();
3583 __ movdbl(input_reg, Operand::StaticVariable(nan)); 3800 __ movdbl(input_reg, Operand::StaticVariable(nan));
(...skipping 11 matching lines...)
3595 __ fld_d(Operand(esp, 0)); 3812 __ fld_d(Operand(esp, 0));
3596 __ fyl2x(); 3813 __ fyl2x();
3597 __ fstp_d(Operand(esp, 0)); 3814 __ fstp_d(Operand(esp, 0));
3598 __ movdbl(input_reg, Operand(esp, 0)); 3815 __ movdbl(input_reg, Operand(esp, 0));
3599 __ add(Operand(esp), Immediate(kDoubleSize)); 3816 __ add(Operand(esp), Immediate(kDoubleSize));
3600 __ bind(&done); 3817 __ bind(&done);
3601 } 3818 }
3602 3819
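
For reference, fyl2x computes ST(1) * log2(ST(0)), and the elided lines preload ln 2 onto the FPU stack (fldln2) so the sequence yields the natural logarithm. A C++ model of the whole function, including the branches visible above:

#include <cmath>

// 0 -> -Infinity (the &zero branch), negative or NaN -> canonical NaN,
// everything else -> ln(2) * log2(x), which is what fyl2x computes here.
double MathLog(double x) {
  if (x == 0.0) return -INFINITY;
  if (x < 0.0 || std::isnan(x)) return NAN;  // canonical non-hole NaN load
  const double kLn2 = 0.6931471805599453;    // what fldln2 pushes
  return kLn2 * std::log2(x);                // fyl2x with y = ln 2
}
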
3603 3820
3604 void LCodeGen::DoMathExp(LMathExp* instr) { 3821 void LCodeGen::DoMathExp(LMathExp* instr) {
3822 CpuFeatures::Scope scope(SSE2);
3605 XMMRegister input = ToDoubleRegister(instr->value()); 3823 XMMRegister input = ToDoubleRegister(instr->value());
3606 XMMRegister result = ToDoubleRegister(instr->result()); 3824 XMMRegister result = ToDoubleRegister(instr->result());
3607 Register temp1 = ToRegister(instr->temp1()); 3825 Register temp1 = ToRegister(instr->temp1());
3608 Register temp2 = ToRegister(instr->temp2()); 3826 Register temp2 = ToRegister(instr->temp2());
3609 3827
3610 MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2); 3828 MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
3611 } 3829 }
3612 3830
3613 3831
3614 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) { 3832 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
(...skipping 248 matching lines...)
3863 int constant_index = 4081 int constant_index =
3864 ToInteger32(LConstantOperand::cast(instr->index())); 4082 ToInteger32(LConstantOperand::cast(instr->index()));
3865 if (instr->hydrogen()->length()->representation().IsTagged()) { 4083 if (instr->hydrogen()->length()->representation().IsTagged()) {
3866 __ cmp(ToOperand(instr->length()), 4084 __ cmp(ToOperand(instr->length()),
3867 Immediate(Smi::FromInt(constant_index))); 4085 Immediate(Smi::FromInt(constant_index)));
3868 } else { 4086 } else {
3869 __ cmp(ToOperand(instr->length()), Immediate(constant_index)); 4087 __ cmp(ToOperand(instr->length()), Immediate(constant_index));
3870 } 4088 }
3871 DeoptimizeIf(below_equal, instr->environment()); 4089 DeoptimizeIf(below_equal, instr->environment());
3872 } else { 4090 } else {
4091 if (instr->hydrogen()->index()->representation().IsTagged() &&
4092 !instr->hydrogen()->index()->type().IsSmi()) {
4093 __ test(ToRegister(instr->index()), Immediate(kSmiTagMask));
4094 DeoptimizeIf(not_zero, instr->environment());
4095 }
3873 __ cmp(ToRegister(instr->index()), ToOperand(instr->length())); 4096 __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
3874 DeoptimizeIf(above_equal, instr->environment()); 4097 DeoptimizeIf(above_equal, instr->environment());
3875 } 4098 }
3876 } 4099 }
3877 4100
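
The hunk added above matters for tagged, possibly-non-smi indices: the kSmiTagMask test deoptimizes before the raw compare could treat a heap-object pointer as an index. The ia32 encoding it relies on, sketched in C++:

#include <cstdint>

// ia32 smi encoding: a smi stores the integer shifted left by one, so the
// low bit (kSmiTagMask) is 0 for smis and 1 for heap-object pointers.
inline bool IsSmi(intptr_t tagged_value) {
  return (tagged_value & 1) == 0;  // test(reg, Immediate(kSmiTagMask))
}
inline int32_t SmiValue(intptr_t tagged_value) {
  return static_cast<int32_t>(tagged_value >> 1);  // arithmetic untag
}
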
3878 4101
3879 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { 4102 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
3880 ElementsKind elements_kind = instr->elements_kind(); 4103 ElementsKind elements_kind = instr->elements_kind();
3881 LOperand* key = instr->key(); 4104 LOperand* key = instr->key();
3882 if (!key->IsConstantOperand() && 4105 if (!key->IsConstantOperand() &&
3883 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(), 4106 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
3884 elements_kind)) { 4107 elements_kind)) {
3885 __ SmiUntag(ToRegister(key)); 4108 __ SmiUntag(ToRegister(key));
3886 } 4109 }
3887 Operand operand(BuildFastArrayOperand( 4110 Operand operand(BuildFastArrayOperand(
3888 instr->elements(), 4111 instr->elements(),
3889 key, 4112 key,
3890 instr->hydrogen()->key()->representation(), 4113 instr->hydrogen()->key()->representation(),
3891 elements_kind, 4114 elements_kind,
3892 0, 4115 0,
3893 instr->additional_index())); 4116 instr->additional_index()));
3894 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { 4117 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
4118 CpuFeatures::Scope scope(SSE2);
3895 __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value())); 4119 __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
3896 __ movss(operand, xmm0); 4120 __ movss(operand, xmm0);
3897 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { 4121 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
4122 CpuFeatures::Scope scope(SSE2);
3898 __ movdbl(operand, ToDoubleRegister(instr->value())); 4123 __ movdbl(operand, ToDoubleRegister(instr->value()));
3899 } else { 4124 } else {
3900 Register value = ToRegister(instr->value()); 4125 Register value = ToRegister(instr->value());
3901 switch (elements_kind) { 4126 switch (elements_kind) {
3902 case EXTERNAL_PIXEL_ELEMENTS: 4127 case EXTERNAL_PIXEL_ELEMENTS:
3903 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: 4128 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3904 case EXTERNAL_BYTE_ELEMENTS: 4129 case EXTERNAL_BYTE_ELEMENTS:
3905 __ mov_b(operand, value); 4130 __ mov_b(operand, value);
3906 break; 4131 break;
3907 case EXTERNAL_SHORT_ELEMENTS: 4132 case EXTERNAL_SHORT_ELEMENTS:
(...skipping 15 matching lines...)
3923 case DICTIONARY_ELEMENTS: 4148 case DICTIONARY_ELEMENTS:
3924 case NON_STRICT_ARGUMENTS_ELEMENTS: 4149 case NON_STRICT_ARGUMENTS_ELEMENTS:
3925 UNREACHABLE(); 4150 UNREACHABLE();
3926 break; 4151 break;
3927 } 4152 }
3928 } 4153 }
3929 } 4154 }
3930 4155
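
The integer half of the switch above reduces to "store the low N bits of the value register, where N is fixed by the array kind"; floats and doubles take the SSE2 paths instead. A compact model (hypothetical helper, not V8 API):

#include <cstdint>

// Integer external-array stores by element width, as in the switch above.
void StoreKeyedIntElement(void* base, int index, int32_t value,
                          int log2_element_size) {
  switch (log2_element_size) {
    case 0:  // byte and pixel kinds: mov_b
      static_cast<int8_t*>(base)[index] = static_cast<int8_t>(value); break;
    case 1:  // short kinds: mov_w
      static_cast<int16_t*>(base)[index] = static_cast<int16_t>(value); break;
    case 2:  // int and uint kinds: mov
      static_cast<int32_t*>(base)[index] = value; break;
  }
}
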
3931 4156
3932 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { 4157 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4158 CpuFeatures::Scope scope(SSE2);
3933 XMMRegister value = ToDoubleRegister(instr->value()); 4159 XMMRegister value = ToDoubleRegister(instr->value());
3934 4160
3935 if (instr->NeedsCanonicalization()) { 4161 if (instr->NeedsCanonicalization()) {
3936 Label have_value; 4162 Label have_value;
3937 4163
3938 __ ucomisd(value, value); 4164 __ ucomisd(value, value);
3939 __ ucomisd(value, value); 4165 __ ucomisd(value, value);
3940 4166
3941 ExternalReference canonical_nan_reference = 4167 ExternalReference canonical_nan_reference =
3942 ExternalReference::address_of_canonical_non_hole_nan(); 4168 ExternalReference::address_of_canonical_non_hole_nan();
(...skipping 230 matching lines...)
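
The canonicalization above relies on ucomisd(value, value) setting the parity flag only for NaN: non-NaN values take the parity_odd jump straight to the store, while NaNs are replaced by one fixed bit pattern so a NaN payload can never forge the hole marker. A sketch, with the concrete pattern an assumption for illustration:

#include <cmath>
#include <cstdint>
#include <cstring>

// Replace any NaN payload with a single canonical quiet NaN before it is
// stored into a FixedDoubleArray.
double CanonicalizeNaN(double value) {
  if (!std::isnan(value)) return value;  // j(parity_odd, &have_value)
  const uint64_t kCanonicalNaNBits = UINT64_C(0x7FF8000000000000);  // assumed
  double canonical;
  std::memcpy(&canonical, &kCanonicalNaNBits, sizeof canonical);
  return canonical;  // movdbl from canonical_nan_reference
}
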
4173 4399
4174 void LCodeGen::DoStringAdd(LStringAdd* instr) { 4400 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4175 EmitPushTaggedOperand(instr->left()); 4401 EmitPushTaggedOperand(instr->left());
4176 EmitPushTaggedOperand(instr->right()); 4402 EmitPushTaggedOperand(instr->right());
4177 StringAddStub stub(NO_STRING_CHECK_IN_STUB); 4403 StringAddStub stub(NO_STRING_CHECK_IN_STUB);
4178 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 4404 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4179 } 4405 }
4180 4406
4181 4407
4182 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { 4408 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4183 LOperand* input = instr->value(); 4409 if (CpuFeatures::IsSupported(SSE2)) {
4184 ASSERT(input->IsRegister() || input->IsStackSlot()); 4410 CpuFeatures::Scope scope(SSE2);
4185 LOperand* output = instr->result(); 4411 LOperand* input = instr->value();
4186 ASSERT(output->IsDoubleRegister()); 4412 ASSERT(input->IsRegister() || input->IsStackSlot());
4187 __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); 4413 LOperand* output = instr->result();
4414 ASSERT(output->IsDoubleRegister());
4415 __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
4416 } else {
4417 UNREACHABLE();
4418 }
4188 } 4419 }
4189 4420
4190 4421
4191 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { 4422 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4423 CpuFeatures::Scope scope(SSE2);
4192 LOperand* input = instr->value(); 4424 LOperand* input = instr->value();
4193 LOperand* output = instr->result(); 4425 LOperand* output = instr->result();
4194 LOperand* temp = instr->temp(); 4426 LOperand* temp = instr->temp();
4195 4427
4196 __ LoadUint32(ToDoubleRegister(output), 4428 __ LoadUint32(ToDoubleRegister(output),
4197 ToRegister(input), 4429 ToRegister(input),
4198 ToDoubleRegister(temp)); 4430 ToDoubleRegister(temp));
4199 } 4431 }
4200 4432
4201 4433
(...skipping 57 matching lines...)
4259 PushSafepointRegistersScope scope(this); 4491 PushSafepointRegistersScope scope(this);
4260 4492
4261 Label done; 4493 Label done;
4262 4494
4263 if (signedness == SIGNED_INT32) { 4495 if (signedness == SIGNED_INT32) {
4264 // There was overflow, so bits 30 and 31 of the original integer 4496 // There was overflow, so bits 30 and 31 of the original integer
4265 // disagree. Try to allocate a heap number in new space and store 4497 // disagree. Try to allocate a heap number in new space and store
4266 // the value in there. If that fails, call the runtime system. 4498 // the value in there. If that fails, call the runtime system.
4267 __ SmiUntag(reg); 4499 __ SmiUntag(reg);
4268 __ xor_(reg, 0x80000000); 4500 __ xor_(reg, 0x80000000);
4269 __ cvtsi2sd(xmm0, Operand(reg)); 4501 if (CpuFeatures::IsSupported(SSE2)) {
4502 CpuFeatures::Scope feature_scope(SSE2);
4503 __ cvtsi2sd(xmm0, Operand(reg));
4504 } else {
4505 __ push(reg);
4506 __ fild_s(Operand(esp, 0));
4507 __ pop(reg);
4508 }
4270 } else { 4509 } else {
4271 __ LoadUint32(xmm0, reg, xmm1); 4510 if (CpuFeatures::IsSupported(SSE2)) {
4511 CpuFeatures::Scope feature_scope(SSE2);
4512 __ LoadUint32(xmm0, reg, xmm1);
4513 } else {
4514 UNREACHABLE();
4515 }
4272 } 4516 }
4273 4517
4274 if (FLAG_inline_new) { 4518 if (FLAG_inline_new) {
4275 __ AllocateHeapNumber(reg, tmp, no_reg, &slow); 4519 __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
4276 __ jmp(&done, Label::kNear); 4520 __ jmp(&done, Label::kNear);
4277 } 4521 }
4278 4522
4279 // Slow case: Call the runtime system to do the number allocation. 4523 // Slow case: Call the runtime system to do the number allocation.
4280 __ bind(&slow); 4524 __ bind(&slow);
4281 4525
4282 // TODO(3095996): Put a valid pointer value in the stack slot where the result 4526 // TODO(3095996): Put a valid pointer value in the stack slot where the result
4283 // register is stored, as this register is in the pointer map, but contains an 4527 // register is stored, as this register is in the pointer map, but contains an
4284 // integer value. 4528 // integer value.
4285 __ StoreToSafepointRegisterSlot(reg, Immediate(0)); 4529 __ StoreToSafepointRegisterSlot(reg, Immediate(0));
4286 // NumberTagI and NumberTagD use the context from the frame, rather than 4530 // NumberTagI and NumberTagD use the context from the frame, rather than
4287 // the environment's HContext or HInlinedContext value. 4531 // the environment's HContext or HInlinedContext value.
4288 // They only call Runtime::kAllocateHeapNumber. 4532 // They only call Runtime::kAllocateHeapNumber.
4289 // The corresponding HChange instructions are added in a phase that does 4533 // The corresponding HChange instructions are added in a phase that does
4290 // not have easy access to the local context. 4534 // not have easy access to the local context.
4291 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); 4535 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4292 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 4536 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4293 RecordSafepointWithRegisters( 4537 RecordSafepointWithRegisters(
4294 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); 4538 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4295 if (!reg.is(eax)) __ mov(reg, eax); 4539 if (!reg.is(eax)) __ mov(reg, eax);
4296 4540
4297 // Done. Put the value in xmm0 into the value of the allocated heap 4541 // Done. Put the value in xmm0 into the value of the allocated heap
4298 // number. 4542 // number.
4299 __ bind(&done); 4543 __ bind(&done);
4300 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0); 4544 if (CpuFeatures::IsSupported(SSE2)) {
4545 CpuFeatures::Scope feature_scope(SSE2);
4546 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
4547 } else {
4548 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
4549 }
4301 __ StoreToSafepointRegisterSlot(reg, reg); 4550 __ StoreToSafepointRegisterSlot(reg, reg);
4302 } 4551 }
4303 4552
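
About the untag sequence at the top of DoDeferredNumberTagI: smi tagging is value << 1, so when tagging overflowed, the stored word holds the low 32 bits of 2 * value with a wrong bit 31 (the comment's "bits 30 and 31 disagree"). SmiUntag plus the xor recovers the original value; a C++ model, where the signed shift mirrors ia32 sar semantics:

#include <cstdint>

// Recover the original int32 after smi tagging overflowed: arithmetic
// shift right by one (SmiUntag), then flip the sign bit that the
// overflow inverted (xor_(reg, 0x80000000)).
int32_t RecoverIntFromOverflowedSmi(int32_t tagged) {
  int32_t untagged = tagged >> 1;  // sar on ia32
  return untagged ^ INT32_MIN;     // restore bit 31
}
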
4304 4553
4305 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { 4554 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4306 class DeferredNumberTagD: public LDeferredCode { 4555 class DeferredNumberTagD: public LDeferredCode {
4307 public: 4556 public:
4308 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) 4557 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4309 : LDeferredCode(codegen), instr_(instr) { } 4558 : LDeferredCode(codegen), instr_(instr) { }
4310 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } 4559 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
4311 virtual LInstruction* instr() { return instr_; } 4560 virtual LInstruction* instr() { return instr_; }
4312 private: 4561 private:
4313 LNumberTagD* instr_; 4562 LNumberTagD* instr_;
4314 }; 4563 };
4315 4564
4316 XMMRegister input_reg = ToDoubleRegister(instr->value());
4317 Register reg = ToRegister(instr->result()); 4565 Register reg = ToRegister(instr->result());
4318 Register tmp = ToRegister(instr->temp()); 4566 Register tmp = ToRegister(instr->temp());
4319 4567
4320 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); 4568 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4321 if (FLAG_inline_new) { 4569 if (FLAG_inline_new) {
4322 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); 4570 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
4323 } else { 4571 } else {
4324 __ jmp(deferred->entry()); 4572 __ jmp(deferred->entry());
4325 } 4573 }
4326 __ bind(deferred->exit()); 4574 __ bind(deferred->exit());
4327 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); 4575 if (CpuFeatures::IsSupported(SSE2)) {
4576 CpuFeatures::Scope scope(SSE2);
4577 XMMRegister input_reg = ToDoubleRegister(instr->value());
4578 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
4579 } else {
4580 if (!IsX87TopOfStack(instr->value())) {
4581 __ fld_d(ToOperand(instr->value()));
4582 }
4583 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
4584 }
4328 } 4585 }
4329 4586
4330 4587
4331 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { 4588 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4332 // TODO(3095996): Get rid of this. For now, we need to make the 4589 // TODO(3095996): Get rid of this. For now, we need to make the
4333 // result register contain a valid pointer because it is already 4590 // result register contain a valid pointer because it is already
4334 // contained in the register pointer map. 4591 // contained in the register pointer map.
4335 Register reg = ToRegister(instr->result()); 4592 Register reg = ToRegister(instr->result());
4336 __ Set(reg, Immediate(0)); 4593 __ Set(reg, Immediate(0));
4337 4594
(...skipping 136 matching lines...)
4474 __ cmp(input_reg, 0x80000000u); 4731 __ cmp(input_reg, 0x80000000u);
4475 __ j(not_equal, &done); 4732 __ j(not_equal, &done);
4476 // Check if the input was 0x80000000 (kMinInt). 4733 // Check if the input was 0x80000000 (kMinInt).
4477 // If no, then we got an overflow and we deoptimize. 4734 // If no, then we got an overflow and we deoptimize.
4478 ExternalReference min_int = ExternalReference::address_of_min_int(); 4735 ExternalReference min_int = ExternalReference::address_of_min_int();
4479 __ movdbl(xmm_temp, Operand::StaticVariable(min_int)); 4736 __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
4480 __ ucomisd(xmm_temp, xmm0); 4737 __ ucomisd(xmm_temp, xmm0);
4481 DeoptimizeIf(not_equal, instr->environment()); 4738 DeoptimizeIf(not_equal, instr->environment());
4482 DeoptimizeIf(parity_even, instr->environment()); // NaN. 4739 DeoptimizeIf(parity_even, instr->environment()); // NaN.
4483 } 4740 }
4484 } else { 4741 } else if (CpuFeatures::IsSupported(SSE2)) {
4742 CpuFeatures::Scope scope(SSE2);
4485 // Deoptimize if we don't have a heap number. 4743 // Deoptimize if we don't have a heap number.
4486 __ RecordComment("Deferred TaggedToI: not a heap number"); 4744 __ RecordComment("Deferred TaggedToI: not a heap number");
4487 DeoptimizeIf(not_equal, instr->environment()); 4745 DeoptimizeIf(not_equal, instr->environment());
4488 4746
4489 XMMRegister xmm_temp = ToDoubleRegister(instr->temp()); 4747 XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
4490 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); 4748 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
4491 __ cvttsd2si(input_reg, Operand(xmm0)); 4749 __ cvttsd2si(input_reg, Operand(xmm0));
4492 __ cvtsi2sd(xmm_temp, Operand(input_reg)); 4750 __ cvtsi2sd(xmm_temp, Operand(input_reg));
4493 __ ucomisd(xmm0, xmm_temp); 4751 __ ucomisd(xmm0, xmm_temp);
4494 __ RecordComment("Deferred TaggedToI: lost precision"); 4752 __ RecordComment("Deferred TaggedToI: lost precision");
4495 DeoptimizeIf(not_equal, instr->environment()); 4753 DeoptimizeIf(not_equal, instr->environment());
4496 __ RecordComment("Deferred TaggedToI: NaN"); 4754 __ RecordComment("Deferred TaggedToI: NaN");
4497 DeoptimizeIf(parity_even, instr->environment()); // NaN. 4755 DeoptimizeIf(parity_even, instr->environment()); // NaN.
4498 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4756 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4499 __ test(input_reg, Operand(input_reg)); 4757 __ test(input_reg, Operand(input_reg));
4500 __ j(not_zero, &done); 4758 __ j(not_zero, &done);
4501 __ movmskpd(input_reg, xmm0); 4759 __ movmskpd(input_reg, xmm0);
4502 __ and_(input_reg, 1); 4760 __ and_(input_reg, 1);
4503 __ RecordComment("Deferred TaggedToI: minus zero"); 4761 __ RecordComment("Deferred TaggedToI: minus zero");
4504 DeoptimizeIf(not_zero, instr->environment()); 4762 DeoptimizeIf(not_zero, instr->environment());
4505 } 4763 }
4764 } else {
4765 UNREACHABLE();
4506 } 4766 }
4507 __ bind(&done); 4767 __ bind(&done);
4508 } 4768 }
4509 4769
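
The SSE2 branch above detects an inexact conversion by converting back and comparing, and distinguishes -0 with movmskpd. The same logic in C++, with NaN and range guarded up front so every cast is well defined:

#include <cmath>
#include <cstdint>

// Non-truncating double -> int32: fail (deoptimize) on NaN, fractional
// values, out-of-range values, and -0 when minus zero is observable.
bool DoubleToInt32Exact(double d, bool bailout_on_minus_zero, int32_t* out) {
  if (std::isnan(d)) return false;                          // parity_even
  if (d < -2147483648.0 || d >= 2147483648.0) return false; // overflow
  int32_t converted = static_cast<int32_t>(d);              // cvttsd2si
  if (static_cast<double>(converted) != d) return false;    // lost precision
  if (bailout_on_minus_zero && converted == 0 && std::signbit(d))
    return false;                                           // movmskpd & 1
  *out = converted;
  return true;
}
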
4510 4770
4511 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { 4771 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4512 class DeferredTaggedToI: public LDeferredCode { 4772 class DeferredTaggedToI: public LDeferredCode {
4513 public: 4773 public:
4514 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) 4774 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4515 : LDeferredCode(codegen), instr_(instr) { } 4775 : LDeferredCode(codegen), instr_(instr) { }
(...skipping 22 matching lines...)
4538 4798
4539 4799
4540 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { 4800 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4541 LOperand* input = instr->value(); 4801 LOperand* input = instr->value();
4542 ASSERT(input->IsRegister()); 4802 ASSERT(input->IsRegister());
4543 LOperand* temp = instr->temp(); 4803 LOperand* temp = instr->temp();
4544 ASSERT(temp == NULL || temp->IsRegister()); 4804 ASSERT(temp == NULL || temp->IsRegister());
4545 LOperand* result = instr->result(); 4805 LOperand* result = instr->result();
4546 ASSERT(result->IsDoubleRegister()); 4806 ASSERT(result->IsDoubleRegister());
4547 4807
4548 Register input_reg = ToRegister(input); 4808 if (CpuFeatures::IsSupported(SSE2)) {
4549 XMMRegister result_reg = ToDoubleRegister(result); 4809 CpuFeatures::Scope scope(SSE2);
4810 Register input_reg = ToRegister(input);
4811 XMMRegister result_reg = ToDoubleRegister(result);
4550 4812
4551 bool deoptimize_on_minus_zero = 4813 bool deoptimize_on_minus_zero =
4552 instr->hydrogen()->deoptimize_on_minus_zero(); 4814 instr->hydrogen()->deoptimize_on_minus_zero();
4553 Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg; 4815 Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
4554 4816
4555 EmitNumberUntagD(input_reg, 4817 EmitNumberUntagD(input_reg,
4556 temp_reg, 4818 temp_reg,
4557 result_reg, 4819 result_reg,
4558 instr->hydrogen()->deoptimize_on_undefined(), 4820 instr->hydrogen()->deoptimize_on_undefined(),
4559 deoptimize_on_minus_zero, 4821 deoptimize_on_minus_zero,
4560 instr->environment()); 4822 instr->environment());
4823 } else {
4824 UNIMPLEMENTED();
4825 }
4561 } 4826 }
4562 4827
4563 4828
4564 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { 4829 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4565 LOperand* input = instr->value(); 4830 LOperand* input = instr->value();
4566 ASSERT(input->IsDoubleRegister()); 4831 ASSERT(input->IsDoubleRegister());
4567 LOperand* result = instr->result(); 4832 LOperand* result = instr->result();
4568 ASSERT(result->IsRegister()); 4833 ASSERT(result->IsRegister());
4834 CpuFeatures::Scope scope(SSE2);
4569 4835
4570 XMMRegister input_reg = ToDoubleRegister(input); 4836 XMMRegister input_reg = ToDoubleRegister(input);
4571 Register result_reg = ToRegister(result); 4837 Register result_reg = ToRegister(result);
4572 4838
4573 if (instr->truncating()) { 4839 if (instr->truncating()) {
4574 // Performs a truncating conversion of a floating point number as used by 4840 // Performs a truncating conversion of a floating point number as used by
4575 // the JS bitwise operations. 4841 // the JS bitwise operations.
4576 __ cvttsd2si(result_reg, Operand(input_reg)); 4842 __ cvttsd2si(result_reg, Operand(input_reg));
4577 __ cmp(result_reg, 0x80000000u); 4843 __ cmp(result_reg, 0x80000000u);
4578 if (CpuFeatures::IsSupported(SSE3)) { 4844 if (CpuFeatures::IsSupported(SSE3)) {
(...skipping 169 matching lines...)
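
For the truncating branch of DoDoubleToI above: the JS bitwise operators need ECMA-262 ToInt32, and cvttsd2si only covers in-range inputs, returning the 0x80000000 marker otherwise, hence the compare and the SSE3/slower fallbacks in the elided lines. The full contract, written portably:

#include <cmath>
#include <cstdint>

// ECMA-262 ToInt32: drop the fraction, reduce modulo 2^32, reinterpret
// the result as a signed 32-bit integer; NaN and infinities map to 0.
int32_t ToInt32(double d) {
  if (std::isnan(d) || std::isinf(d)) return 0;
  double t = std::trunc(d);               // drop the fraction
  double m = std::fmod(t, 4294967296.0);  // reduce modulo 2^32
  if (m < 0) m += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(m));
}
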
4748 Operand operand = ToOperand(instr->value()); 5014 Operand operand = ToOperand(instr->value());
4749 __ cmp(operand, target); 5015 __ cmp(operand, target);
4750 } 5016 }
4751 DeoptimizeIf(not_equal, instr->environment()); 5017 DeoptimizeIf(not_equal, instr->environment());
4752 } 5018 }
4753 5019
4754 5020
4755 void LCodeGen::DoCheckMapCommon(Register reg, 5021 void LCodeGen::DoCheckMapCommon(Register reg,
4756 Handle<Map> map, 5022 Handle<Map> map,
4757 CompareMapMode mode, 5023 CompareMapMode mode,
4758 LEnvironment* env) { 5024 LInstruction* instr) {
4759 Label success; 5025 Label success;
4760 __ CompareMap(reg, map, &success, mode); 5026 __ CompareMap(reg, map, &success, mode);
4761 DeoptimizeIf(not_equal, env); 5027 DeoptimizeIf(not_equal, instr->environment());
4762 __ bind(&success); 5028 __ bind(&success);
4763 } 5029 }
4764 5030
4765 5031
4766 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { 5032 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4767 LOperand* input = instr->value(); 5033 LOperand* input = instr->value();
4768 ASSERT(input->IsRegister()); 5034 ASSERT(input->IsRegister());
4769 Register reg = ToRegister(input); 5035 Register reg = ToRegister(input);
4770 5036
4771 Label success; 5037 Label success;
4772 SmallMapList* map_set = instr->hydrogen()->map_set(); 5038 SmallMapList* map_set = instr->hydrogen()->map_set();
4773 for (int i = 0; i < map_set->length() - 1; i++) { 5039 for (int i = 0; i < map_set->length() - 1; i++) {
4774 Handle<Map> map = map_set->at(i); 5040 Handle<Map> map = map_set->at(i);
4775 __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP); 5041 __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
4776 __ j(equal, &success); 5042 __ j(equal, &success);
4777 } 5043 }
4778 Handle<Map> map = map_set->last(); 5044 Handle<Map> map = map_set->last();
4779 DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment()); 5045 DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
4780 __ bind(&success); 5046 __ bind(&success);
4781 } 5047 }
4782 5048
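
Both helpers above implement the same contract: an object passes if its map (hidden class) pointer is identical to one of the expected maps, and only the final mismatch deoptimizes. A conceptual sketch with hypothetical types:

#include <cstddef>

struct Map;                          // compared by identity only
struct HeapObject { const Map* map; };

// DoCheckMapCommon: one expected map; a mismatch means deoptimize.
inline bool CheckMap(const HeapObject* obj, const Map* expected) {
  return obj->map == expected;       // CompareMap + DeoptimizeIf(not_equal)
}

// DoCheckMaps: any map in the set passes; the last compare carries the deopt.
inline bool CheckMaps(const HeapObject* obj,
                      const Map* const* maps, size_t count) {
  for (size_t i = 0; i < count; ++i)
    if (obj->map == maps[i]) return true;  // j(equal, &success)
  return false;                            // deoptimize
}
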
4783 5049
4784 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { 5050 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5051 CpuFeatures::Scope scope(SSE2);
4785 XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); 5052 XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
4786 Register result_reg = ToRegister(instr->result()); 5053 Register result_reg = ToRegister(instr->result());
4787 __ ClampDoubleToUint8(value_reg, xmm0, result_reg); 5054 __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
4788 } 5055 }
4789 5056
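
ClampDoubleToUint8 feeds pixel-array stores; its assumed contract, matching canvas semantics, is clamp to [0, 255] with round-to-nearest (ties to even, the default SSE rounding mode) and NaN mapping to 0:

#include <cmath>
#include <cstdint>

// Assumed semantics of the ClampDoubleToUint8 macro used above.
uint8_t ClampDoubleToUint8(double value) {
  if (!(value > 0.0)) return 0;      // NaN, -0, and negatives
  if (value >= 255.0) return 255;
  return static_cast<uint8_t>(std::nearbyint(value));  // round to nearest even
}
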
4790 5057
4791 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { 5058 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4792 ASSERT(instr->unclamped()->Equals(instr->result())); 5059 ASSERT(instr->unclamped()->Equals(instr->result()));
4793 Register value_reg = ToRegister(instr->result()); 5060 Register value_reg = ToRegister(instr->result());
4794 __ ClampUint8(value_reg); 5061 __ ClampUint8(value_reg);
4795 } 5062 }
4796 5063
4797 5064
4798 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { 5065 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5066 CpuFeatures::Scope scope(SSE2);
5067
4799 ASSERT(instr->unclamped()->Equals(instr->result())); 5068 ASSERT(instr->unclamped()->Equals(instr->result()));
4800 Register input_reg = ToRegister(instr->unclamped()); 5069 Register input_reg = ToRegister(instr->unclamped());
4801 Label is_smi, done, heap_number; 5070 Label is_smi, done, heap_number;
4802 5071
4803 __ JumpIfSmi(input_reg, &is_smi); 5072 __ JumpIfSmi(input_reg, &is_smi);
4804 5073
4805 // Check for heap number 5074 // Check for heap number
4806 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), 5075 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4807 factory()->heap_number_map()); 5076 factory()->heap_number_map());
4808 __ j(equal, &heap_number, Label::kNear); 5077 __ j(equal, &heap_number, Label::kNear);
(...skipping 26 matching lines...) Expand all
4835 5104
4836 Handle<JSObject> holder = instr->holder(); 5105 Handle<JSObject> holder = instr->holder();
4837 Handle<JSObject> current_prototype = instr->prototype(); 5106 Handle<JSObject> current_prototype = instr->prototype();
4838 5107
4839 // Load prototype object. 5108 // Load prototype object.
4840 __ LoadHeapObject(reg, current_prototype); 5109 __ LoadHeapObject(reg, current_prototype);
4841 5110
4842 // Check prototype maps up to the holder. 5111 // Check prototype maps up to the holder.
4843 while (!current_prototype.is_identical_to(holder)) { 5112 while (!current_prototype.is_identical_to(holder)) {
4844 DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()), 5113 DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
4845 ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); 5114 ALLOW_ELEMENT_TRANSITION_MAPS, instr);
4846 5115
4847 current_prototype = 5116 current_prototype =
4848 Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype())); 5117 Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
4849 // Load next prototype object. 5118 // Load next prototype object.
4850 __ LoadHeapObject(reg, current_prototype); 5119 __ LoadHeapObject(reg, current_prototype);
4851 } 5120 }
4852 5121
4853 // Check the holder map. 5122 // Check the holder map.
4854 DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()), 5123 DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
4855 ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); 5124 ALLOW_ELEMENT_TRANSITION_MAPS, instr);
4856 } 5125 }
4857 5126
4858 5127
4859 void LCodeGen::DoAllocateObject(LAllocateObject* instr) { 5128 void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
4860 class DeferredAllocateObject: public LDeferredCode { 5129 class DeferredAllocateObject: public LDeferredCode {
4861 public: 5130 public:
4862 DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr) 5131 DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
4863 : LDeferredCode(codegen), instr_(instr) { } 5132 : LDeferredCode(codegen), instr_(instr) { }
4864 virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); } 5133 virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
4865 virtual LInstruction* instr() { return instr_; } 5134 virtual LInstruction* instr() { return instr_; }
(...skipping 516 matching lines...)
5382 __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset)); 5651 __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
5383 5652
5384 // Check the marker in the calling frame. 5653 // Check the marker in the calling frame.
5385 __ bind(&check_frame_marker); 5654 __ bind(&check_frame_marker);
5386 __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset), 5655 __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
5387 Immediate(Smi::FromInt(StackFrame::CONSTRUCT))); 5656 Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
5388 } 5657 }
5389 5658
5390 5659
5391 void LCodeGen::EnsureSpaceForLazyDeopt() { 5660 void LCodeGen::EnsureSpaceForLazyDeopt() {
5392 // Ensure that we have enough space after the previous lazy-bailout 5661 if (!info()->IsStub()) {
5393 // instruction for patching the code here. 5662 // Ensure that we have enough space after the previous lazy-bailout
5394 int current_pc = masm()->pc_offset(); 5663 // instruction for patching the code here.
5395 int patch_size = Deoptimizer::patch_size(); 5664 int current_pc = masm()->pc_offset();
5396 if (current_pc < last_lazy_deopt_pc_ + patch_size) { 5665 int patch_size = Deoptimizer::patch_size();
5397 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc; 5666 if (current_pc < last_lazy_deopt_pc_ + patch_size) {
5398 __ Nop(padding_size); 5667 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
5668 __ Nop(padding_size);
5669 }
5399 } 5670 }
5400 last_lazy_deopt_pc_ = masm()->pc_offset(); 5671 last_lazy_deopt_pc_ = masm()->pc_offset();
5401 } 5672 }
5402 5673
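
The change above skips the padding for stubs; for full code the invariant is that consecutive lazy-deopt sites sit at least patch_size bytes apart, because the patcher rewrites that many bytes in place at the previous site. The arithmetic, as a small helper:

// NOP bytes needed so the previous lazy-deopt site can be patched with a
// patch_size-byte call without overwriting the code emitted here.
int LazyDeoptPaddingBytes(int current_pc, int last_lazy_deopt_pc,
                          int patch_size) {
  int first_safe_pc = last_lazy_deopt_pc + patch_size;
  return current_pc < first_safe_pc ? first_safe_pc - current_pc : 0;
}
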
5403 5674
5404 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { 5675 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5405 EnsureSpaceForLazyDeopt(); 5676 EnsureSpaceForLazyDeopt();
5406 ASSERT(instr->HasEnvironment()); 5677 ASSERT(instr->HasEnvironment());
5407 LEnvironment* env = instr->environment(); 5678 LEnvironment* env = instr->environment();
5408 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); 5679 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
(...skipping 203 matching lines...)
5612 FixedArray::kHeaderSize - kPointerSize)); 5883 FixedArray::kHeaderSize - kPointerSize));
5613 __ bind(&done); 5884 __ bind(&done);
5614 } 5885 }
5615 5886
5616 5887
5617 #undef __ 5888 #undef __
5618 5889
5619 } } // namespace v8::internal 5890 } } // namespace v8::internal
5620 5891
5621 #endif // V8_TARGET_ARCH_IA32 5892 #endif // V8_TARGET_ARCH_IA32