Chromium Code Reviews

Side by Side Diff: src/ia32/lithium-codegen-ia32.cc

Issue 10701054: Enable stub generation using Hydrogen/Lithium (again) (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Review feedback Created 8 years ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 12 matching lines...)
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #include "v8.h" 28 #include "v8.h"
29 29
30 #if defined(V8_TARGET_ARCH_IA32) 30 #if defined(V8_TARGET_ARCH_IA32)
31 31
32 #include "ia32/lithium-codegen-ia32.h" 32 #include "ia32/lithium-codegen-ia32.h"
33 #include "ic.h"
33 #include "code-stubs.h" 34 #include "code-stubs.h"
34 #include "deoptimizer.h" 35 #include "deoptimizer.h"
35 #include "stub-cache.h" 36 #include "stub-cache.h"
36 #include "codegen.h" 37 #include "codegen.h"
37 38
38 namespace v8 { 39 namespace v8 {
39 namespace internal { 40 namespace internal {
40 41
41 42
42 // When invoking builtins, we need to record the safepoint in the middle of 43 // When invoking builtins, we need to record the safepoint in the middle of
(...skipping 20 matching lines...)
63 Safepoint::DeoptMode deopt_mode_; 64 Safepoint::DeoptMode deopt_mode_;
64 }; 65 };
65 66
66 67
67 #define __ masm()-> 68 #define __ masm()->
68 69
69 bool LCodeGen::GenerateCode() { 70 bool LCodeGen::GenerateCode() {
70 HPhase phase("Z_Code generation", chunk()); 71 HPhase phase("Z_Code generation", chunk());
71 ASSERT(is_unused()); 72 ASSERT(is_unused());
72 status_ = GENERATING; 73 status_ = GENERATING;
73 CpuFeatures::Scope scope(SSE2);
74 74
75 CodeStub::GenerateFPStubs(); 75 CodeStub::GenerateFPStubs();
76 76
77 // Open a frame scope to indicate that there is a frame on the stack. The 77 // Open a frame scope to indicate that there is a frame on the stack. The
78 // MANUAL indicates that the scope shouldn't actually generate code to set up 78 // MANUAL indicates that the scope shouldn't actually generate code to set up
79 // the frame (that is done in GeneratePrologue). 79 // the frame (that is done in GeneratePrologue).
80 FrameScope frame_scope(masm_, StackFrame::MANUAL); 80 FrameScope frame_scope(masm_, StackFrame::MANUAL);
81 81
82 dynamic_frame_alignment_ = (chunk()->num_double_slots() > 2 && 82 dynamic_frame_alignment_ = info()->IsOptimizing() &&
83 !chunk()->graph()->is_recursive()) || 83 ((chunk()->num_double_slots() > 2 &&
84 !info()->osr_ast_id().IsNone(); 84 !chunk()->graph()->is_recursive()) ||
85 !info()->osr_ast_id().IsNone());
85 86
86 return GeneratePrologue() && 87 return GeneratePrologue() &&
87 GenerateBody() && 88 GenerateBody() &&
88 GenerateDeferredCode() && 89 GenerateDeferredCode() &&
90 GenerateJumpTable() &&
89 GenerateSafepointTable(); 91 GenerateSafepointTable();
90 } 92 }
91 93
92 94
93 void LCodeGen::FinishCode(Handle<Code> code) { 95 void LCodeGen::FinishCode(Handle<Code> code) {
94 ASSERT(is_done()); 96 ASSERT(is_done());
95 code->set_stack_slots(GetStackSlotCount()); 97 code->set_stack_slots(GetStackSlotCount());
96 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); 98 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
97 PopulateDeoptimizationData(code); 99 PopulateDeoptimizationData(code);
98 Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code); 100 if (!info()->IsStub()) {
101 Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
102 }
99 } 103 }
100 104
101 105
102 void LCodeGen::Abort(const char* reason) { 106 void LCodeGen::Abort(const char* reason) {
103 info()->set_bailout_reason(reason); 107 info()->set_bailout_reason(reason);
104 status_ = ABORTED; 108 status_ = ABORTED;
105 } 109 }
106 110
107 111
108 void LCodeGen::Comment(const char* format, ...) { 112 void LCodeGen::Comment(const char* format, ...) {
(...skipping 10 matching lines...)
119 size_t length = builder.position(); 123 size_t length = builder.position();
120 Vector<char> copy = Vector<char>::New(length + 1); 124 Vector<char> copy = Vector<char>::New(length + 1);
121 memcpy(copy.start(), builder.Finalize(), copy.length()); 125 memcpy(copy.start(), builder.Finalize(), copy.length());
122 masm()->RecordComment(copy.start()); 126 masm()->RecordComment(copy.start());
123 } 127 }
124 128
125 129
126 bool LCodeGen::GeneratePrologue() { 130 bool LCodeGen::GeneratePrologue() {
127 ASSERT(is_generating()); 131 ASSERT(is_generating());
128 132
129 ProfileEntryHookStub::MaybeCallEntryHook(masm_); 133 if (info()->IsOptimizing()) {
134 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
130 135
131 #ifdef DEBUG 136 #ifdef DEBUG
132 if (strlen(FLAG_stop_at) > 0 && 137 if (strlen(FLAG_stop_at) > 0 &&
133 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { 138 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
134 __ int3(); 139 __ int3();
135 } 140 }
136 #endif 141 #endif
137 142
138 // Strict mode functions and builtins need to replace the receiver 143 // Strict mode functions and builtins need to replace the receiver
139 // with undefined when called as functions (without an explicit 144 // with undefined when called as functions (without an explicit
140 // receiver object). ecx is zero for method calls and non-zero for 145 // receiver object). ecx is zero for method calls and non-zero for
141 // function calls. 146 // function calls.
142 if (!info_->is_classic_mode() || info_->is_native()) { 147 if (!info_->is_classic_mode() || info_->is_native()) {
143 Label begin; 148 Label begin;
144 __ bind(&begin); 149 __ bind(&begin);
145 Label ok; 150 Label ok;
146 __ test(ecx, Operand(ecx)); 151 __ test(ecx, Operand(ecx));
147 __ j(zero, &ok, Label::kNear); 152 __ j(zero, &ok, Label::kNear);
148 // +1 for return address. 153 // +1 for return address.
149 int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize; 154 int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
150 __ mov(Operand(esp, receiver_offset), 155 __ mov(Operand(esp, receiver_offset),
151 Immediate(isolate()->factory()->undefined_value())); 156 Immediate(isolate()->factory()->undefined_value()));
152 __ bind(&ok); 157 __ bind(&ok);
153 ASSERT(!FLAG_age_code || 158 ASSERT(!FLAG_age_code ||
154 (kSizeOfOptimizedStrictModePrologue == ok.pos() - begin.pos())); 159 (kSizeOfOptimizedStrictModePrologue == ok.pos() - begin.pos()));
160 }
155 } 161 }
156 162
157
158 if (dynamic_frame_alignment_) { 163 if (dynamic_frame_alignment_) {
159 Label begin; 164 Label begin;
160 __ bind(&begin); 165 __ bind(&begin);
161 // Move state of dynamic frame alignment into edx. 166 // Move state of dynamic frame alignment into edx.
162 __ mov(edx, Immediate(kNoAlignmentPadding)); 167 __ mov(edx, Immediate(kNoAlignmentPadding));
163 168
164 Label do_not_pad, align_loop; 169 Label do_not_pad, align_loop;
165 STATIC_ASSERT(kDoubleSize == 2 * kPointerSize); 170 STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
166 // Align esp + 4 to a multiple of 2 * kPointerSize. 171 // Align esp + 4 to a multiple of 2 * kPointerSize.
167 __ test(esp, Immediate(kPointerSize)); 172 __ test(esp, Immediate(kPointerSize));
(...skipping 10 matching lines...)
178 __ add(Operand(ebx), Immediate(kPointerSize)); 183 __ add(Operand(ebx), Immediate(kPointerSize));
179 __ dec(ecx); 184 __ dec(ecx);
180 __ j(not_zero, &align_loop, Label::kNear); 185 __ j(not_zero, &align_loop, Label::kNear);
181 __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue)); 186 __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
182 __ bind(&do_not_pad); 187 __ bind(&do_not_pad);
183 ASSERT(!FLAG_age_code || 188 ASSERT(!FLAG_age_code ||
184 (kSizeOfOptimizedAlignStackPrologue == 189 (kSizeOfOptimizedAlignStackPrologue ==
185 do_not_pad.pos() - begin.pos())); 190 do_not_pad.pos() - begin.pos()));
186 } 191 }
187 192
188 __ push(ebp); // Caller's frame pointer. 193 if (NeedsEagerFrame()) {
189 __ mov(ebp, esp); 194 ASSERT(!frame_is_built_);
190 __ push(esi); // Callee's context. 195 frame_is_built_ = true;
191 __ push(edi); // Callee's JS function. 196 __ push(ebp); // Caller's frame pointer.
197 __ mov(ebp, esp);
198 if (info()->IsStub()) {
199 __ push(esi);
Jakob Kummerow 2012/11/28 16:28:22 nit: could hoist this out of the if/else blocks.
danno 2012/11/30 16:23:24 Done.
200 __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
201 } else {
202 __ push(esi); // Callee's context.
203 __ push(edi); // Callee's JS function.
204 }
205 }
192 206
193 if (dynamic_frame_alignment_ && FLAG_debug_code) { 207 if (dynamic_frame_alignment_ && FLAG_debug_code) {
194 __ test(esp, Immediate(kPointerSize)); 208 __ test(esp, Immediate(kPointerSize));
195 __ Assert(zero, "frame is expected to be aligned"); 209 __ Assert(zero, "frame is expected to be aligned");
196 } 210 }
197 211
198 // Reserve space for the stack slots needed by the code. 212 // Reserve space for the stack slots needed by the code.
199 int slots = GetStackSlotCount(); 213 int slots = GetStackSlotCount();
200 ASSERT_GE(slots, 1); 214 ASSERT(slots != 0 || !info()->IsOptimizing());
201 if (slots == 1) { 215 if (slots > 0) {
202 if (dynamic_frame_alignment_) { 216 if (slots == 1) {
203 __ push(edx); 217 if (dynamic_frame_alignment_) {
218 __ push(edx);
219 } else {
220 __ push(Immediate(kNoAlignmentPadding));
221 }
204 } else { 222 } else {
205 __ push(Immediate(kNoAlignmentPadding)); 223 if (FLAG_debug_code) {
206 } 224 __ mov(Operand(eax), Immediate(slots));
207 } else { 225 Label loop;
208 if (FLAG_debug_code) { 226 __ bind(&loop);
209 __ mov(Operand(eax), Immediate(slots)); 227 __ push(Immediate(kSlotsZapValue));
210 Label loop; 228 __ dec(eax);
211 __ bind(&loop); 229 __ j(not_zero, &loop);
212 __ push(Immediate(kSlotsZapValue)); 230 } else {
213 __ dec(eax); 231 __ sub(Operand(esp), Immediate(slots * kPointerSize));
214 __ j(not_zero, &loop); 232 #ifdef _MSC_VER
215 } else { 233 // On windows, you may not access the stack more than one page below
216 __ sub(Operand(esp), Immediate(slots * kPointerSize)); 234 // the most recently mapped page. To make the allocated area randomly
217 #ifdef _MSC_VER 235 // accessible, we write to each page in turn (the value is irrelevant).
218 // On windows, you may not access the stack more than one page below 236 const int kPageSize = 4 * KB;
219 // the most recently mapped page. To make the allocated area randomly 237 for (int offset = slots * kPointerSize - kPageSize;
220 // accessible, we write to each page in turn (the value is irrelevant). 238 offset > 0;
221 const int kPageSize = 4 * KB; 239 offset -= kPageSize) {
222 for (int offset = slots * kPointerSize - kPageSize; 240 __ mov(Operand(esp, offset), eax);
223 offset > 0; 241 }
224 offset -= kPageSize) { 242 #endif
225 __ mov(Operand(esp, offset), eax);
226 } 243 }
227 #endif
228 }
229 244
230 // Store dynamic frame alignment state in the first local. 245 // Store dynamic frame alignment state in the first local.
231 if (dynamic_frame_alignment_) { 246 if (dynamic_frame_alignment_) {
232 __ mov(Operand(ebp, 247 __ mov(Operand(ebp,
233 JavaScriptFrameConstants::kDynamicAlignmentStateOffset), 248 JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
234 edx); 249 edx);
235 } else { 250 } else {
236 __ mov(Operand(ebp, 251 __ mov(Operand(ebp,
237 JavaScriptFrameConstants::kDynamicAlignmentStateOffset), 252 JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
238 Immediate(kNoAlignmentPadding)); 253 Immediate(kNoAlignmentPadding));
254 }
239 } 255 }
240 } 256 }
241 257
242 // Possibly allocate a local context. 258 // Possibly allocate a local context.
243 int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; 259 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
244 if (heap_slots > 0) { 260 if (heap_slots > 0) {
245 Comment(";;; Allocate local context"); 261 Comment(";;; Allocate local context");
246 // Argument to NewContext is the function, which is still in edi. 262 // Argument to NewContext is the function, which is still in edi.
247 __ push(edi); 263 __ push(edi);
248 if (heap_slots <= FastNewContextStub::kMaximumSlots) { 264 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
249 FastNewContextStub stub(heap_slots); 265 FastNewContextStub stub(heap_slots);
250 __ CallStub(&stub); 266 __ CallStub(&stub);
251 } else { 267 } else {
252 __ CallRuntime(Runtime::kNewFunctionContext, 1); 268 __ CallRuntime(Runtime::kNewFunctionContext, 1);
253 } 269 }
(...skipping 19 matching lines...)
273 context_offset, 289 context_offset,
274 eax, 290 eax,
275 ebx, 291 ebx,
276 kDontSaveFPRegs); 292 kDontSaveFPRegs);
277 } 293 }
278 } 294 }
279 Comment(";;; End allocate local context"); 295 Comment(";;; End allocate local context");
280 } 296 }
281 297
282 // Trace the call. 298 // Trace the call.
283 if (FLAG_trace) { 299 if (FLAG_trace && info()->IsOptimizing()) {
284 // We have not executed any compiled code yet, so esi still holds the 300 // We have not executed any compiled code yet, so esi still holds the
285 // incoming context. 301 // incoming context.
286 __ CallRuntime(Runtime::kTraceEnter, 0); 302 __ CallRuntime(Runtime::kTraceEnter, 0);
287 } 303 }
 288 return !is_aborted(); 304 return !is_aborted();
289 } 305 }
290 306
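A note on the two eager frames built in the prologue above: for stubs, a Smi-encoded StackFrame::STUB marker is pushed where a JS frame keeps the callee's JSFunction. A minimal C++ sketch of the resulting layouts (offsets relative to the new ebp, ia32 word size; illustration only, not V8 source):

    struct FrameSlot { int ebp_offset; const char* contents; };

    const FrameSlot kJsFunctionFrame[] = {
      {  0, "caller's ebp (new ebp points here)" },
      { -4, "esi: callee's context" },
      { -8, "edi: callee's JS function" },
    };

    const FrameSlot kStubFrame[] = {
      {  0, "caller's ebp (new ebp points here)" },
      { -4, "esi: context" },
      { -8, "Smi(StackFrame::STUB) marker" },
    };
    // The marker takes the function slot so the stack walker can classify
    // the frame without a JSFunction object.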
291 307
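The Windows stack-probing loop in the prologue above is easier to see with concrete numbers. A standalone sketch (hypothetical slot count; kPageSize and the loop bounds mirror the patch) that prints the offsets the generated writes touch:

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;      // ia32
      const int kPageSize = 4 * 1024;  // as in the patch
      int slots = 3000;                // hypothetical frame size
      // One write per page, walking from just below the old esp down
      // toward the new esp, so each guard page is faulted in order; the
      // stored value is irrelevant.
      for (int offset = slots * kPointerSize - kPageSize;
           offset > 0;
           offset -= kPageSize) {
        std::printf("mov [esp + %d], eax\n", offset);
      }
      return 0;
    }
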
292 bool LCodeGen::GenerateBody() { 308 bool LCodeGen::GenerateBody() {
293 ASSERT(is_generating()); 309 ASSERT(is_generating());
(...skipping 33 matching lines...)
327 } 343 }
328 } 344 }
329 instr->CompileToNative(this); 345 instr->CompileToNative(this);
330 } 346 }
331 } 347 }
332 EnsureSpaceForLazyDeopt(); 348 EnsureSpaceForLazyDeopt();
333 return !is_aborted(); 349 return !is_aborted();
334 } 350 }
335 351
336 352
353 bool LCodeGen::GenerateJumpTable() {
354 Label needs_frame_not_call;
355 bool has_generated_needs_frame_not_call = false;
356 Label needs_frame_is_call;
357 bool has_generated_needs_frame_is_call = false;
358 for (int i = 0; i < jump_table_.length(); i++) {
359 __ bind(&jump_table_[i].label);
360 Address entry = jump_table_[i].address;
361 if (jump_table_[i].needs_frame) {
362 __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
363 if (jump_table_[i].is_lazy_deopt) {
364 if (!has_generated_needs_frame_is_call) {
Jakob Kummerow 2012/11/28 16:28:22 I think you can replace the check for !has_generat
danno 2012/11/30 16:23:24 Done.
365 has_generated_needs_frame_is_call = true;
366 __ bind(&needs_frame_is_call);
367 __ push(esi);
368 // If there is not frame, we don't have access to the JSFunction that
Jakob Kummerow 2012/11/28 16:28:22 "there is not frame" has not grammar. Aside from t
danno 2012/11/30 16:23:24 Done.
369 // needs to be put into the frame.
370 ASSERT(info()->IsStub());
371 __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
372 // Push a PC inside the function so that the deopt code can find where
373 // the deopt comes from. It doesn't have to be the precise return
374 // address of a "calling" LAZY deopt, it only has to be somewhere
375 // inside the code body.
376 Label push_approx_pc;
377 __ call(&push_approx_pc);
378 __ bind(&push_approx_pc);
 379 // Push the continuation which was stashed where the ebp should
380 // be. Replace it with the saved ebp.
381 __ push(MemOperand(esp, 3 * kPointerSize));
382 __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
383 __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
384 __ ret(0); // Call the continuation without clobbering registers.
385 } else {
386 __ jmp(&needs_frame_is_call);
387 }
388 } else {
389 if (!has_generated_needs_frame_not_call) {
Jakob Kummerow 2012/11/28 16:28:22 same here
danno 2012/11/30 16:23:24 Done.
390 has_generated_needs_frame_not_call = true;
391 __ bind(&needs_frame_not_call);
392 __ push(esi);
393 // If there is not frame, we don't have access to the JSFunction that
Jakob Kummerow 2012/11/28 16:28:22 see above.
danno 2012/11/30 16:23:24 Done.
394 // needs to be put into the frame.
395 ASSERT(info()->IsStub());
396 __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
 397 // Push the continuation which was stashed where the ebp should
398 // be. Replace it with the saved ebp.
399 __ push(MemOperand(esp, 2 * kPointerSize));
400 __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
401 __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
402 __ ret(0); // Call the continuation without clobbering registers.
403 } else {
404 __ jmp(&needs_frame_not_call);
405 }
406 }
407 } else {
408 if (jump_table_[i].is_lazy_deopt) {
409 __ call(entry, RelocInfo::RUNTIME_ENTRY);
410 } else {
411 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
412 }
413 }
414 }
415 return !is_aborted();
416 }
417
418
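The needs-frame paths in GenerateJumpTable above do some subtle stack surgery: the deopt entry is pushed first, a STUB frame is assembled on top of it, the slot the entry occupies (where the saved ebp belongs) is re-pushed and then overwritten with ebp, and a final ret jumps to the entry without clobbering any register. A small simulation of the is_call variant (sketch only; one vector element per ia32 stack slot, back() is the top):

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> s;
      s.push_back("deopt entry (continuation)");   // push Immediate(entry)
      s.push_back("esi (context)");                // push esi
      s.push_back("Smi(StackFrame::STUB)");        // push STUB marker
      s.push_back("approx pc in code body");       // call push_approx_pc
      std::string cont = s[s.size() - 4];
      s.push_back(cont);                  // push [esp + 3 * kPointerSize]
      s[s.size() - 5] = "saved ebp";      // mov [esp + 4 * kPointerSize], ebp
      // lea ebp, [esp + 4 * kPointerSize]: ebp now points at "saved ebp".
      // ret pops the re-pushed continuation into eip, leaving a STUB frame:
      s.pop_back();
      for (size_t i = 0; i < s.size(); ++i)
        std::printf("[ebp - %d] %s\n", int(4 * i), s[i].c_str());
      return 0;
    }
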
337 bool LCodeGen::GenerateDeferredCode() { 419 bool LCodeGen::GenerateDeferredCode() {
338 ASSERT(is_generating()); 420 ASSERT(is_generating());
339 if (deferred_.length() > 0) { 421 if (deferred_.length() > 0) {
340 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { 422 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
341 LDeferredCode* code = deferred_[i]; 423 LDeferredCode* code = deferred_[i];
342 __ bind(code->entry()); 424 __ bind(code->entry());
425 if (NeedsDeferredFrame()) {
426 Comment(";;; Deferred build frame",
427 code->instruction_index(),
428 code->instr()->Mnemonic());
429 ASSERT(!frame_is_built_);
430 ASSERT(info()->IsStub());
431 frame_is_built_ = true;
432 // Build the frame in such a way that esi isn't trashed.
433 __ push(ebp); // Caller's frame pointer.
434 __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
435 __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
436 __ lea(ebp, Operand(esp, 2 * kPointerSize));
437 }
343 Comment(";;; Deferred code @%d: %s.", 438 Comment(";;; Deferred code @%d: %s.",
344 code->instruction_index(), 439 code->instruction_index(),
345 code->instr()->Mnemonic()); 440 code->instr()->Mnemonic());
346 code->Generate(); 441 code->Generate();
442 if (NeedsDeferredFrame()) {
443 Comment(";;; Deferred destory frame",
Jakob Kummerow 2012/11/28 16:28:22 nit: "destroy"
danno 2012/11/30 16:23:24 Done.
444 code->instruction_index(),
445 code->instr()->Mnemonic());
446 ASSERT(frame_is_built_);
447 frame_is_built_ = false;
448 __ mov(esp, ebp);
449 __ pop(ebp);
450 }
347 __ jmp(code->exit()); 451 __ jmp(code->exit());
348 } 452 }
349 } 453 }
350 454
351 // Deferred code is the last part of the instruction sequence. Mark 455 // Deferred code is the last part of the instruction sequence. Mark
352 // the generated code as done unless we bailed out. 456 // the generated code as done unless we bailed out.
353 if (!is_aborted()) status_ = DONE; 457 if (!is_aborted()) status_ = DONE;
354 return !is_aborted(); 458 return !is_aborted();
355 } 459 }
356 460
357 461
358 bool LCodeGen::GenerateSafepointTable() { 462 bool LCodeGen::GenerateSafepointTable() {
359 ASSERT(is_done()); 463 ASSERT(is_done());
464 if (!info()->IsStub()) {
465 // For lazy deoptimization we need space to patch a call after every call.
466 // Ensure there is always space for such patching, even if the code ends
467 // in a call.
468 int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
469 while (masm()->pc_offset() < target_offset) {
470 masm()->nop();
471 }
472 }
360 safepoints_.Emit(masm(), GetStackSlotCount()); 473 safepoints_.Emit(masm(), GetStackSlotCount());
361 return !is_aborted(); 474 return !is_aborted();
362 } 475 }
363 476
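The padding added to GenerateSafepointTable above reserves room to patch in a lazy-deopt call even when the code ends in a call; since the target is computed from the current offset, the loop always emits exactly Deoptimizer::patch_size() one-byte nops. Reduced to plain integers (sketch, not V8 source):

    int NopsForLazyDeoptPatch(int pc_offset, int patch_size) {
      int target_offset = pc_offset + patch_size;
      int nops = 0;
      while (pc_offset < target_offset) {  // masm()->nop() in the patch
        ++pc_offset;                       // nop is one byte on ia32
        ++nops;
      }
      return nops;  // == patch_size
    }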
364 477
365 Register LCodeGen::ToRegister(int index) const { 478 Register LCodeGen::ToRegister(int index) const {
366 return Register::FromAllocationIndex(index); 479 return Register::FromAllocationIndex(index);
367 } 480 }
368 481
369 482
370 XMMRegister LCodeGen::ToDoubleRegister(int index) const { 483 XMMRegister LCodeGen::ToDoubleRegister(int index) const {
371 return XMMRegister::FromAllocationIndex(index); 484 return XMMRegister::FromAllocationIndex(index);
372 } 485 }
373 486
374 487
488 bool LCodeGen::IsX87TopOfStack(LOperand* op) const {
489 return op->IsDoubleRegister();
490 }
491
492
375 Register LCodeGen::ToRegister(LOperand* op) const { 493 Register LCodeGen::ToRegister(LOperand* op) const {
376 ASSERT(op->IsRegister()); 494 ASSERT(op->IsRegister());
377 return ToRegister(op->index()); 495 return ToRegister(op->index());
378 } 496 }
379 497
380 498
381 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { 499 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
382 ASSERT(op->IsDoubleRegister()); 500 ASSERT(op->IsDoubleRegister());
383 return ToDoubleRegister(op->index()); 501 return ToDoubleRegister(op->index());
384 } 502 }
(...skipping 65 matching lines...)
450 // arguments index points to the first element of a sequence of tagged 568 // arguments index points to the first element of a sequence of tagged
451 // values on the stack that represent the arguments. This needs to be 569 // values on the stack that represent the arguments. This needs to be
452 // kept in sync with the LArgumentsElements implementation. 570 // kept in sync with the LArgumentsElements implementation.
453 *arguments_index = -environment->parameter_count(); 571 *arguments_index = -environment->parameter_count();
454 *arguments_count = environment->parameter_count(); 572 *arguments_count = environment->parameter_count();
455 573
456 WriteTranslation(environment->outer(), 574 WriteTranslation(environment->outer(),
457 translation, 575 translation,
458 arguments_index, 576 arguments_index,
459 arguments_count); 577 arguments_count);
460 int closure_id = *info()->closure() != *environment->closure() 578 bool has_closure_id = !info()->closure().is_null() &&
579 *info()->closure() != *environment->closure();
580 int closure_id = has_closure_id
461 ? DefineDeoptimizationLiteral(environment->closure()) 581 ? DefineDeoptimizationLiteral(environment->closure())
462 : Translation::kSelfLiteralId; 582 : Translation::kSelfLiteralId;
463 switch (environment->frame_type()) { 583 switch (environment->frame_type()) {
464 case JS_FUNCTION: 584 case JS_FUNCTION:
465 translation->BeginJSFrame(environment->ast_id(), closure_id, height); 585 translation->BeginJSFrame(environment->ast_id(), closure_id, height);
466 break; 586 break;
467 case JS_CONSTRUCT: 587 case JS_CONSTRUCT:
468 translation->BeginConstructStubFrame(closure_id, translation_size); 588 translation->BeginConstructStubFrame(closure_id, translation_size);
469 break; 589 break;
470 case JS_GETTER: 590 case JS_GETTER:
471 ASSERT(translation_size == 1); 591 ASSERT(translation_size == 1);
472 ASSERT(height == 0); 592 ASSERT(height == 0);
473 translation->BeginGetterStubFrame(closure_id); 593 translation->BeginGetterStubFrame(closure_id);
474 break; 594 break;
475 case JS_SETTER: 595 case JS_SETTER:
476 ASSERT(translation_size == 2); 596 ASSERT(translation_size == 2);
477 ASSERT(height == 0); 597 ASSERT(height == 0);
478 translation->BeginSetterStubFrame(closure_id); 598 translation->BeginSetterStubFrame(closure_id);
479 break; 599 break;
480 case ARGUMENTS_ADAPTOR: 600 case ARGUMENTS_ADAPTOR:
481 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); 601 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
482 break; 602 break;
603 case STUB:
604 translation->BeginCompiledStubFrame();
605 break;
606 default:
607 UNREACHABLE();
483 } 608 }
484 609
485 // Inlined frames which push their arguments cause the index to be 610 // Inlined frames which push their arguments cause the index to be
486 // bumped and another stack area to be used for materialization. 611 // bumped and another stack area to be used for materialization.
487 if (environment->entry() != NULL && 612 if (environment->entry() != NULL &&
488 environment->entry()->arguments_pushed()) { 613 environment->entry()->arguments_pushed()) {
489 *arguments_index = *arguments_index < 0 614 *arguments_index = *arguments_index < 0
490 ? GetStackSlotCount() 615 ? GetStackSlotCount()
491 : *arguments_index + *arguments_count; 616 : *arguments_index + *arguments_count;
492 *arguments_count = environment->entry()->arguments_count() + 1; 617 *arguments_count = environment->entry()->arguments_count() + 1;
(...skipping 114 matching lines...)
607 int argc, 732 int argc,
608 LInstruction* instr) { 733 LInstruction* instr) {
609 ASSERT(instr != NULL); 734 ASSERT(instr != NULL);
610 ASSERT(instr->HasPointerMap()); 735 ASSERT(instr->HasPointerMap());
611 LPointerMap* pointers = instr->pointer_map(); 736 LPointerMap* pointers = instr->pointer_map();
612 RecordPosition(pointers->position()); 737 RecordPosition(pointers->position());
613 738
614 __ CallRuntime(fun, argc); 739 __ CallRuntime(fun, argc);
615 740
616 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 741 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
742
743 ASSERT(info()->is_calling());
617 } 744 }
618 745
619 746
620 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, 747 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
621 int argc, 748 int argc,
622 LInstruction* instr, 749 LInstruction* instr,
623 LOperand* context) { 750 LOperand* context) {
624 if (context->IsRegister()) { 751 if (context->IsRegister()) {
625 if (!ToRegister(context).is(esi)) { 752 if (!ToRegister(context).is(esi)) {
626 __ mov(esi, ToRegister(context)); 753 __ mov(esi, ToRegister(context));
627 } 754 }
628 } else if (context->IsStackSlot()) { 755 } else if (context->IsStackSlot()) {
629 __ mov(esi, ToOperand(context)); 756 __ mov(esi, ToOperand(context));
630 } else if (context->IsConstantOperand()) { 757 } else if (context->IsConstantOperand()) {
631 HConstant* constant = 758 HConstant* constant =
632 chunk_->LookupConstant(LConstantOperand::cast(context)); 759 chunk_->LookupConstant(LConstantOperand::cast(context));
633 __ LoadHeapObject(esi, Handle<Context>::cast(constant->handle())); 760 __ LoadHeapObject(esi, Handle<Context>::cast(constant->handle()));
634 } else { 761 } else {
635 UNREACHABLE(); 762 UNREACHABLE();
636 } 763 }
637 764
638 __ CallRuntimeSaveDoubles(id); 765 __ CallRuntimeSaveDoubles(id);
639 RecordSafepointWithRegisters( 766 RecordSafepointWithRegisters(
640 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); 767 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
768
769 ASSERT(info()->is_calling());
641 } 770 }
642 771
643 772
644 void LCodeGen::RegisterEnvironmentForDeoptimization( 773 void LCodeGen::RegisterEnvironmentForDeoptimization(
645 LEnvironment* environment, Safepoint::DeoptMode mode) { 774 LEnvironment* environment, Safepoint::DeoptMode mode) {
646 if (!environment->HasBeenRegistered()) { 775 if (!environment->HasBeenRegistered()) {
647 // Physical stack frame layout: 776 // Physical stack frame layout:
648 // -x ............. -4 0 ..................................... y 777 // -x ............. -4 0 ..................................... y
649 // [incoming arguments] [spill slots] [pushed outgoing arguments] 778 // [incoming arguments] [spill slots] [pushed outgoing arguments]
650 779
(...skipping 25 matching lines...)
676 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); 805 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
677 deoptimizations_.Add(environment, zone()); 806 deoptimizations_.Add(environment, zone());
678 } 807 }
679 } 808 }
680 809
681 810
682 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { 811 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
683 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 812 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
684 ASSERT(environment->HasBeenRegistered()); 813 ASSERT(environment->HasBeenRegistered());
685 int id = environment->deoptimization_index(); 814 int id = environment->deoptimization_index();
686 Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER); 815 ASSERT(info()->IsOptimizing() || info()->IsStub());
816 Deoptimizer::BailoutType bailout_type = frame_is_built_
817 ? Deoptimizer::EAGER
818 : Deoptimizer::LAZY;
819 Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
687 if (entry == NULL) { 820 if (entry == NULL) {
688 Abort("bailout was not prepared"); 821 Abort("bailout was not prepared");
689 return; 822 return;
690 } 823 }
691 824
692 if (FLAG_deopt_every_n_times != 0) { 825 if (FLAG_deopt_every_n_times != 0) {
693 Handle<SharedFunctionInfo> shared(info_->shared_info()); 826 Handle<SharedFunctionInfo> shared(info_->shared_info());
694 Label no_deopt; 827 Label no_deopt;
695 __ pushfd(); 828 __ pushfd();
696 __ push(eax); 829 __ push(eax);
(...skipping 13 matching lines...)
710 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); 843 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
711 844
712 __ bind(&no_deopt); 845 __ bind(&no_deopt);
713 __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset), 846 __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset),
714 eax); 847 eax);
715 __ pop(ebx); 848 __ pop(ebx);
716 __ pop(eax); 849 __ pop(eax);
717 __ popfd(); 850 __ popfd();
718 } 851 }
719 852
853 ASSERT(info()->IsStub() || frame_is_built_);
854 bool lazy_deopt_needed = info()->IsStub();
720 if (cc == no_condition) { 855 if (cc == no_condition) {
721 if (FLAG_trap_on_deopt) __ int3(); 856 if (FLAG_trap_on_deopt) __ int3();
722 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); 857 if (lazy_deopt_needed) {
858 __ call(entry, RelocInfo::RUNTIME_ENTRY);
859 } else {
860 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
861 }
723 } else { 862 } else {
863 Label done;
724 if (FLAG_trap_on_deopt) { 864 if (FLAG_trap_on_deopt) {
725 Label done;
726 __ j(NegateCondition(cc), &done, Label::kNear); 865 __ j(NegateCondition(cc), &done, Label::kNear);
727 __ int3(); 866 __ int3();
728 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); 867 }
729 __ bind(&done); 868 if (!lazy_deopt_needed && frame_is_built_) {
869 if (FLAG_trap_on_deopt) {
870 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
871 } else {
872 __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
873 }
730 } else { 874 } else {
731 __ j(cc, entry, RelocInfo::RUNTIME_ENTRY); 875 // We often have several deopts to the same entry, reuse the last
876 // jump entry if this is the case.
877 if (jump_table_.is_empty() ||
878 jump_table_.last().address != entry ||
879 jump_table_.last().needs_frame != !frame_is_built_ ||
880 jump_table_.last().is_lazy_deopt != lazy_deopt_needed) {
881 JumpTableEntry table_entry(entry, !frame_is_built_, lazy_deopt_needed);
882 jump_table_.Add(table_entry, zone());
883 }
884 if (FLAG_trap_on_deopt) {
885 __ jmp(&jump_table_.last().label);
886 } else {
887 __ j(cc, &jump_table_.last().label);
888 }
732 } 889 }
890 __ bind(&done);
733 } 891 }
734 } 892 }
735 893
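DeoptimizeIf above appends a new jump-table row only when the previous one cannot be reused. The rule, pulled out into a sketch (simplified types; the real entry also carries the label that the conditional jump targets):

    #include <vector>

    struct JumpTableEntry {
      const void* address;
      bool needs_frame;
      bool is_lazy_deopt;
    };

    void AddDeoptJump(std::vector<JumpTableEntry>* table, const void* entry,
                      bool needs_frame, bool is_lazy_deopt) {
      if (table->empty() ||
          table->back().address != entry ||
          table->back().needs_frame != needs_frame ||
          table->back().is_lazy_deopt != is_lazy_deopt) {
        table->push_back(JumpTableEntry{entry, needs_frame, is_lazy_deopt});
      }
      // The emitted j(cc, ...) then targets table->back()'s label.
    }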
736 894
737 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { 895 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
738 int length = deoptimizations_.length(); 896 int length = deoptimizations_.length();
739 if (length == 0) return; 897 if (length == 0) return;
740 Handle<DeoptimizationInputData> data = 898 Handle<DeoptimizationInputData> data =
741 factory()->NewDeoptimizationInputData(length, TENURED); 899 factory()->NewDeoptimizationInputData(length, TENURED);
742 900
(...skipping 680 matching lines...)
1423 // Use xor to produce +0.0 in a fast and compact way, but avoid to 1581 // Use xor to produce +0.0 in a fast and compact way, but avoid to
1424 // do so if the constant is -0.0. 1582 // do so if the constant is -0.0.
1425 if (BitCast<uint64_t, double>(v) == 0) { 1583 if (BitCast<uint64_t, double>(v) == 0) {
1426 __ xorps(res, res); 1584 __ xorps(res, res);
1427 } else { 1585 } else {
1428 Register temp = ToRegister(instr->temp()); 1586 Register temp = ToRegister(instr->temp());
1429 uint64_t int_val = BitCast<uint64_t, double>(v); 1587 uint64_t int_val = BitCast<uint64_t, double>(v);
1430 int32_t lower = static_cast<int32_t>(int_val); 1588 int32_t lower = static_cast<int32_t>(int_val);
1431 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); 1589 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
1432 if (CpuFeatures::IsSupported(SSE4_1)) { 1590 if (CpuFeatures::IsSupported(SSE4_1)) {
1433 CpuFeatures::Scope scope(SSE4_1); 1591 CpuFeatures::Scope scope1(SSE2);
1592 CpuFeatures::Scope scope2(SSE4_1);
1434 if (lower != 0) { 1593 if (lower != 0) {
1435 __ Set(temp, Immediate(lower)); 1594 __ Set(temp, Immediate(lower));
1436 __ movd(res, Operand(temp)); 1595 __ movd(res, Operand(temp));
1437 __ Set(temp, Immediate(upper)); 1596 __ Set(temp, Immediate(upper));
1438 __ pinsrd(res, Operand(temp), 1); 1597 __ pinsrd(res, Operand(temp), 1);
1439 } else { 1598 } else {
1440 __ xorps(res, res); 1599 __ xorps(res, res);
1441 __ Set(temp, Immediate(upper)); 1600 __ Set(temp, Immediate(upper));
1442 __ pinsrd(res, Operand(temp), 1); 1601 __ pinsrd(res, Operand(temp), 1);
1443 } 1602 }
1444 } else { 1603 } else {
1604 ASSERT(CpuFeatures::IsSupported(SSE2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed. The Scope c'tor right below conta
danno 2012/11/30 16:23:24 Done.
1605 CpuFeatures::Scope scope(SSE2);
1445 __ Set(temp, Immediate(upper)); 1606 __ Set(temp, Immediate(upper));
1446 __ movd(res, Operand(temp)); 1607 __ movd(res, Operand(temp));
1447 __ psllq(res, 32); 1608 __ psllq(res, 32);
1448 if (lower != 0) { 1609 if (lower != 0) {
1449 __ Set(temp, Immediate(lower)); 1610 __ Set(temp, Immediate(lower));
1450 __ movd(xmm0, Operand(temp)); 1611 __ movd(xmm0, Operand(temp));
1451 __ por(res, xmm0); 1612 __ por(res, xmm0);
1452 } 1613 }
1453 } 1614 }
1454 } 1615 }
(...skipping 133 matching lines...)
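The constant materialization above avoids a memory load by splitting the double's IEEE-754 image into two 32-bit halves and inserting them into the XMM register (pinsrd under SSE4.1, psllq/por under plain SSE2). The scalar equivalent of the split (sketch, not V8 source):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double v = 1.5;                       // hypothetical constant
      uint64_t int_val;
      std::memcpy(&int_val, &v, sizeof v);  // BitCast<uint64_t, double>(v)
      uint32_t lower = static_cast<uint32_t>(int_val);
      uint32_t upper = static_cast<uint32_t>(int_val >> 32);  // kBitsPerInt
      std::printf("lower=0x%08x upper=0x%08x\n", lower, upper);
      // +0.0 is the all-zero pattern, hence the cheaper xorps path above;
      // -0.0 has the sign bit set and must not take it.
      return 0;
    }
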
1588 __ add(ToRegister(left), ToOperand(right)); 1749 __ add(ToRegister(left), ToOperand(right));
1589 } 1750 }
1590 1751
1591 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { 1752 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1592 DeoptimizeIf(overflow, instr->environment()); 1753 DeoptimizeIf(overflow, instr->environment());
1593 } 1754 }
1594 } 1755 }
1595 1756
1596 1757
1597 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { 1758 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1759 CpuFeatures::Scope scope(SSE2);
1598 LOperand* left = instr->left(); 1760 LOperand* left = instr->left();
1599 LOperand* right = instr->right(); 1761 LOperand* right = instr->right();
1600 ASSERT(left->Equals(instr->result())); 1762 ASSERT(left->Equals(instr->result()));
1601 HMathMinMax::Operation operation = instr->hydrogen()->operation(); 1763 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1602 if (instr->hydrogen()->representation().IsInteger32()) { 1764 if (instr->hydrogen()->representation().IsInteger32()) {
1603 Label return_left; 1765 Label return_left;
1604 Condition condition = (operation == HMathMinMax::kMathMin) 1766 Condition condition = (operation == HMathMinMax::kMathMin)
1605 ? less_equal 1767 ? less_equal
1606 : greater_equal; 1768 : greater_equal;
1607 if (right->IsConstantOperand()) { 1769 if (right->IsConstantOperand()) {
(...skipping 41 matching lines...)
1649 __ j(parity_even, &return_left, Label::kNear); // left == NaN. 1811 __ j(parity_even, &return_left, Label::kNear); // left == NaN.
1650 __ bind(&return_right); 1812 __ bind(&return_right);
1651 __ movsd(left_reg, right_reg); 1813 __ movsd(left_reg, right_reg);
1652 1814
1653 __ bind(&return_left); 1815 __ bind(&return_left);
1654 } 1816 }
1655 } 1817 }
1656 1818
1657 1819
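The tail of DoMathMinMax above handles the case an ordinary compare gets wrong: ucomisd raises the parity flag for NaN, and NaN must win. A scalar sketch of the double min semantics (max is symmetric; the +/-0 handling sits in the elided lines and is assumed here to follow the usual rule that -0 wins for min):

    #include <cmath>

    double MathMin(double a, double b) {
      if (std::isnan(a)) return a;        // parity_even -> return_left
      if (std::isnan(b)) return b;        // NaN on either side is sticky
      if (a == b)                         // +0 == -0 compares equal...
        return std::signbit(a) ? a : b;   // ...but -0 must win for min
      return a < b ? a : b;
    }
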
1658 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { 1820 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1821 ASSERT(CpuFeatures::IsSupported(SSE2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
1822 CpuFeatures::Scope scope(SSE2);
1659 XMMRegister left = ToDoubleRegister(instr->left()); 1823 XMMRegister left = ToDoubleRegister(instr->left());
1660 XMMRegister right = ToDoubleRegister(instr->right()); 1824 XMMRegister right = ToDoubleRegister(instr->right());
1661 XMMRegister result = ToDoubleRegister(instr->result()); 1825 XMMRegister result = ToDoubleRegister(instr->result());
1662 // Modulo uses a fixed result register. 1826 // Modulo uses a fixed result register.
1663 ASSERT(instr->op() == Token::MOD || left.is(result)); 1827 ASSERT(instr->op() == Token::MOD || left.is(result));
1664 switch (instr->op()) { 1828 switch (instr->op()) {
1665 case Token::ADD: 1829 case Token::ADD:
1666 __ addsd(left, right); 1830 __ addsd(left, right);
1667 break; 1831 break;
1668 case Token::SUB: 1832 case Token::SUB:
1669 __ subsd(left, right); 1833 __ subsd(left, right);
1670 break; 1834 break;
1671 case Token::MUL: 1835 case Token::MUL:
1672 __ mulsd(left, right); 1836 __ mulsd(left, right);
1673 break; 1837 break;
1674 case Token::DIV: 1838 case Token::DIV:
1675 __ divsd(left, right); 1839 __ divsd(left, right);
1676 break; 1840 break;
1677 case Token::MOD: { 1841 case Token::MOD: {
1678 // Pass two doubles as arguments on the stack. 1842 // Pass two doubles as arguments on the stack.
1679 __ PrepareCallCFunction(4, eax); 1843 __ PrepareCallCFunction(4, eax);
1680 __ movdbl(Operand(esp, 0 * kDoubleSize), left); 1844 __ movdbl(Operand(esp, 0 * kDoubleSize), left);
(...skipping 52 matching lines...)
1733 } else { 1897 } else {
1734 __ j(cc, chunk_->GetAssemblyLabel(left_block)); 1898 __ j(cc, chunk_->GetAssemblyLabel(left_block));
1735 __ jmp(chunk_->GetAssemblyLabel(right_block)); 1899 __ jmp(chunk_->GetAssemblyLabel(right_block));
1736 } 1900 }
1737 } 1901 }
1738 1902
1739 1903
1740 void LCodeGen::DoBranch(LBranch* instr) { 1904 void LCodeGen::DoBranch(LBranch* instr) {
1741 int true_block = chunk_->LookupDestination(instr->true_block_id()); 1905 int true_block = chunk_->LookupDestination(instr->true_block_id());
1742 int false_block = chunk_->LookupDestination(instr->false_block_id()); 1906 int false_block = chunk_->LookupDestination(instr->false_block_id());
1907 CpuFeatures::Scope scope(SSE2);
1743 1908
1744 Representation r = instr->hydrogen()->value()->representation(); 1909 Representation r = instr->hydrogen()->value()->representation();
1745 if (r.IsInteger32()) { 1910 if (r.IsInteger32()) {
1746 Register reg = ToRegister(instr->value()); 1911 Register reg = ToRegister(instr->value());
1747 __ test(reg, Operand(reg)); 1912 __ test(reg, Operand(reg));
1748 EmitBranch(true_block, false_block, not_zero); 1913 EmitBranch(true_block, false_block, not_zero);
1749 } else if (r.IsDouble()) { 1914 } else if (r.IsDouble()) {
1750 XMMRegister reg = ToDoubleRegister(instr->value()); 1915 XMMRegister reg = ToDoubleRegister(instr->value());
1751 __ xorps(xmm0, xmm0); 1916 __ xorps(xmm0, xmm0);
1752 __ ucomisd(reg, xmm0); 1917 __ ucomisd(reg, xmm0);
(...skipping 139 matching lines...) Expand 10 before | Expand all | Expand 10 after
1892 return cond; 2057 return cond;
1893 } 2058 }
1894 2059
1895 2060
1896 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { 2061 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1897 LOperand* left = instr->left(); 2062 LOperand* left = instr->left();
1898 LOperand* right = instr->right(); 2063 LOperand* right = instr->right();
1899 int false_block = chunk_->LookupDestination(instr->false_block_id()); 2064 int false_block = chunk_->LookupDestination(instr->false_block_id());
1900 int true_block = chunk_->LookupDestination(instr->true_block_id()); 2065 int true_block = chunk_->LookupDestination(instr->true_block_id());
1901 Condition cc = TokenToCondition(instr->op(), instr->is_double()); 2066 Condition cc = TokenToCondition(instr->op(), instr->is_double());
2067 CpuFeatures::Scope scope(SSE2);
1902 2068
1903 if (left->IsConstantOperand() && right->IsConstantOperand()) { 2069 if (left->IsConstantOperand() && right->IsConstantOperand()) {
1904 // We can statically evaluate the comparison. 2070 // We can statically evaluate the comparison.
1905 double left_val = ToDouble(LConstantOperand::cast(left)); 2071 double left_val = ToDouble(LConstantOperand::cast(left));
1906 double right_val = ToDouble(LConstantOperand::cast(right)); 2072 double right_val = ToDouble(LConstantOperand::cast(right));
1907 int next_block = 2073 int next_block =
1908 EvalComparison(instr->op(), left_val, right_val) ? true_block 2074 EvalComparison(instr->op(), left_val, right_val) ? true_block
1909 : false_block; 2075 : false_block;
1910 EmitGoto(next_block); 2076 EmitGoto(next_block);
1911 } else { 2077 } else {
(...skipping 489 matching lines...)
2401 __ j(condition, &true_value, Label::kNear); 2567 __ j(condition, &true_value, Label::kNear);
2402 __ mov(ToRegister(instr->result()), factory()->false_value()); 2568 __ mov(ToRegister(instr->result()), factory()->false_value());
2403 __ jmp(&done, Label::kNear); 2569 __ jmp(&done, Label::kNear);
2404 __ bind(&true_value); 2570 __ bind(&true_value);
2405 __ mov(ToRegister(instr->result()), factory()->true_value()); 2571 __ mov(ToRegister(instr->result()), factory()->true_value());
2406 __ bind(&done); 2572 __ bind(&done);
2407 } 2573 }
2408 2574
2409 2575
2410 void LCodeGen::DoReturn(LReturn* instr) { 2576 void LCodeGen::DoReturn(LReturn* instr) {
2411 if (FLAG_trace) { 2577 if (FLAG_trace && info()->IsOptimizing()) {
2412 // Preserve the return value on the stack and rely on the runtime call 2578 // Preserve the return value on the stack and rely on the runtime call
2413 // to return the value in the same register. We're leaving the code 2579 // to return the value in the same register. We're leaving the code
2414 // managed by the register allocator and tearing down the frame, it's 2580 // managed by the register allocator and tearing down the frame, it's
2415 // safe to write to the context register. 2581 // safe to write to the context register.
2416 __ push(eax); 2582 __ push(eax);
2417 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); 2583 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2418 __ CallRuntime(Runtime::kTraceExit, 1); 2584 __ CallRuntime(Runtime::kTraceExit, 1);
2419 } 2585 }
2420 if (dynamic_frame_alignment_) { 2586 if (dynamic_frame_alignment_) {
2421 // Fetch the state of the dynamic frame alignment. 2587 // Fetch the state of the dynamic frame alignment.
2422 __ mov(edx, Operand(ebp, 2588 __ mov(edx, Operand(ebp,
2423 JavaScriptFrameConstants::kDynamicAlignmentStateOffset)); 2589 JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
2424 } 2590 }
2425 __ mov(esp, ebp); 2591 if (NeedsEagerFrame()) {
2426 __ pop(ebp); 2592 __ mov(esp, ebp);
2593 __ pop(ebp);
2594 }
2427 if (dynamic_frame_alignment_) { 2595 if (dynamic_frame_alignment_) {
2428 Label no_padding; 2596 Label no_padding;
2429 __ cmp(edx, Immediate(kNoAlignmentPadding)); 2597 __ cmp(edx, Immediate(kNoAlignmentPadding));
2430 __ j(equal, &no_padding); 2598 __ j(equal, &no_padding);
2431 if (FLAG_debug_code) { 2599 if (FLAG_debug_code) {
2432 __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize), 2600 __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize),
2433 Immediate(kAlignmentZapValue)); 2601 Immediate(kAlignmentZapValue));
2434 __ Assert(equal, "expected alignment marker"); 2602 __ Assert(equal, "expected alignment marker");
2435 } 2603 }
2436 __ Ret((GetParameterCount() + 2) * kPointerSize, ecx); 2604 __ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
2437 __ bind(&no_padding); 2605 __ bind(&no_padding);
2438 } 2606 }
2439 __ Ret((GetParameterCount() + 1) * kPointerSize, ecx); 2607 if (info()->IsStub()) {
2608 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2609 __ Ret();
2610 } else {
2611 __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
2612 }
2440 } 2613 }
2441 2614
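DoReturn above now has two epilogues: optimized JS code keeps the callee-cleaned convention and pops its receiver and parameters with ret n, while a stub restores esi from the frame and pops nothing beyond the return address. The byte counts as plain arithmetic (sketch, not V8 source):

    int ReturnImmediateBytes(bool is_stub, int parameter_count,
                             bool pops_alignment_padding) {
      const int kPointerSize = 4;  // ia32
      if (is_stub) return 0;       // __ Ret()
      // +1 for the receiver; the dynamic-alignment path above uses +2 to
      // also discard the padding word.
      int words = parameter_count + (pops_alignment_padding ? 2 : 1);
      return words * kPointerSize;
    }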
2442 2615
2443 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { 2616 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2444 Register result = ToRegister(instr->result()); 2617 Register result = ToRegister(instr->result());
2445 __ mov(result, Operand::Cell(instr->hydrogen()->cell())); 2618 __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
2446 if (instr->hydrogen()->RequiresHoleCheck()) { 2619 if (instr->hydrogen()->RequiresHoleCheck()) {
2447 __ cmp(result, factory()->the_hole_value()); 2620 __ cmp(result, factory()->the_hole_value());
2448 DeoptimizeIf(equal, instr->environment()); 2621 DeoptimizeIf(equal, instr->environment());
2449 } 2622 }
(...skipping 355 matching lines...)
2805 __ SmiUntag(ToRegister(key)); 2978 __ SmiUntag(ToRegister(key));
2806 } 2979 }
2807 Operand operand(BuildFastArrayOperand( 2980 Operand operand(BuildFastArrayOperand(
2808 instr->elements(), 2981 instr->elements(),
2809 key, 2982 key,
2810 instr->hydrogen()->key()->representation(), 2983 instr->hydrogen()->key()->representation(),
2811 elements_kind, 2984 elements_kind,
2812 0, 2985 0,
2813 instr->additional_index())); 2986 instr->additional_index()));
2814 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { 2987 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
2815 XMMRegister result(ToDoubleRegister(instr->result())); 2988 if (CpuFeatures::IsSupported(SSE2)) {
2816 __ movss(result, operand); 2989 CpuFeatures::Scope scope(SSE2);
2817 __ cvtss2sd(result, result); 2990 XMMRegister result(ToDoubleRegister(instr->result()));
2991 __ movss(result, operand);
2992 __ cvtss2sd(result, result);
2993 } else {
2994 __ fld_s(operand);
2995 HandleX87FPReturnValue(instr);
2996 }
2818 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { 2997 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
2819 __ movdbl(ToDoubleRegister(instr->result()), operand); 2998 if (CpuFeatures::IsSupported(SSE2)) {
2999 CpuFeatures::Scope scope(SSE2);
3000 __ movdbl(ToDoubleRegister(instr->result()), operand);
3001 } else {
3002 __ fld_d(operand);
3003 HandleX87FPReturnValue(instr);
3004 }
2820 } else { 3005 } else {
2821 Register result(ToRegister(instr->result())); 3006 Register result(ToRegister(instr->result()));
2822 switch (elements_kind) { 3007 switch (elements_kind) {
2823 case EXTERNAL_BYTE_ELEMENTS: 3008 case EXTERNAL_BYTE_ELEMENTS:
2824 __ movsx_b(result, operand); 3009 __ movsx_b(result, operand);
2825 break; 3010 break;
2826 case EXTERNAL_PIXEL_ELEMENTS: 3011 case EXTERNAL_PIXEL_ELEMENTS:
2827 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: 3012 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
2828 __ movzx_b(result, operand); 3013 __ movzx_b(result, operand);
2829 break; 3014 break;
(...skipping 23 matching lines...)
2853 case FAST_HOLEY_DOUBLE_ELEMENTS: 3038 case FAST_HOLEY_DOUBLE_ELEMENTS:
2854 case DICTIONARY_ELEMENTS: 3039 case DICTIONARY_ELEMENTS:
2855 case NON_STRICT_ARGUMENTS_ELEMENTS: 3040 case NON_STRICT_ARGUMENTS_ELEMENTS:
2856 UNREACHABLE(); 3041 UNREACHABLE();
2857 break; 3042 break;
2858 } 3043 }
2859 } 3044 }
2860 } 3045 }
2861 3046
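For EXTERNAL_FLOAT_ELEMENTS the load above reads a 32-bit float and widens it to the double the rest of the pipeline uses: movss then cvtss2sd on SSE2, while the new x87 fallback gets the widening for free because fld_s already pushes an extended value. Scalar equivalent (sketch, not V8 source):

    #include <cstdint>
    #include <cstring>

    double LoadExternalFloat(const void* backing_store, uint32_t index) {
      float f;
      std::memcpy(&f, static_cast<const char*>(backing_store) + index * 4,
                  sizeof f);          // movss
      return static_cast<double>(f);  // cvtss2sd
    }
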
2862 3047
3048 void LCodeGen::HandleX87FPReturnValue(LInstruction* instr) {
3049 if (IsX87TopOfStack(instr->result())) {
3050 // Return value is already on stack. If the value has no uses, then
3051 // pop it off the FP stack. Otherwise, make sure that there are enough
3052 // copies of the value on the stack to feed all of the usages, e.g.
3053 // when the following instruction uses the return value in multiple
3054 // inputs.
3055 int count = instr->hydrogen_value()->UseCount();
3056 if (count == 0) {
3057 __ fstp(0);
3058 } else {
3059 count--;
3060 ASSERT(count <= 7);
3061 while (count-- > 0) {
3062 __ fld(0);
3063 }
3064 }
3065 } else {
3066 __ fstp_d(ToOperand(instr->result()));
3067 }
3068 }
3069
3070
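HandleX87FPReturnValue above has to keep the x87 stack balanced by hand: a dead value is popped, and a value with N uses is duplicated until N copies sit on the stack (the x87 stack holds eight values, hence the count <= 7 assert). The bookkeeping as plain code (sketch, not V8 source):

    #include <cassert>

    // Returns how many fld(0) duplications to emit, or -1 when the value
    // should be dropped with fstp(0) instead.
    int X87CopiesToEmit(int use_count) {
      if (use_count == 0) return -1;  // fstp(0): pop the unused value
      int count = use_count - 1;      // one copy is already on the stack
      assert(count <= 7);
      return count;
    }
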
2863 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { 3071 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
2864 XMMRegister result = ToDoubleRegister(instr->result());
2865
2866 if (instr->hydrogen()->RequiresHoleCheck()) { 3072 if (instr->hydrogen()->RequiresHoleCheck()) {
2867 int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + 3073 int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
2868 sizeof(kHoleNanLower32); 3074 sizeof(kHoleNanLower32);
2869 Operand hole_check_operand = BuildFastArrayOperand( 3075 Operand hole_check_operand = BuildFastArrayOperand(
2870 instr->elements(), instr->key(), 3076 instr->elements(), instr->key(),
2871 instr->hydrogen()->key()->representation(), 3077 instr->hydrogen()->key()->representation(),
2872 FAST_DOUBLE_ELEMENTS, 3078 FAST_DOUBLE_ELEMENTS,
2873 offset, 3079 offset,
2874 instr->additional_index()); 3080 instr->additional_index());
2875 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); 3081 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
2876 DeoptimizeIf(equal, instr->environment()); 3082 DeoptimizeIf(equal, instr->environment());
2877 } 3083 }
2878 3084
2879 Operand double_load_operand = BuildFastArrayOperand( 3085 Operand double_load_operand = BuildFastArrayOperand(
2880 instr->elements(), 3086 instr->elements(),
2881 instr->key(), 3087 instr->key(),
2882 instr->hydrogen()->key()->representation(), 3088 instr->hydrogen()->key()->representation(),
2883 FAST_DOUBLE_ELEMENTS, 3089 FAST_DOUBLE_ELEMENTS,
2884 FixedDoubleArray::kHeaderSize - kHeapObjectTag, 3090 FixedDoubleArray::kHeaderSize - kHeapObjectTag,
2885 instr->additional_index()); 3091 instr->additional_index());
2886 __ movdbl(result, double_load_operand); 3092 if (CpuFeatures::IsSupported(SSE2)) {
3093 CpuFeatures::Scope scope(SSE2);
3094 XMMRegister result = ToDoubleRegister(instr->result());
3095 __ movdbl(result, double_load_operand);
3096 } else {
3097 __ fld_d(double_load_operand);
3098 HandleX87FPReturnValue(instr);
3099 }
2887 } 3100 }
2888 3101
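The hole check above works because the hole in a FAST_DOUBLE_ELEMENTS backing store is a NaN with a fixed bit pattern, so only the upper 32 bits of the element need to be compared (the operand is offset by sizeof(kHoleNanLower32)). A scalar sketch; the marker value below is an assumption, not taken from this patch:

    #include <cstdint>
    #include <cstring>

    const uint32_t kHoleNanUpper32 = 0x7FF7FFFF;  // assumed marker value

    bool IsTheHoleNan(double element) {
      uint64_t bits;
      std::memcpy(&bits, &element, sizeof element);
      return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
    }
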
2889 3102
2890 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { 3103 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
2891 Register result = ToRegister(instr->result()); 3104 Register result = ToRegister(instr->result());
2892 3105
2893 // Load the result. 3106 // Load the result.
2894 __ mov(result, 3107 __ mov(result,
2895 BuildFastArrayOperand(instr->elements(), 3108 BuildFastArrayOperand(instr->elements(),
2896 instr->key(), 3109 instr->key(),
(...skipping 395 matching lines...)
3292 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); 3505 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3293 } 3506 }
3294 virtual LInstruction* instr() { return instr_; } 3507 virtual LInstruction* instr() { return instr_; }
3295 private: 3508 private:
3296 LUnaryMathOperation* instr_; 3509 LUnaryMathOperation* instr_;
3297 }; 3510 };
3298 3511
3299 ASSERT(instr->value()->Equals(instr->result())); 3512 ASSERT(instr->value()->Equals(instr->result()));
3300 Representation r = instr->hydrogen()->value()->representation(); 3513 Representation r = instr->hydrogen()->value()->representation();
3301 3514
3515 CpuFeatures::Scope scope(SSE2);
3302 if (r.IsDouble()) { 3516 if (r.IsDouble()) {
3303 XMMRegister scratch = xmm0; 3517 XMMRegister scratch = xmm0;
3304 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3518 XMMRegister input_reg = ToDoubleRegister(instr->value());
3305 __ xorps(scratch, scratch); 3519 __ xorps(scratch, scratch);
3306 __ subsd(scratch, input_reg); 3520 __ subsd(scratch, input_reg);
3307 __ pand(input_reg, scratch); 3521 __ pand(input_reg, scratch);
3308 } else if (r.IsInteger32()) { 3522 } else if (r.IsInteger32()) {
3309 EmitIntegerMathAbs(instr); 3523 EmitIntegerMathAbs(instr);
3310 } else { // Tagged case. 3524 } else { // Tagged case.
3311 DeferredMathAbsTaggedHeapNumber* deferred = 3525 DeferredMathAbsTaggedHeapNumber* deferred =
3312 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); 3526 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3313 Register input_reg = ToRegister(instr->value()); 3527 Register input_reg = ToRegister(instr->value());
3314 // Smi check. 3528 // Smi check.
3315 __ JumpIfNotSmi(input_reg, deferred->entry()); 3529 __ JumpIfNotSmi(input_reg, deferred->entry());
3316 EmitIntegerMathAbs(instr); 3530 EmitIntegerMathAbs(instr);
3317 __ bind(deferred->exit()); 3531 __ bind(deferred->exit());
3318 } 3532 }
3319 } 3533 }
3320 3534
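The SSE2 path of DoMathAbs above (xorps, subsd, pand) is a bit trick: 0 - x is exact and differs from x only in the sign bit, so AND-ing the two bit images clears the sign and yields |x|. In scalar form (sketch, not V8 source):

    #include <cstdint>
    #include <cstring>

    double AbsViaBits(double x) {
      double neg = 0.0 - x;  // xorps(scratch, scratch); subsd(scratch, x)
      uint64_t a, b;
      std::memcpy(&a, &x, sizeof x);
      std::memcpy(&b, &neg, sizeof neg);
      uint64_t masked = a & b;  // pand(input_reg, scratch)
      double result;
      std::memcpy(&result, &masked, sizeof result);
      return result;
    }
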
3321 3535
3322 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { 3536 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
3537 CpuFeatures::Scope scope(SSE2);
3323 XMMRegister xmm_scratch = xmm0; 3538 XMMRegister xmm_scratch = xmm0;
3324 Register output_reg = ToRegister(instr->result()); 3539 Register output_reg = ToRegister(instr->result());
3325 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3540 XMMRegister input_reg = ToDoubleRegister(instr->value());
3326 3541
3327 if (CpuFeatures::IsSupported(SSE4_1)) { 3542 if (CpuFeatures::IsSupported(SSE4_1)) {
3328 CpuFeatures::Scope scope(SSE4_1); 3543 CpuFeatures::Scope scope(SSE4_1);
3329 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3544 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3330 // Deoptimize on negative zero. 3545 // Deoptimize on negative zero.
3331 Label non_zero; 3546 Label non_zero;
3332 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. 3547 __ xorps(xmm_scratch, xmm_scratch); // Zero the register.
(...skipping 44 matching lines...)
3377 __ ucomisd(input_reg, xmm_scratch); 3592 __ ucomisd(input_reg, xmm_scratch);
3378 __ j(equal, &done, Label::kNear); 3593 __ j(equal, &done, Label::kNear);
3379 __ sub(output_reg, Immediate(1)); 3594 __ sub(output_reg, Immediate(1));
3380 DeoptimizeIf(overflow, instr->environment()); 3595 DeoptimizeIf(overflow, instr->environment());
3381 3596
3382 __ bind(&done); 3597 __ bind(&done);
3383 } 3598 }
3384 } 3599 }
3385 3600
3386 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { 3601 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3602 CpuFeatures::Scope scope(SSE2);
3387 XMMRegister xmm_scratch = xmm0; 3603 XMMRegister xmm_scratch = xmm0;
3388 Register output_reg = ToRegister(instr->result()); 3604 Register output_reg = ToRegister(instr->result());
3389 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3605 XMMRegister input_reg = ToDoubleRegister(instr->value());
3390 3606
3391 Label below_half, done; 3607 Label below_half, done;
3392 // xmm_scratch = 0.5 3608 // xmm_scratch = 0.5
3393 ExternalReference one_half = ExternalReference::address_of_one_half(); 3609 ExternalReference one_half = ExternalReference::address_of_one_half();
3394 __ movdbl(xmm_scratch, Operand::StaticVariable(one_half)); 3610 __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
3395 __ ucomisd(xmm_scratch, input_reg); 3611 __ ucomisd(xmm_scratch, input_reg);
3396 __ j(above, &below_half); 3612 __ j(above, &below_half);
(...skipping 25 matching lines...)
3422 __ cvtss2sd(xmm_scratch, xmm_scratch); 3638 __ cvtss2sd(xmm_scratch, xmm_scratch);
3423 __ ucomisd(input_reg, xmm_scratch); 3639 __ ucomisd(input_reg, xmm_scratch);
3424 DeoptimizeIf(below, instr->environment()); 3640 DeoptimizeIf(below, instr->environment());
3425 } 3641 }
3426 __ Set(output_reg, Immediate(0)); 3642 __ Set(output_reg, Immediate(0));
3427 __ bind(&done); 3643 __ bind(&done);
3428 } 3644 }
3429 3645
3430 3646
3431 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { 3647 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3648 CpuFeatures::Scope scope(SSE2);
3432 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3649 XMMRegister input_reg = ToDoubleRegister(instr->value());
3433 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); 3650 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3434 __ sqrtsd(input_reg, input_reg); 3651 __ sqrtsd(input_reg, input_reg);
3435 } 3652 }
3436 3653
3437 3654
3438 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { 3655 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3656 CpuFeatures::Scope scope(SSE2);
3439 XMMRegister xmm_scratch = xmm0; 3657 XMMRegister xmm_scratch = xmm0;
3440 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3658 XMMRegister input_reg = ToDoubleRegister(instr->value());
3441 Register scratch = ToRegister(instr->temp()); 3659 Register scratch = ToRegister(instr->temp());
3442 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); 3660 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
3443 3661
3444 // Note that according to ECMA-262 15.8.2.13: 3662 // Note that according to ECMA-262 15.8.2.13:
3445 // Math.pow(-Infinity, 0.5) == Infinity 3663 // Math.pow(-Infinity, 0.5) == Infinity
3446 // Math.sqrt(-Infinity) == NaN 3664 // Math.sqrt(-Infinity) == NaN
3447 Label done, sqrt; 3665 Label done, sqrt;
3448 // Check base for -Infinity. According to IEEE-754, single-precision 3666 // Check base for -Infinity. According to IEEE-754, single-precision
(...skipping 56 matching lines...)
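The ECMA-262 note above is exactly why DoMathPowHalf cannot be lowered to a bare sqrtsd: IEEE sqrt and JavaScript's Math.pow(x, 0.5) disagree at -Infinity, so the generated code special-cases that input before taking the square root. The discrepancy is easy to reproduce standalone (C++, whose libm follows the same IEEE/ECMA rules):

  #include <cmath>
  #include <cstdio>

  int main() {
    double neg_inf = -INFINITY;
    std::printf("sqrt(-inf)     = %f\n", std::sqrt(neg_inf));      // nan
    std::printf("pow(-inf, 0.5) = %f\n", std::pow(neg_inf, 0.5));  // inf
    return 0;
  }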
3505 DeferredDoRandom(LCodeGen* codegen, LRandom* instr) 3723 DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
3506 : LDeferredCode(codegen), instr_(instr) { } 3724 : LDeferredCode(codegen), instr_(instr) { }
3507 virtual void Generate() { codegen()->DoDeferredRandom(instr_); } 3725 virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
3508 virtual LInstruction* instr() { return instr_; } 3726 virtual LInstruction* instr() { return instr_; }
3509 private: 3727 private:
3510 LRandom* instr_; 3728 LRandom* instr_;
3511 }; 3729 };
3512 3730
3513 DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr); 3731 DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
3514 3732
3733 CpuFeatures::Scope scope(SSE2);
3515 // Having marked this instruction as a call, we can use any 3734 // Having marked this instruction as a call, we can use any
3516 // registers. 3735 // registers.
3517 ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); 3736 ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
3518 ASSERT(ToRegister(instr->global_object()).is(eax)); 3737 ASSERT(ToRegister(instr->global_object()).is(eax));
3519 // Assert that the register size is indeed the size of each seed. 3738 // Assert that the register size is indeed the size of each seed.
3520 static const int kSeedSize = sizeof(uint32_t); 3739 static const int kSeedSize = sizeof(uint32_t);
3521 STATIC_ASSERT(kPointerSize == kSeedSize); 3740 STATIC_ASSERT(kPointerSize == kSeedSize);
3522 3741
3523 __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset)); 3742 __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
3524 static const int kRandomSeedOffset = 3743 static const int kRandomSeedOffset =
(...skipping 47 matching lines...)
3572 3791
3573 void LCodeGen::DoDeferredRandom(LRandom* instr) { 3792 void LCodeGen::DoDeferredRandom(LRandom* instr) {
3574 __ PrepareCallCFunction(1, ebx); 3793 __ PrepareCallCFunction(1, ebx);
3575 __ mov(Operand(esp, 0), eax); 3794 __ mov(Operand(esp, 0), eax);
3576 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); 3795 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
3577 // Return value is in eax. 3796 // Return value is in eax.
3578 } 3797 }
3579 3798
3580 3799
3581 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { 3800 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
3801 CpuFeatures::Scope scope(SSE2);
3582 ASSERT(instr->value()->Equals(instr->result())); 3802 ASSERT(instr->value()->Equals(instr->result()));
3583 XMMRegister input_reg = ToDoubleRegister(instr->value()); 3803 XMMRegister input_reg = ToDoubleRegister(instr->value());
3584 Label positive, done, zero; 3804 Label positive, done, zero;
3585 __ xorps(xmm0, xmm0); 3805 __ xorps(xmm0, xmm0);
3586 __ ucomisd(input_reg, xmm0); 3806 __ ucomisd(input_reg, xmm0);
3587 __ j(above, &positive, Label::kNear); 3807 __ j(above, &positive, Label::kNear);
3588 __ j(equal, &zero, Label::kNear); 3808 __ j(equal, &zero, Label::kNear);
3589 ExternalReference nan = 3809 ExternalReference nan =
3590 ExternalReference::address_of_canonical_non_hole_nan(); 3810 ExternalReference::address_of_canonical_non_hole_nan();
3591 __ movdbl(input_reg, Operand::StaticVariable(nan)); 3811 __ movdbl(input_reg, Operand::StaticVariable(nan));
(...skipping 11 matching lines...)
3603 __ fld_d(Operand(esp, 0)); 3823 __ fld_d(Operand(esp, 0));
3604 __ fyl2x(); 3824 __ fyl2x();
3605 __ fstp_d(Operand(esp, 0)); 3825 __ fstp_d(Operand(esp, 0));
3606 __ movdbl(input_reg, Operand(esp, 0)); 3826 __ movdbl(input_reg, Operand(esp, 0));
3607 __ add(Operand(esp), Immediate(kDoubleSize)); 3827 __ add(Operand(esp), Immediate(kDoubleSize));
3608 __ bind(&done); 3828 __ bind(&done);
3609 } 3829 }
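DoMathLog uses x87 for the logarithm itself: fyl2x computes st(1) * log2(st(0)), and the standard idiom (the load is in the elided lines, presumably via fldln2) puts ln 2 in st(1) so the product is the natural log. The identity it relies on, in one line of C++:

  #include <cmath>

  // fyl2x with y == ln 2:  ln(2) * log2(x) == ln(x).
  double LnViaFyl2x(double x) { return M_LN2 * std::log2(x); }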
3610 3830
3611 3831
3612 void LCodeGen::DoMathExp(LMathExp* instr) { 3832 void LCodeGen::DoMathExp(LMathExp* instr) {
3833 ASSERT(CpuFeatures::IsSupported(SSE2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
3834 CpuFeatures::Scope scope(SSE2);
3613 XMMRegister input = ToDoubleRegister(instr->value()); 3835 XMMRegister input = ToDoubleRegister(instr->value());
3614 XMMRegister result = ToDoubleRegister(instr->result()); 3836 XMMRegister result = ToDoubleRegister(instr->result());
3615 Register temp1 = ToRegister(instr->temp1()); 3837 Register temp1 = ToRegister(instr->temp1());
3616 Register temp2 = ToRegister(instr->temp2()); 3838 Register temp2 = ToRegister(instr->temp2());
3617 3839
3618 MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2); 3840 MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
3619 } 3841 }
3620 3842
3621 3843
3622 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) { 3844 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
(...skipping 248 matching lines...)
3871 int constant_index = 4093 int constant_index =
3872 ToInteger32(LConstantOperand::cast(instr->index())); 4094 ToInteger32(LConstantOperand::cast(instr->index()));
3873 if (instr->hydrogen()->length()->representation().IsTagged()) { 4095 if (instr->hydrogen()->length()->representation().IsTagged()) {
3874 __ cmp(ToOperand(instr->length()), 4096 __ cmp(ToOperand(instr->length()),
3875 Immediate(Smi::FromInt(constant_index))); 4097 Immediate(Smi::FromInt(constant_index)));
3876 } else { 4098 } else {
3877 __ cmp(ToOperand(instr->length()), Immediate(constant_index)); 4099 __ cmp(ToOperand(instr->length()), Immediate(constant_index));
3878 } 4100 }
3879 DeoptimizeIf(below_equal, instr->environment()); 4101 DeoptimizeIf(below_equal, instr->environment());
3880 } else { 4102 } else {
4103 if (instr->hydrogen()->index()->representation().IsTagged() &&
4104 !instr->hydrogen()->index()->type().IsSmi()) {
4105 __ test(ToRegister(instr->index()), Immediate(kSmiTagMask));
4106 DeoptimizeIf(not_zero, instr->environment());
4107 }
3881 __ cmp(ToRegister(instr->index()), ToOperand(instr->length())); 4108 __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
3882 DeoptimizeIf(above_equal, instr->environment()); 4109 DeoptimizeIf(above_equal, instr->environment());
3883 } 4110 }
3884 } 4111 }
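The newly added lines deoptimize when a tagged index is not actually a smi before the cmp against the length; without that check the comparison would operate on a heap-object pointer rather than an integer. On 32-bit V8 (assumed here) a smi stores its value shifted left by one with tag bit 0 clear, which is what testing against kSmiTagMask detects. A sketch of the encoding (helper names illustrative):

  #include <cstdint>

  constexpr uint32_t kSmiTagMask = 1;  // bit 0: 0 = smi, 1 = heap object

  bool IsSmi(uint32_t tagged)      { return (tagged & kSmiTagMask) == 0; }
  uint32_t IntToSmi(int32_t value) { return static_cast<uint32_t>(value) << 1; }
  int32_t SmiToInt(uint32_t tagged) {
    return static_cast<int32_t>(tagged) >> 1;  // arithmetic shift restores the value
  }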
3885 4112
3886 4113
3887 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { 4114 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
3888 ElementsKind elements_kind = instr->elements_kind(); 4115 ElementsKind elements_kind = instr->elements_kind();
3889 LOperand* key = instr->key(); 4116 LOperand* key = instr->key();
3890 if (!key->IsConstantOperand() && 4117 if (!key->IsConstantOperand() &&
3891 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(), 4118 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
3892 elements_kind)) { 4119 elements_kind)) {
3893 __ SmiUntag(ToRegister(key)); 4120 __ SmiUntag(ToRegister(key));
3894 } 4121 }
3895 Operand operand(BuildFastArrayOperand( 4122 Operand operand(BuildFastArrayOperand(
3896 instr->elements(), 4123 instr->elements(),
3897 key, 4124 key,
3898 instr->hydrogen()->key()->representation(), 4125 instr->hydrogen()->key()->representation(),
3899 elements_kind, 4126 elements_kind,
3900 0, 4127 0,
3901 instr->additional_index())); 4128 instr->additional_index()));
3902 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { 4129 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
4130 CpuFeatures::Scope scope(SSE2);
3903 __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value())); 4131 __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
3904 __ movss(operand, xmm0); 4132 __ movss(operand, xmm0);
3905 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { 4133 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
4134 CpuFeatures::Scope scope(SSE2);
3906 __ movdbl(operand, ToDoubleRegister(instr->value())); 4135 __ movdbl(operand, ToDoubleRegister(instr->value()));
3907 } else { 4136 } else {
3908 Register value = ToRegister(instr->value()); 4137 Register value = ToRegister(instr->value());
3909 switch (elements_kind) { 4138 switch (elements_kind) {
3910 case EXTERNAL_PIXEL_ELEMENTS: 4139 case EXTERNAL_PIXEL_ELEMENTS:
3911 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: 4140 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3912 case EXTERNAL_BYTE_ELEMENTS: 4141 case EXTERNAL_BYTE_ELEMENTS:
3913 __ mov_b(operand, value); 4142 __ mov_b(operand, value);
3914 break; 4143 break;
3915 case EXTERNAL_SHORT_ELEMENTS: 4144 case EXTERNAL_SHORT_ELEMENTS:
(...skipping 15 matching lines...) Expand all
3931 case DICTIONARY_ELEMENTS: 4160 case DICTIONARY_ELEMENTS:
3932 case NON_STRICT_ARGUMENTS_ELEMENTS: 4161 case NON_STRICT_ARGUMENTS_ELEMENTS:
3933 UNREACHABLE(); 4162 UNREACHABLE();
3934 break; 4163 break;
3935 } 4164 }
3936 } 4165 }
3937 } 4166 }
3938 4167
3939 4168
3940 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { 4169 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4170 CpuFeatures::Scope scope(SSE2);
3941 XMMRegister value = ToDoubleRegister(instr->value()); 4171 XMMRegister value = ToDoubleRegister(instr->value());
3942 4172
3943 if (instr->NeedsCanonicalization()) { 4173 if (instr->NeedsCanonicalization()) {
3944 Label have_value; 4174 Label have_value;
3945 4175
3946 __ ucomisd(value, value); 4176 __ ucomisd(value, value);
3947 __ j(parity_odd, &have_value); // Jump if not NaN. 4177 __ j(parity_odd, &have_value); // Jump if not NaN.
3948 4178
3949 ExternalReference canonical_nan_reference = 4179 ExternalReference canonical_nan_reference =
3950 ExternalReference::address_of_canonical_non_hole_nan(); 4180 ExternalReference::address_of_canonical_non_hole_nan();
(...skipping 230 matching lines...)
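On the canonicalization above: ucomisd raises the parity flag only for unordered operands, i.e. when the value is NaN, in which case the store is redirected to the canonical non-hole NaN. The point is that V8 reserves one particular NaN bit pattern to mark holes in fast double arrays, so arbitrary NaNs must not be written verbatim. The bit-level NaN test, for reference (standalone C++, helper name illustrative):

  #include <cstdint>
  #include <cstring>

  // IEEE-754 double: NaN <=> exponent field all ones and mantissa nonzero.
  // One such pattern is reserved as the array hole, hence canonicalization.
  bool IsAnyNaN(double d) {
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);
    return (bits & 0x7FF0000000000000ull) == 0x7FF0000000000000ull &&
           (bits & 0x000FFFFFFFFFFFFFull) != 0;
  }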
4181 4411
4182 void LCodeGen::DoStringAdd(LStringAdd* instr) { 4412 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4183 EmitPushTaggedOperand(instr->left()); 4413 EmitPushTaggedOperand(instr->left());
4184 EmitPushTaggedOperand(instr->right()); 4414 EmitPushTaggedOperand(instr->right());
4185 StringAddStub stub(NO_STRING_CHECK_IN_STUB); 4415 StringAddStub stub(NO_STRING_CHECK_IN_STUB);
4186 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 4416 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4187 } 4417 }
4188 4418
4189 4419
4190 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { 4420 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4191 LOperand* input = instr->value(); 4421 if (CpuFeatures::IsSupported(SSE2)) {
4192 ASSERT(input->IsRegister() || input->IsStackSlot()); 4422 CpuFeatures::Scope scope(SSE2);
4193 LOperand* output = instr->result(); 4423 LOperand* input = instr->value();
4194 ASSERT(output->IsDoubleRegister()); 4424 ASSERT(input->IsRegister() || input->IsStackSlot());
4195 __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); 4425 LOperand* output = instr->result();
4426 ASSERT(output->IsDoubleRegister());
4427 __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
4428 } else {
4429 UNREACHABLE();
4430 }
4196 } 4431 }
4197 4432
4198 4433
4199 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { 4434 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4435 CpuFeatures::Scope scope(SSE2);
4200 LOperand* input = instr->value(); 4436 LOperand* input = instr->value();
4201 LOperand* output = instr->result(); 4437 LOperand* output = instr->result();
4202 LOperand* temp = instr->temp(); 4438 LOperand* temp = instr->temp();
4203 4439
4204 __ LoadUint32(ToDoubleRegister(output), 4440 __ LoadUint32(ToDoubleRegister(output),
4205 ToRegister(input), 4441 ToRegister(input),
4206 ToDoubleRegister(temp)); 4442 ToDoubleRegister(temp));
4207 } 4443 }
4208 4444
4209 4445
(...skipping 57 matching lines...)
4267 PushSafepointRegistersScope scope(this); 4503 PushSafepointRegistersScope scope(this);
4268 4504
4269 Label done; 4505 Label done;
4270 4506
4271 if (signedness == SIGNED_INT32) { 4507 if (signedness == SIGNED_INT32) {
4272 // There was overflow, so bits 30 and 31 of the original integer 4508 // There was overflow, so bits 30 and 31 of the original integer
4273 // disagree. Try to allocate a heap number in new space and store 4509 // disagree. Try to allocate a heap number in new space and store
4274 // the value in there. If that fails, call the runtime system. 4510 // the value in there. If that fails, call the runtime system.
4275 __ SmiUntag(reg); 4511 __ SmiUntag(reg);
4276 __ xor_(reg, 0x80000000); 4512 __ xor_(reg, 0x80000000);
4277 __ cvtsi2sd(xmm0, Operand(reg)); 4513 if (CpuFeatures::IsSupported(SSE2)) {
4514 CpuFeatures::Scope feature_scope(SSE2);
4515 __ cvtsi2sd(xmm0, Operand(reg));
4516 } else {
4517 __ push(reg);
4518 __ fild_s(Operand(esp, 0));
4519 __ pop(reg);
4520 }
4278 } else { 4521 } else {
4279 __ LoadUint32(xmm0, reg, xmm1); 4522 if (CpuFeatures::IsSupported(SSE2)) {
4523 CpuFeatures::Scope feature_scope(SSE2);
4524 __ LoadUint32(xmm0, reg, xmm1);
4525 } else {
4526 UNREACHABLE();
4527 }
4280 } 4528 }
4281 4529
4282 if (FLAG_inline_new) { 4530 if (FLAG_inline_new) {
4283 __ AllocateHeapNumber(reg, tmp, no_reg, &slow); 4531 __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
4284 __ jmp(&done, Label::kNear); 4532 __ jmp(&done, Label::kNear);
4285 } 4533 }
4286 4534
4287 // Slow case: Call the runtime system to do the number allocation. 4535 // Slow case: Call the runtime system to do the number allocation.
4288 __ bind(&slow); 4536 __ bind(&slow);
4289 4537
4290 // TODO(3095996): Put a valid pointer value in the stack slot where the result 4538 // TODO(3095996): Put a valid pointer value in the stack slot where the result
4291 // register is stored, as this register is in the pointer map, but contains an 4539 // register is stored, as this register is in the pointer map, but contains an
4292 // integer value. 4540 // integer value.
4293 __ StoreToSafepointRegisterSlot(reg, Immediate(0)); 4541 __ StoreToSafepointRegisterSlot(reg, Immediate(0));
4294 // NumberTagI and NumberTagD use the context from the frame, rather than 4542 // NumberTagI and NumberTagD use the context from the frame, rather than
4295 // the environment's HContext or HInlinedContext value. 4543 // the environment's HContext or HInlinedContext value.
4296 // They only call Runtime::kAllocateHeapNumber. 4544 // They only call Runtime::kAllocateHeapNumber.
4297 // The corresponding HChange instructions are added in a phase that does 4545 // The corresponding HChange instructions are added in a phase that does
4298 // not have easy access to the local context. 4546 // not have easy access to the local context.
4299 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); 4547 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4300 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 4548 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4301 RecordSafepointWithRegisters( 4549 RecordSafepointWithRegisters(
4302 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); 4550 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4303 if (!reg.is(eax)) __ mov(reg, eax); 4551 if (!reg.is(eax)) __ mov(reg, eax);
4304 4552
4305 // Done. Put the value in xmm0 into the value of the allocated heap 4553 // Done. Put the value in xmm0 into the value of the allocated heap
4306 // number. 4554 // number.
4307 __ bind(&done); 4555 __ bind(&done);
4308 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0); 4556 if (CpuFeatures::IsSupported(SSE2)) {
4557 CpuFeatures::Scope feature_scope(SSE2);
4558 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
4559 } else {
4560 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
4561 }
4309 __ StoreToSafepointRegisterSlot(reg, reg); 4562 __ StoreToSafepointRegisterSlot(reg, reg);
4310 } 4563 }
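About the SmiUntag/xor pair at the top of DoDeferredNumberTagI: tagging an int32 v computes v << 1, and when v needs more than 31 bits that shift overflows (the "bits 30 and 31 disagree" comment above). Shifting back arithmetically and flipping bit 31 recovers the original value exactly. A self-contained check of that arithmetic (assuming x86's arithmetic right shift on signed values):

  #include <cassert>
  #include <cstdint>

  // Undo an overflowed smi tag: w holds (v << 1) truncated to 32 bits.
  int32_t RecoverFromOverflowedSmiTag(int32_t w) {
    return static_cast<int32_t>(static_cast<uint32_t>(w >> 1) ^ 0x80000000u);
  }

  int main() {
    int32_t v = 0x50000000;  // too wide for a 31-bit smi payload
    int32_t w = static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
    assert(RecoverFromOverflowedSmiTag(w) == v);
    return 0;
  }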
4311 4564
4312 4565
4313 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { 4566 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4314 class DeferredNumberTagD: public LDeferredCode { 4567 class DeferredNumberTagD: public LDeferredCode {
4315 public: 4568 public:
4316 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) 4569 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4317 : LDeferredCode(codegen), instr_(instr) { } 4570 : LDeferredCode(codegen), instr_(instr) { }
4318 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } 4571 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
4319 virtual LInstruction* instr() { return instr_; } 4572 virtual LInstruction* instr() { return instr_; }
4320 private: 4573 private:
4321 LNumberTagD* instr_; 4574 LNumberTagD* instr_;
4322 }; 4575 };
4323 4576
4324 XMMRegister input_reg = ToDoubleRegister(instr->value());
4325 Register reg = ToRegister(instr->result()); 4577 Register reg = ToRegister(instr->result());
4326 Register tmp = ToRegister(instr->temp()); 4578 Register tmp = ToRegister(instr->temp());
4327 4579
4328 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); 4580 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4329 if (FLAG_inline_new) { 4581 if (FLAG_inline_new) {
4330 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); 4582 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
4331 } else { 4583 } else {
4332 __ jmp(deferred->entry()); 4584 __ jmp(deferred->entry());
4333 } 4585 }
4334 __ bind(deferred->exit()); 4586 __ bind(deferred->exit());
4335 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); 4587 if (CpuFeatures::IsSupported(SSE2)) {
4588 CpuFeatures::Scope scope(SSE2);
4589 XMMRegister input_reg = ToDoubleRegister(instr->value());
4590 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
4591 } else {
4592 if (!IsX87TopOfStack(instr->value())) {
4593 __ fld_d(ToOperand(instr->value()));
4594 }
4595 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
4596 }
4336 } 4597 }
4337 4598
4338 4599
4339 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { 4600 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4340 // TODO(3095996): Get rid of this. For now, we need to make the 4601 // TODO(3095996): Get rid of this. For now, we need to make the
4341 // result register contain a valid pointer because it is already 4602 // result register contain a valid pointer because it is already
4342 // contained in the register pointer map. 4603 // contained in the register pointer map.
4343 Register reg = ToRegister(instr->result()); 4604 Register reg = ToRegister(instr->result());
4344 __ Set(reg, Immediate(0)); 4605 __ Set(reg, Immediate(0));
4345 4606
(...skipping 136 matching lines...)
4482 __ cmp(input_reg, 0x80000000u); 4743 __ cmp(input_reg, 0x80000000u);
4483 __ j(not_equal, &done); 4744 __ j(not_equal, &done);
4484 // Check if the input was 0x80000000 (kMinInt). 4745 // Check if the input was 0x80000000 (kMinInt).
4485 // If no, then we got an overflow and we deoptimize. 4746 // If no, then we got an overflow and we deoptimize.
4486 ExternalReference min_int = ExternalReference::address_of_min_int(); 4747 ExternalReference min_int = ExternalReference::address_of_min_int();
4487 __ movdbl(xmm_temp, Operand::StaticVariable(min_int)); 4748 __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
4488 __ ucomisd(xmm_temp, xmm0); 4749 __ ucomisd(xmm_temp, xmm0);
4489 DeoptimizeIf(not_equal, instr->environment()); 4750 DeoptimizeIf(not_equal, instr->environment());
4490 DeoptimizeIf(parity_even, instr->environment()); // NaN. 4751 DeoptimizeIf(parity_even, instr->environment()); // NaN.
4491 } 4752 }
4492 } else { 4753 } else if (CpuFeatures::IsSupported(SSE2)) {
4754 CpuFeatures::Scope scope(SSE2);
4493 // Deoptimize if we don't have a heap number. 4755 // Deoptimize if we don't have a heap number.
4494 __ RecordComment("Deferred TaggedToI: not a heap number"); 4756 __ RecordComment("Deferred TaggedToI: not a heap number");
4495 DeoptimizeIf(not_equal, instr->environment()); 4757 DeoptimizeIf(not_equal, instr->environment());
4496 4758
4497 XMMRegister xmm_temp = ToDoubleRegister(instr->temp()); 4759 XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
4498 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); 4760 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
4499 __ cvttsd2si(input_reg, Operand(xmm0)); 4761 __ cvttsd2si(input_reg, Operand(xmm0));
4500 __ cvtsi2sd(xmm_temp, Operand(input_reg)); 4762 __ cvtsi2sd(xmm_temp, Operand(input_reg));
4501 __ ucomisd(xmm0, xmm_temp); 4763 __ ucomisd(xmm0, xmm_temp);
4502 __ RecordComment("Deferred TaggedToI: lost precision"); 4764 __ RecordComment("Deferred TaggedToI: lost precision");
4503 DeoptimizeIf(not_equal, instr->environment()); 4765 DeoptimizeIf(not_equal, instr->environment());
4504 __ RecordComment("Deferred TaggedToI: NaN"); 4766 __ RecordComment("Deferred TaggedToI: NaN");
4505 DeoptimizeIf(parity_even, instr->environment()); // NaN. 4767 DeoptimizeIf(parity_even, instr->environment()); // NaN.
4506 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4768 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4507 __ test(input_reg, Operand(input_reg)); 4769 __ test(input_reg, Operand(input_reg));
4508 __ j(not_zero, &done); 4770 __ j(not_zero, &done);
4509 __ movmskpd(input_reg, xmm0); 4771 __ movmskpd(input_reg, xmm0);
4510 __ and_(input_reg, 1); 4772 __ and_(input_reg, 1);
4511 __ RecordComment("Deferred TaggedToI: minus zero"); 4773 __ RecordComment("Deferred TaggedToI: minus zero");
4512 DeoptimizeIf(not_zero, instr->environment()); 4774 DeoptimizeIf(not_zero, instr->environment());
4513 } 4775 }
4776 } else {
4777 UNREACHABLE();
4514 } 4778 }
4515 __ bind(&done); 4779 __ bind(&done);
4516 } 4780 }
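The non-truncating SSE2 branch above is a round-trip test: convert the heap number to int32 (cvttsd2si), convert back (cvtsi2sd), and ucomisd the two; any mismatch or NaN means the double was not exactly an int32, so the code deoptimizes (minus zero gets its own movmskpd check). The same predicate in portable C++ (helper name illustrative):

  #include <cmath>
  #include <cstdint>

  bool DoubleToInt32Exact(double d, int32_t* out) {
    if (std::isnan(d)) return false;                          // ucomisd: unordered
    if (d < -2147483648.0 || d > 2147483647.0) return false;  // outside int32 range
    int32_t truncated = static_cast<int32_t>(d);              // cvttsd2si analogue
    if (static_cast<double>(truncated) != d) return false;    // precision lost
    *out = truncated;
    return true;
  }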
4517 4781
4518 4782
4519 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { 4783 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4520 class DeferredTaggedToI: public LDeferredCode { 4784 class DeferredTaggedToI: public LDeferredCode {
4521 public: 4785 public:
4522 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) 4786 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4523 : LDeferredCode(codegen), instr_(instr) { } 4787 : LDeferredCode(codegen), instr_(instr) { }
(...skipping 22 matching lines...)
4546 4810
4547 4811
4548 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { 4812 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4549 LOperand* input = instr->value(); 4813 LOperand* input = instr->value();
4550 ASSERT(input->IsRegister()); 4814 ASSERT(input->IsRegister());
4551 LOperand* temp = instr->temp(); 4815 LOperand* temp = instr->temp();
4552 ASSERT(temp == NULL || temp->IsRegister()); 4816 ASSERT(temp == NULL || temp->IsRegister());
4553 LOperand* result = instr->result(); 4817 LOperand* result = instr->result();
4554 ASSERT(result->IsDoubleRegister()); 4818 ASSERT(result->IsDoubleRegister());
4555 4819
4556 Register input_reg = ToRegister(input); 4820 if (CpuFeatures::IsSupported(SSE2)) {
4557 XMMRegister result_reg = ToDoubleRegister(result); 4821 CpuFeatures::Scope scope(SSE2);
4822 Register input_reg = ToRegister(input);
4823 XMMRegister result_reg = ToDoubleRegister(result);
4558 4824
4559 bool deoptimize_on_minus_zero = 4825 bool deoptimize_on_minus_zero =
4560 instr->hydrogen()->deoptimize_on_minus_zero(); 4826 instr->hydrogen()->deoptimize_on_minus_zero();
4561 Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg; 4827 Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
4562 4828
4563 EmitNumberUntagD(input_reg, 4829 EmitNumberUntagD(input_reg,
4564 temp_reg, 4830 temp_reg,
4565 result_reg, 4831 result_reg,
4566 instr->hydrogen()->deoptimize_on_undefined(), 4832 instr->hydrogen()->deoptimize_on_undefined(),
4567 deoptimize_on_minus_zero, 4833 deoptimize_on_minus_zero,
4568 instr->environment()); 4834 instr->environment());
4835 } else {
4836 UNIMPLEMENTED();
4837 }
4569 } 4838 }
4570 4839
4571 4840
4572 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { 4841 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4573 LOperand* input = instr->value(); 4842 LOperand* input = instr->value();
4574 ASSERT(input->IsDoubleRegister()); 4843 ASSERT(input->IsDoubleRegister());
4575 LOperand* result = instr->result(); 4844 LOperand* result = instr->result();
4576 ASSERT(result->IsRegister()); 4845 ASSERT(result->IsRegister());
4846 CpuFeatures::Scope scope(SSE2);
4577 4847
4578 XMMRegister input_reg = ToDoubleRegister(input); 4848 XMMRegister input_reg = ToDoubleRegister(input);
4579 Register result_reg = ToRegister(result); 4849 Register result_reg = ToRegister(result);
4580 4850
4581 if (instr->truncating()) { 4851 if (instr->truncating()) {
4582 // Performs a truncating conversion of a floating point number as used by 4852 // Performs a truncating conversion of a floating point number as used by
4583 // the JS bitwise operations. 4853 // the JS bitwise operations.
4584 __ cvttsd2si(result_reg, Operand(input_reg)); 4854 __ cvttsd2si(result_reg, Operand(input_reg));
4585 __ cmp(result_reg, 0x80000000u); 4855 __ cmp(result_reg, 0x80000000u);
4586 if (CpuFeatures::IsSupported(SSE3)) { 4856 if (CpuFeatures::IsSupported(SSE3)) {
(...skipping 169 matching lines...)
4756 Operand operand = ToOperand(instr->value()); 5026 Operand operand = ToOperand(instr->value());
4757 __ cmp(operand, target); 5027 __ cmp(operand, target);
4758 } 5028 }
4759 DeoptimizeIf(not_equal, instr->environment()); 5029 DeoptimizeIf(not_equal, instr->environment());
4760 } 5030 }
4761 5031
4762 5032
4763 void LCodeGen::DoCheckMapCommon(Register reg, 5033 void LCodeGen::DoCheckMapCommon(Register reg,
4764 Handle<Map> map, 5034 Handle<Map> map,
4765 CompareMapMode mode, 5035 CompareMapMode mode,
4766 LEnvironment* env) { 5036 LInstruction* instr) {
4767 Label success; 5037 Label success;
4768 __ CompareMap(reg, map, &success, mode); 5038 __ CompareMap(reg, map, &success, mode);
4769 DeoptimizeIf(not_equal, env); 5039 DeoptimizeIf(not_equal, instr->environment());
4770 __ bind(&success); 5040 __ bind(&success);
4771 } 5041 }
4772 5042
4773 5043
4774 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { 5044 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4775 LOperand* input = instr->value(); 5045 LOperand* input = instr->value();
4776 ASSERT(input->IsRegister()); 5046 ASSERT(input->IsRegister());
4777 Register reg = ToRegister(input); 5047 Register reg = ToRegister(input);
4778 5048
4779 Label success; 5049 Label success;
4780 SmallMapList* map_set = instr->hydrogen()->map_set(); 5050 SmallMapList* map_set = instr->hydrogen()->map_set();
4781 for (int i = 0; i < map_set->length() - 1; i++) { 5051 for (int i = 0; i < map_set->length() - 1; i++) {
4782 Handle<Map> map = map_set->at(i); 5052 Handle<Map> map = map_set->at(i);
4783 __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP); 5053 __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP);
4784 __ j(equal, &success); 5054 __ j(equal, &success);
4785 } 5055 }
4786 Handle<Map> map = map_set->last(); 5056 Handle<Map> map = map_set->last();
4787 DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment()); 5057 DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
4788 __ bind(&success); 5058 __ bind(&success);
4789 } 5059 }
4790 5060
4791 5061
4792 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { 5062 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5063 ASSERT(CpuFeatures::IsSupported(SSE2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
5064 CpuFeatures::Scope scope(SSE2);
4793 XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); 5065 XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
4794 Register result_reg = ToRegister(instr->result()); 5066 Register result_reg = ToRegister(instr->result());
4795 __ ClampDoubleToUint8(value_reg, xmm0, result_reg); 5067 __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
4796 } 5068 }
4797 5069
4798 5070
4799 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { 5071 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4800 ASSERT(instr->unclamped()->Equals(instr->result())); 5072 ASSERT(instr->unclamped()->Equals(instr->result()));
4801 Register value_reg = ToRegister(instr->result()); 5073 Register value_reg = ToRegister(instr->result());
4802 __ ClampUint8(value_reg); 5074 __ ClampUint8(value_reg);
4803 } 5075 }
4804 5076
4805 5077
4806 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { 5078 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5079 ASSERT(CpuFeatures::IsSupported(SSE2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
5080 CpuFeatures::Scope scope(SSE2);
5081
4807 ASSERT(instr->unclamped()->Equals(instr->result())); 5082 ASSERT(instr->unclamped()->Equals(instr->result()));
4808 Register input_reg = ToRegister(instr->unclamped()); 5083 Register input_reg = ToRegister(instr->unclamped());
4809 Label is_smi, done, heap_number; 5084 Label is_smi, done, heap_number;
4810 5085
4811 __ JumpIfSmi(input_reg, &is_smi); 5086 __ JumpIfSmi(input_reg, &is_smi);
4812 5087
4813 // Check for heap number 5088 // Check for heap number
4814 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), 5089 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
4815 factory()->heap_number_map()); 5090 factory()->heap_number_map());
4816 __ j(equal, &heap_number, Label::kNear); 5091 __ j(equal, &heap_number, Label::kNear);
(...skipping 26 matching lines...)
4843 5118
4844 Handle<JSObject> holder = instr->holder(); 5119 Handle<JSObject> holder = instr->holder();
4845 Handle<JSObject> current_prototype = instr->prototype(); 5120 Handle<JSObject> current_prototype = instr->prototype();
4846 5121
4847 // Load prototype object. 5122 // Load prototype object.
4848 __ LoadHeapObject(reg, current_prototype); 5123 __ LoadHeapObject(reg, current_prototype);
4849 5124
4850 // Check prototype maps up to the holder. 5125 // Check prototype maps up to the holder.
4851 while (!current_prototype.is_identical_to(holder)) { 5126 while (!current_prototype.is_identical_to(holder)) {
4852 DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()), 5127 DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
4853 ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); 5128 ALLOW_ELEMENT_TRANSITION_MAPS, instr);
4854 5129
4855 current_prototype = 5130 current_prototype =
4856 Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype())); 5131 Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
4857 // Load next prototype object. 5132 // Load next prototype object.
4858 __ LoadHeapObject(reg, current_prototype); 5133 __ LoadHeapObject(reg, current_prototype);
4859 } 5134 }
4860 5135
4861 // Check the holder map. 5136 // Check the holder map.
4862 DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()), 5137 DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
4863 ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); 5138 ALLOW_ELEMENT_TRANSITION_MAPS, instr);
4864 } 5139 }
4865 5140
4866 5141
4867 void LCodeGen::DoAllocateObject(LAllocateObject* instr) { 5142 void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
4868 class DeferredAllocateObject: public LDeferredCode { 5143 class DeferredAllocateObject: public LDeferredCode {
4869 public: 5144 public:
4870 DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr) 5145 DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
4871 : LDeferredCode(codegen), instr_(instr) { } 5146 : LDeferredCode(codegen), instr_(instr) { }
4872 virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); } 5147 virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
4873 virtual LInstruction* instr() { return instr_; } 5148 virtual LInstruction* instr() { return instr_; }
(...skipping 516 matching lines...)
5390 __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset)); 5665 __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
5391 5666
5392 // Check the marker in the calling frame. 5667 // Check the marker in the calling frame.
5393 __ bind(&check_frame_marker); 5668 __ bind(&check_frame_marker);
5394 __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset), 5669 __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
5395 Immediate(Smi::FromInt(StackFrame::CONSTRUCT))); 5670 Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
5396 } 5671 }
5397 5672
5398 5673
5399 void LCodeGen::EnsureSpaceForLazyDeopt() { 5674 void LCodeGen::EnsureSpaceForLazyDeopt() {
5400 // Ensure that we have enough space after the previous lazy-bailout 5675 if (!info()->IsStub()) {
5401 // instruction for patching the code here. 5676 // Ensure that we have enough space after the previous lazy-bailout
5402 int current_pc = masm()->pc_offset(); 5677 // instruction for patching the code here.
5403 int patch_size = Deoptimizer::patch_size(); 5678 int current_pc = masm()->pc_offset();
5404 if (current_pc < last_lazy_deopt_pc_ + patch_size) { 5679 int patch_size = Deoptimizer::patch_size();
5405 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc; 5680 if (current_pc < last_lazy_deopt_pc_ + patch_size) {
5406 __ Nop(padding_size); 5681 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
5682 __ Nop(padding_size);
5683 }
5407 } 5684 }
5408 last_lazy_deopt_pc_ = masm()->pc_offset(); 5685 last_lazy_deopt_pc_ = masm()->pc_offset();
5409 } 5686 }
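The new IsStub() early-out skips the padding for stubs, presumably because stubs are never lazily deoptimized and so have no sites to patch. For ordinary code the rule is unchanged: if fewer than patch_size bytes were emitted since the last lazy-deopt site, nops fill the gap so the deoptimizer can later plant a call there without clobbering the neighboring site. The padding arithmetic, isolated:

  // Nop bytes required so at least patch_size bytes separate patchable sites.
  int LazyDeoptPadding(int current_pc, int last_lazy_deopt_pc, int patch_size) {
    int emitted = current_pc - last_lazy_deopt_pc;
    return emitted < patch_size ? patch_size - emitted : 0;
  }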
5410 5687
5411 5688
5412 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { 5689 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5413 EnsureSpaceForLazyDeopt(); 5690 EnsureSpaceForLazyDeopt();
5414 ASSERT(instr->HasEnvironment()); 5691 ASSERT(instr->HasEnvironment());
5415 LEnvironment* env = instr->environment(); 5692 LEnvironment* env = instr->environment();
5416 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); 5693 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
(...skipping 203 matching lines...)
5620 FixedArray::kHeaderSize - kPointerSize)); 5897 FixedArray::kHeaderSize - kPointerSize));
5621 __ bind(&done); 5898 __ bind(&done);
5622 } 5899 }
5623 5900
5624 5901
5625 #undef __ 5902 #undef __
5626 5903
5627 } } // namespace v8::internal 5904 } } // namespace v8::internal
5628 5905
5629 #endif // V8_TARGET_ARCH_IA32 5906 #endif // V8_TARGET_ARCH_IA32