| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 12 matching lines...) |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #include "v8.h" | 28 #include "v8.h" |
| 29 | 29 |
| 30 #if defined(V8_TARGET_ARCH_IA32) | 30 #if defined(V8_TARGET_ARCH_IA32) |
| 31 | 31 |
| 32 #include "ia32/lithium-codegen-ia32.h" | 32 #include "ia32/lithium-codegen-ia32.h" |
| 33 #include "ic.h" |
| 33 #include "code-stubs.h" | 34 #include "code-stubs.h" |
| 34 #include "deoptimizer.h" | 35 #include "deoptimizer.h" |
| 35 #include "stub-cache.h" | 36 #include "stub-cache.h" |
| 36 #include "codegen.h" | 37 #include "codegen.h" |
| 37 | 38 |
| 38 namespace v8 { | 39 namespace v8 { |
| 39 namespace internal { | 40 namespace internal { |
| 40 | 41 |
| 41 | 42 |
| 42 // When invoking builtins, we need to record the safepoint in the middle of | 43 // When invoking builtins, we need to record the safepoint in the middle of |
| (...skipping 20 matching lines...) |
| 63 Safepoint::DeoptMode deopt_mode_; | 64 Safepoint::DeoptMode deopt_mode_; |
| 64 }; | 65 }; |
| 65 | 66 |
| 66 | 67 |
| 67 #define __ masm()-> | 68 #define __ masm()-> |
| 68 | 69 |
| 69 bool LCodeGen::GenerateCode() { | 70 bool LCodeGen::GenerateCode() { |
| 70 HPhase phase("Z_Code generation", chunk()); | 71 HPhase phase("Z_Code generation", chunk()); |
| 71 ASSERT(is_unused()); | 72 ASSERT(is_unused()); |
| 72 status_ = GENERATING; | 73 status_ = GENERATING; |
| 73 CpuFeatures::Scope scope(SSE2); | |
| 74 | 74 |
| 75 CodeStub::GenerateFPStubs(); | 75 CodeStub::GenerateFPStubs(); |
| 76 | 76 |
| 77 // Open a frame scope to indicate that there is a frame on the stack. The | 77 // Open a frame scope to indicate that there is a frame on the stack. The |
| 78 // MANUAL indicates that the scope shouldn't actually generate code to set up | 78 // MANUAL indicates that the scope shouldn't actually generate code to set up |
| 79 // the frame (that is done in GeneratePrologue). | 79 // the frame (that is done in GeneratePrologue). |
| 80 FrameScope frame_scope(masm_, StackFrame::MANUAL); | 80 FrameScope frame_scope(masm_, StackFrame::MANUAL); |
| 81 | 81 |
| 82 dynamic_frame_alignment_ = (chunk()->num_double_slots() > 2 && | 82 dynamic_frame_alignment_ = info()->IsOptimizing() && |
| 83 !chunk()->graph()->is_recursive()) || | 83 ((chunk()->num_double_slots() > 2 && |
| 84 !info()->osr_ast_id().IsNone(); | 84 !chunk()->graph()->is_recursive()) || |
| 85 !info()->osr_ast_id().IsNone()); |
| 85 | 86 |
| 86 return GeneratePrologue() && | 87 return GeneratePrologue() && |
| 87 GenerateBody() && | 88 GenerateBody() && |
| 88 GenerateDeferredCode() && | 89 GenerateDeferredCode() && |
| 89 GenerateSafepointTable(); | 90 GenerateSafepointTable(); |
| 90 } | 91 } |
| 91 | 92 |
| 92 | 93 |
| 93 void LCodeGen::FinishCode(Handle<Code> code) { | 94 void LCodeGen::FinishCode(Handle<Code> code) { |
| 94 ASSERT(is_done()); | 95 ASSERT(is_done()); |
| 95 code->set_stack_slots(GetStackSlotCount()); | 96 code->set_stack_slots(GetStackSlotCount()); |
| 96 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); | 97 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); |
| 97 PopulateDeoptimizationData(code); | 98 PopulateDeoptimizationData(code); |
| 98 Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code); | 99 if (!info()->IsStub()) { |
| 100 Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code); |
| 101 } |
| 99 } | 102 } |
| 100 | 103 |
| 101 | 104 |
| 102 void LCodeGen::Abort(const char* reason) { | 105 void LCodeGen::Abort(const char* reason) { |
| 103 info()->set_bailout_reason(reason); | 106 info()->set_bailout_reason(reason); |
| 104 status_ = ABORTED; | 107 status_ = ABORTED; |
| 105 } | 108 } |
| 106 | 109 |
| 107 | 110 |
| 108 void LCodeGen::Comment(const char* format, ...) { | 111 void LCodeGen::Comment(const char* format, ...) { |
| (...skipping 10 matching lines...) |
| 119 size_t length = builder.position(); | 122 size_t length = builder.position(); |
| 120 Vector<char> copy = Vector<char>::New(length + 1); | 123 Vector<char> copy = Vector<char>::New(length + 1); |
| 121 memcpy(copy.start(), builder.Finalize(), copy.length()); | 124 memcpy(copy.start(), builder.Finalize(), copy.length()); |
| 122 masm()->RecordComment(copy.start()); | 125 masm()->RecordComment(copy.start()); |
| 123 } | 126 } |
| 124 | 127 |
| 125 | 128 |
| 126 bool LCodeGen::GeneratePrologue() { | 129 bool LCodeGen::GeneratePrologue() { |
| 127 ASSERT(is_generating()); | 130 ASSERT(is_generating()); |
| 128 | 131 |
| 129 ProfileEntryHookStub::MaybeCallEntryHook(masm_); | 132 if (info()->IsOptimizing()) { |
| 133 ProfileEntryHookStub::MaybeCallEntryHook(masm_); |
| 130 | 134 |
| 131 #ifdef DEBUG | 135 #ifdef DEBUG |
| 132 if (strlen(FLAG_stop_at) > 0 && | 136 if (strlen(FLAG_stop_at) > 0 && |
| 133 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { | 137 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { |
| 134 __ int3(); | 138 __ int3(); |
| 135 } | 139 } |
| 136 #endif | 140 #endif |
| 137 | 141 |
| 138 // Strict mode functions and builtins need to replace the receiver | 142 // Strict mode functions and builtins need to replace the receiver |
| 139 // with undefined when called as functions (without an explicit | 143 // with undefined when called as functions (without an explicit |
| 140 // receiver object). ecx is zero for method calls and non-zero for | 144 // receiver object). ecx is zero for method calls and non-zero for |
| 141 // function calls. | 145 // function calls. |
| 142 if (!info_->is_classic_mode() || info_->is_native()) { | 146 if (!info_->is_classic_mode() || info_->is_native()) { |
| 143 Label begin; | 147 Label begin; |
| 144 __ bind(&begin); | 148 __ bind(&begin); |
| 145 Label ok; | 149 Label ok; |
| 146 __ test(ecx, Operand(ecx)); | 150 __ test(ecx, Operand(ecx)); |
| 147 __ j(zero, &ok, Label::kNear); | 151 __ j(zero, &ok, Label::kNear); |
| 148 // +1 for return address. | 152 // +1 for return address. |
| 149 int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize; | 153 int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize; |
| 150 __ mov(Operand(esp, receiver_offset), | 154 __ mov(Operand(esp, receiver_offset), |
| 151 Immediate(isolate()->factory()->undefined_value())); | 155 Immediate(isolate()->factory()->undefined_value())); |
| 152 __ bind(&ok); | 156 __ bind(&ok); |
| 153 ASSERT(!FLAG_age_code || | 157 ASSERT(!FLAG_age_code || |
| 154 (kSizeOfOptimizedStrictModePrologue == ok.pos() - begin.pos())); | 158 (kSizeOfOptimizedStrictModePrologue == ok.pos() - begin.pos())); |
| 159 } |
| 155 } | 160 } |
| 156 | 161 |
| 157 | |
| 158 if (dynamic_frame_alignment_) { | 162 if (dynamic_frame_alignment_) { |
| 159 Label begin; | 163 Label begin; |
| 160 __ bind(&begin); | 164 __ bind(&begin); |
| 161 // Move state of dynamic frame alignment into edx. | 165 // Move state of dynamic frame alignment into edx. |
| 162 __ mov(edx, Immediate(kNoAlignmentPadding)); | 166 __ mov(edx, Immediate(kNoAlignmentPadding)); |
| 163 | 167 |
| 164 Label do_not_pad, align_loop; | 168 Label do_not_pad, align_loop; |
| 165 STATIC_ASSERT(kDoubleSize == 2 * kPointerSize); | 169 STATIC_ASSERT(kDoubleSize == 2 * kPointerSize); |
| 166 // Align esp + 4 to a multiple of 2 * kPointerSize. | 170 // Align esp + 4 to a multiple of 2 * kPointerSize. |
| 167 __ test(esp, Immediate(kPointerSize)); | 171 __ test(esp, Immediate(kPointerSize)); |
| (...skipping 10 matching lines...) |
| 178 __ add(Operand(ebx), Immediate(kPointerSize)); | 182 __ add(Operand(ebx), Immediate(kPointerSize)); |
| 179 __ dec(ecx); | 183 __ dec(ecx); |
| 180 __ j(not_zero, &align_loop, Label::kNear); | 184 __ j(not_zero, &align_loop, Label::kNear); |
| 181 __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue)); | 185 __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue)); |
| 182 __ bind(&do_not_pad); | 186 __ bind(&do_not_pad); |
| 183 ASSERT(!FLAG_age_code || | 187 ASSERT(!FLAG_age_code || |
| 184 (kSizeOfOptimizedAlignStackPrologue == | 188 (kSizeOfOptimizedAlignStackPrologue == |
| 185 do_not_pad.pos() - begin.pos())); | 189 do_not_pad.pos() - begin.pos())); |
| 186 } | 190 } |
| 187 | 191 |
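
A note on the alignment test in the hunk above: with 4-byte pointers and a 4-byte-aligned stack, `esp + 4` is a multiple of `2 * kPointerSize` exactly when the `kPointerSize` bit of `esp` is set, which is what `test(esp, Immediate(kPointerSize))` probes. A standalone sketch of that predicate (illustrative only, not V8 code):

```cpp
#include <cstdint>

// With 4-byte pointers, esp is congruent to 0 or 4 (mod 8), so
// esp + 4 is 8-byte (double) aligned exactly when esp's 4-bit is set.
bool EspPlusSlotIsDoubleAligned(uintptr_t esp) {
  const uintptr_t kPointerSize = 4;
  return (esp & kPointerSize) != 0;
}
```
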
| 188 __ push(ebp); // Caller's frame pointer. | 192 if (NeedsEagerFrame()) { |
| 189 __ mov(ebp, esp); | 193 ASSERT(!frame_is_built_); |
| 190 __ push(esi); // Callee's context. | 194 frame_is_built_ = true; |
| 191 __ push(edi); // Callee's JS function. | 195 __ push(ebp); // Caller's frame pointer. |
| 196 __ mov(ebp, esp); |
| 197 if (info()->IsStub()) { |
| 198 __ push(esi); |
| 199 __ push(Immediate(Smi::FromInt(StackFrame::STUB))); |
| 200 } else { |
| 201 __ push(esi); // Callee's context. |
| 202 __ push(edi); // Callee's JS function. |
| 203 } |
| 204 } |
| 192 | 205 |
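
For reference, the two frame shapes the reworked prologue can now build, derived from the push sequences above; the STUB marker presumably lets the stack walker classify the frame even though no JSFunction is present:

```cpp
// ebp-relative stack contents after the prologue (ia32, 4-byte slots):
//
//          JS function frame     stub frame
//  ebp+0   saved ebp             saved ebp
//  ebp-4   esi (context)         esi (context)
//  ebp-8   edi (JS function)     Smi::FromInt(StackFrame::STUB)
```
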
| 193 if (dynamic_frame_alignment_ && FLAG_debug_code) { | 206 if (dynamic_frame_alignment_ && FLAG_debug_code) { |
| 194 __ test(esp, Immediate(kPointerSize)); | 207 __ test(esp, Immediate(kPointerSize)); |
| 195 __ Assert(zero, "frame is expected to be aligned"); | 208 __ Assert(zero, "frame is expected to be aligned"); |
| 196 } | 209 } |
| 197 | 210 |
| 198 // Reserve space for the stack slots needed by the code. | 211 // Reserve space for the stack slots needed by the code. |
| 199 int slots = GetStackSlotCount(); | 212 int slots = GetStackSlotCount(); |
| 200 ASSERT_GE(slots, 1); | 213 ASSERT(slots != 0 || !info()->IsOptimizing()); |
| 201 if (slots == 1) { | 214 if (slots > 0) { |
| 202 if (dynamic_frame_alignment_) { | 215 if (slots == 1) { |
| 203 __ push(edx); | 216 if (dynamic_frame_alignment_) { |
| 217 __ push(edx); |
| 218 } else { |
| 219 __ push(Immediate(kNoAlignmentPadding)); |
| 220 } |
| 204 } else { | 221 } else { |
| 205 __ push(Immediate(kNoAlignmentPadding)); | 222 if (FLAG_debug_code) { |
| 206 } | 223 __ mov(Operand(eax), Immediate(slots)); |
| 207 } else { | 224 Label loop; |
| 208 if (FLAG_debug_code) { | 225 __ bind(&loop); |
| 209 __ mov(Operand(eax), Immediate(slots)); | 226 __ push(Immediate(kSlotsZapValue)); |
| 210 Label loop; | 227 __ dec(eax); |
| 211 __ bind(&loop); | 228 __ j(not_zero, &loop); |
| 212 __ push(Immediate(kSlotsZapValue)); | 229 } else { |
| 213 __ dec(eax); | 230 __ sub(Operand(esp), Immediate(slots * kPointerSize)); |
| 214 __ j(not_zero, &loop); | 231 #ifdef _MSC_VER |
| 215 } else { | 232 // On windows, you may not access the stack more than one page below |
| 216 __ sub(Operand(esp), Immediate(slots * kPointerSize)); | 233 // the most recently mapped page. To make the allocated area randomly |
| 217 #ifdef _MSC_VER | 234 // accessible, we write to each page in turn (the value is irrelevant). |
| 218 // On windows, you may not access the stack more than one page below | 235 const int kPageSize = 4 * KB; |
| 219 // the most recently mapped page. To make the allocated area randomly | 236 for (int offset = slots * kPointerSize - kPageSize; |
| 220 // accessible, we write to each page in turn (the value is irrelevant). | 237 offset > 0; |
| 221 const int kPageSize = 4 * KB; | 238 offset -= kPageSize) { |
| 222 for (int offset = slots * kPointerSize - kPageSize; | 239 __ mov(Operand(esp, offset), eax); |
| 223 offset > 0; | 240 } |
| 224 offset -= kPageSize) { | 241 #endif |
| 225 __ mov(Operand(esp, offset), eax); | |
| 226 } | 242 } |
| 227 #endif | |
| 228 } | |
| 229 | 243 |
| 230 // Store dynamic frame alignment state in the first local. | 244 // Store dynamic frame alignment state in the first local. |
| 231 if (dynamic_frame_alignment_) { | 245 if (dynamic_frame_alignment_) { |
| 232 __ mov(Operand(ebp, | 246 __ mov(Operand(ebp, |
| 233 JavaScriptFrameConstants::kDynamicAlignmentStateOffset), | 247 JavaScriptFrameConstants::kDynamicAlignmentStateOffset), |
| 234 edx); | 248 edx); |
| 235 } else { | 249 } else { |
| 236 __ mov(Operand(ebp, | 250 __ mov(Operand(ebp, |
| 237 JavaScriptFrameConstants::kDynamicAlignmentStateOffset), | 251 JavaScriptFrameConstants::kDynamicAlignmentStateOffset), |
| 238 Immediate(kNoAlignmentPadding)); | 252 Immediate(kNoAlignmentPadding)); |
| 253 } |
| 239 } | 254 } |
| 240 } | 255 } |
| 241 | 256 |
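
The `#ifdef _MSC_VER` loop above deserves a gloss: Windows commits stack pages lazily through a single guard page, so a frame larger than one page must touch every page in order or later accesses fault. A C-level sketch of the same probing pattern (illustrative only; the stored value is irrelevant):

```cpp
// Touch one byte in every page of a freshly reserved stack area so the
// guard-page mechanism commits it page by page, mirroring the loop in
// the prologue above.
const int kPageSize = 4 * 1024;

void ProbeReservedArea(volatile char* base, int size_in_bytes) {
  for (int offset = size_in_bytes - kPageSize; offset > 0;
       offset -= kPageSize) {
    base[offset] = 0;  // Only the access matters, not the value.
  }
}
```
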
| 242 // Possibly allocate a local context. | 257 // Possibly allocate a local context. |
| 243 int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; | 258 int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; |
| 244 if (heap_slots > 0) { | 259 if (heap_slots > 0) { |
| 245 Comment(";;; Allocate local context"); | 260 Comment(";;; Allocate local context"); |
| 246 // Argument to NewContext is the function, which is still in edi. | 261 // Argument to NewContext is the function, which is still in edi. |
| 247 __ push(edi); | 262 __ push(edi); |
| 248 if (heap_slots <= FastNewContextStub::kMaximumSlots) { | 263 if (heap_slots <= FastNewContextStub::kMaximumSlots) { |
| 249 FastNewContextStub stub(heap_slots); | 264 FastNewContextStub stub(heap_slots); |
| 250 __ CallStub(&stub); | 265 __ CallStub(&stub); |
| 251 } else { | 266 } else { |
| 252 __ CallRuntime(Runtime::kNewFunctionContext, 1); | 267 __ CallRuntime(Runtime::kNewFunctionContext, 1); |
| 253 } | 268 } |
| (...skipping 19 matching lines...) |
| 273 context_offset, | 288 context_offset, |
| 274 eax, | 289 eax, |
| 275 ebx, | 290 ebx, |
| 276 kDontSaveFPRegs); | 291 kDontSaveFPRegs); |
| 277 } | 292 } |
| 278 } | 293 } |
| 279 Comment(";;; End allocate local context"); | 294 Comment(";;; End allocate local context"); |
| 280 } | 295 } |
| 281 | 296 |
| 282 // Trace the call. | 297 // Trace the call. |
| 283 if (FLAG_trace) { | 298 if (FLAG_trace && info()->IsOptimizing()) { |
| 284 // We have not executed any compiled code yet, so esi still holds the | 299 // We have not executed any compiled code yet, so esi still holds the |
| 285 // incoming context. | 300 // incoming context. |
| 286 __ CallRuntime(Runtime::kTraceEnter, 0); | 301 __ CallRuntime(Runtime::kTraceEnter, 0); |
| 287 } | 302 } |
| 288 return !is_aborted(); | 303 return !is_aborted(); |
| 289 } | 304 } |
| 290 | 305 |
| 291 | 306 |
| 292 bool LCodeGen::GenerateBody() { | 307 bool LCodeGen::GenerateBody() { |
| 293 ASSERT(is_generating()); | 308 ASSERT(is_generating()); |
| (...skipping 12 matching lines...) |
| 306 instr->CompileToNative(this); | 321 instr->CompileToNative(this); |
| 307 } | 322 } |
| 308 } | 323 } |
| 309 EnsureSpaceForLazyDeopt(); | 324 EnsureSpaceForLazyDeopt(); |
| 310 return !is_aborted(); | 325 return !is_aborted(); |
| 311 } | 326 } |
| 312 | 327 |
| 313 | 328 |
| 314 bool LCodeGen::GenerateDeferredCode() { | 329 bool LCodeGen::GenerateDeferredCode() { |
| 315 ASSERT(is_generating()); | 330 ASSERT(is_generating()); |
| 331 |
| 316 if (deferred_.length() > 0) { | 332 if (deferred_.length() > 0) { |
| 317 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { | 333 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { |
| 318 LDeferredCode* code = deferred_[i]; | 334 LDeferredCode* code = deferred_[i]; |
| 319 __ bind(code->entry()); | 335 __ bind(code->entry()); |
| 336 if (NeedsDeferredFrame()) { |
| 337 Comment(";;; Deferred build frame", |
| 338 code->instruction_index(), |
| 339 code->instr()->Mnemonic()); |
| 340 ASSERT(!frame_is_built_); |
| 341 ASSERT(info()->IsStub()); |
| 342 frame_is_built_ = true; |
| 343 // Build the frame in such a way that esi isn't trashed. |
| 344 __ push(ebp); // Caller's frame pointer. |
| 345 __ push(Operand(ebp, StandardFrameConstants::kContextOffset)); |
| 346 __ push(Immediate(Smi::FromInt(StackFrame::STUB))); |
| 347 __ lea(ebp, Operand(esp, 2 * kPointerSize)); |
| 348 } |
| 320 Comment(";;; Deferred code @%d: %s.", | 349 Comment(";;; Deferred code @%d: %s.", |
| 321 code->instruction_index(), | 350 code->instruction_index(), |
| 322 code->instr()->Mnemonic()); | 351 code->instr()->Mnemonic()); |
| 323 code->Generate(); | 352 code->Generate(); |
| 353 if (NeedsDeferredFrame()) { |
| 354 Comment(";;; Deferred destory frame", |
| 355 code->instruction_index(), |
| 356 code->instr()->Mnemonic()); |
| 357 ASSERT(frame_is_built_); |
| 358 frame_is_built_ = false; |
| 359 __ mov(esp, ebp); |
| 360 __ pop(ebp); |
| 361 } |
| 324 __ jmp(code->exit()); | 362 __ jmp(code->exit()); |
| 325 } | 363 } |
| 326 } | 364 } |
| 327 | 365 |
| 328 // Deferred code is the last part of the instruction sequence. Mark | 366 // Deferred code is the last part of the instruction sequence. Mark |
| 329 // the generated code as done unless we bailed out. | 367 // the generated code as done unless we bailed out. |
| 330 if (!is_aborted()) status_ = DONE; | 368 if (!is_aborted()) status_ = DONE; |
| 331 return !is_aborted(); | 369 return !is_aborted(); |
| 332 } | 370 } |
| 333 | 371 |
| 334 | 372 |
| 335 bool LCodeGen::GenerateSafepointTable() { | 373 bool LCodeGen::GenerateSafepointTable() { |
| 336 ASSERT(is_done()); | 374 ASSERT(is_done()); |
| 375 if (!info()->IsStub()) { |
| 376 // For lazy deoptimization we need space to patch a call after every call. |
| 377 // Ensure there is always space for such patching, even if the code ends |
| 378 // in a call. |
| 379 int target_offset = masm()->pc_offset() + Deoptimizer::patch_size(); |
| 380 while (masm()->pc_offset() < target_offset) { |
| 381 masm()->nop(); |
| 382 } |
| 383 } |
| 337 safepoints_.Emit(masm(), GetStackSlotCount()); | 384 safepoints_.Emit(masm(), GetStackSlotCount()); |
| 338 return !is_aborted(); | 385 return !is_aborted(); |
| 339 } | 386 } |
| 340 | 387 |
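
The padding added to `GenerateSafepointTable` keeps one invariant: the code object must end with at least `Deoptimizer::patch_size()` bytes of instructions so a lazy-deopt call can be patched over the tail. A self-contained model of the loop (the one-byte-per-nop assembler is a stand-in, not the real `MacroAssembler`):

```cpp
// Model of the nop-padding loop: pc_offset advances one byte per nop,
// mirroring masm()->pc_offset() / masm()->nop() in the diff.
struct AssemblerModel {
  int pc_offset = 0;
  void nop() { ++pc_offset; }
};

void EnsurePatchableTail(AssemblerModel* masm, int patch_size) {
  int target_offset = masm->pc_offset + patch_size;
  while (masm->pc_offset < target_offset) {
    masm->nop();
  }
}
```
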
| 341 | 388 |
| 342 Register LCodeGen::ToRegister(int index) const { | 389 Register LCodeGen::ToRegister(int index) const { |
| 343 return Register::FromAllocationIndex(index); | 390 return Register::FromAllocationIndex(index); |
| 344 } | 391 } |
| 345 | 392 |
| 346 | 393 |
| 347 XMMRegister LCodeGen::ToDoubleRegister(int index) const { | 394 XMMRegister LCodeGen::ToDoubleRegister(int index) const { |
| 348 return XMMRegister::FromAllocationIndex(index); | 395 return XMMRegister::FromAllocationIndex(index); |
| 349 } | 396 } |
| 350 | 397 |
| 351 | 398 |
| 399 bool LCodeGen::IsX87TopOfStack(LOperand* op) const { |
| 400 return op->IsDoubleRegister(); |
| 401 } |
| 402 |
| 403 |
| 352 Register LCodeGen::ToRegister(LOperand* op) const { | 404 Register LCodeGen::ToRegister(LOperand* op) const { |
| 353 ASSERT(op->IsRegister()); | 405 ASSERT(op->IsRegister()); |
| 354 return ToRegister(op->index()); | 406 return ToRegister(op->index()); |
| 355 } | 407 } |
| 356 | 408 |
| 357 | 409 |
| 358 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { | 410 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { |
| 359 ASSERT(op->IsDoubleRegister()); | 411 ASSERT(op->IsDoubleRegister()); |
| 360 return ToDoubleRegister(op->index()); | 412 return ToDoubleRegister(op->index()); |
| 361 } | 413 } |
| (...skipping 65 matching lines...) |
| 427 // arguments index points to the first element of a sequence of tagged | 479 // arguments index points to the first element of a sequence of tagged |
| 428 // values on the stack that represent the arguments. This needs to be | 480 // values on the stack that represent the arguments. This needs to be |
| 429 // kept in sync with the LArgumentsElements implementation. | 481 // kept in sync with the LArgumentsElements implementation. |
| 430 *arguments_index = -environment->parameter_count(); | 482 *arguments_index = -environment->parameter_count(); |
| 431 *arguments_count = environment->parameter_count(); | 483 *arguments_count = environment->parameter_count(); |
| 432 | 484 |
| 433 WriteTranslation(environment->outer(), | 485 WriteTranslation(environment->outer(), |
| 434 translation, | 486 translation, |
| 435 arguments_index, | 487 arguments_index, |
| 436 arguments_count); | 488 arguments_count); |
| 437 int closure_id = *info()->closure() != *environment->closure() | 489 bool has_closure_id = !info()->closure().is_null() && |
| 490 *info()->closure() != *environment->closure(); |
| 491 int closure_id = has_closure_id |
| 438 ? DefineDeoptimizationLiteral(environment->closure()) | 492 ? DefineDeoptimizationLiteral(environment->closure()) |
| 439 : Translation::kSelfLiteralId; | 493 : Translation::kSelfLiteralId; |
| 440 switch (environment->frame_type()) { | 494 switch (environment->frame_type()) { |
| 441 case JS_FUNCTION: | 495 case JS_FUNCTION: |
| 442 translation->BeginJSFrame(environment->ast_id(), closure_id, height); | 496 translation->BeginJSFrame(environment->ast_id(), closure_id, height); |
| 443 break; | 497 break; |
| 444 case JS_CONSTRUCT: | 498 case JS_CONSTRUCT: |
| 445 translation->BeginConstructStubFrame(closure_id, translation_size); | 499 translation->BeginConstructStubFrame(closure_id, translation_size); |
| 446 break; | 500 break; |
| 447 case JS_GETTER: | 501 case JS_GETTER: |
| 448 ASSERT(translation_size == 1); | 502 ASSERT(translation_size == 1); |
| 449 ASSERT(height == 0); | 503 ASSERT(height == 0); |
| 450 translation->BeginGetterStubFrame(closure_id); | 504 translation->BeginGetterStubFrame(closure_id); |
| 451 break; | 505 break; |
| 452 case JS_SETTER: | 506 case JS_SETTER: |
| 453 ASSERT(translation_size == 2); | 507 ASSERT(translation_size == 2); |
| 454 ASSERT(height == 0); | 508 ASSERT(height == 0); |
| 455 translation->BeginSetterStubFrame(closure_id); | 509 translation->BeginSetterStubFrame(closure_id); |
| 456 break; | 510 break; |
| 457 case ARGUMENTS_ADAPTOR: | 511 case ARGUMENTS_ADAPTOR: |
| 458 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); | 512 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); |
| 459 break; | 513 break; |
| 514 case STUB: |
| 515 translation->BeginCompiledStubPseudoFrame(Code::KEYED_LOAD_IC); |
| 516 break; |
| 517 default: |
| 518 UNREACHABLE(); |
| 460 } | 519 } |
| 461 | 520 |
| 462 // Inlined frames which push their arguments cause the index to be | 521 // Inlined frames which push their arguments cause the index to be |
| 463 // bumped and another stack area to be used for materialization. | 522 // bumped and another stack area to be used for materialization. |
| 464 if (environment->entry() != NULL && | 523 if (environment->entry() != NULL && |
| 465 environment->entry()->arguments_pushed()) { | 524 environment->entry()->arguments_pushed()) { |
| 466 *arguments_index = *arguments_index < 0 | 525 *arguments_index = *arguments_index < 0 |
| 467 ? GetStackSlotCount() | 526 ? GetStackSlotCount() |
| 468 : *arguments_index + *arguments_count; | 527 : *arguments_index + *arguments_count; |
| 469 *arguments_count = environment->entry()->arguments_count() + 1; | 528 *arguments_count = environment->entry()->arguments_count() + 1; |
| (...skipping 114 matching lines...) |
| 584 int argc, | 643 int argc, |
| 585 LInstruction* instr) { | 644 LInstruction* instr) { |
| 586 ASSERT(instr != NULL); | 645 ASSERT(instr != NULL); |
| 587 ASSERT(instr->HasPointerMap()); | 646 ASSERT(instr->HasPointerMap()); |
| 588 LPointerMap* pointers = instr->pointer_map(); | 647 LPointerMap* pointers = instr->pointer_map(); |
| 589 RecordPosition(pointers->position()); | 648 RecordPosition(pointers->position()); |
| 590 | 649 |
| 591 __ CallRuntime(fun, argc); | 650 __ CallRuntime(fun, argc); |
| 592 | 651 |
| 593 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); | 652 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); |
| 653 |
| 654 ASSERT(info()->is_calling()); |
| 594 } | 655 } |
| 595 | 656 |
| 596 | 657 |
| 597 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, | 658 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, |
| 598 int argc, | 659 int argc, |
| 599 LInstruction* instr, | 660 LInstruction* instr, |
| 600 LOperand* context) { | 661 LOperand* context) { |
| 601 if (context->IsRegister()) { | 662 if (context->IsRegister()) { |
| 602 if (!ToRegister(context).is(esi)) { | 663 if (!ToRegister(context).is(esi)) { |
| 603 __ mov(esi, ToRegister(context)); | 664 __ mov(esi, ToRegister(context)); |
| 604 } | 665 } |
| 605 } else if (context->IsStackSlot()) { | 666 } else if (context->IsStackSlot()) { |
| 606 __ mov(esi, ToOperand(context)); | 667 __ mov(esi, ToOperand(context)); |
| 607 } else if (context->IsConstantOperand()) { | 668 } else if (context->IsConstantOperand()) { |
| 608 HConstant* constant = | 669 HConstant* constant = |
| 609 chunk_->LookupConstant(LConstantOperand::cast(context)); | 670 chunk_->LookupConstant(LConstantOperand::cast(context)); |
| 610 __ LoadHeapObject(esi, Handle<Context>::cast(constant->handle())); | 671 __ LoadHeapObject(esi, Handle<Context>::cast(constant->handle())); |
| 611 } else { | 672 } else { |
| 612 UNREACHABLE(); | 673 UNREACHABLE(); |
| 613 } | 674 } |
| 614 | 675 |
| 615 __ CallRuntimeSaveDoubles(id); | 676 __ CallRuntimeSaveDoubles(id); |
| 616 RecordSafepointWithRegisters( | 677 RecordSafepointWithRegisters( |
| 617 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); | 678 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); |
| 679 |
| 680 ASSERT(info()->is_calling()); |
| 618 } | 681 } |
| 619 | 682 |
| 620 | 683 |
| 621 void LCodeGen::RegisterEnvironmentForDeoptimization( | 684 void LCodeGen::RegisterEnvironmentForDeoptimization( |
| 622 LEnvironment* environment, Safepoint::DeoptMode mode) { | 685 LEnvironment* environment, Safepoint::DeoptMode mode) { |
| 623 if (!environment->HasBeenRegistered()) { | 686 if (!environment->HasBeenRegistered()) { |
| 624 // Physical stack frame layout: | 687 // Physical stack frame layout: |
| 625 // -x ............. -4 0 ..................................... y | 688 // -x ............. -4 0 ..................................... y |
| 626 // [incoming arguments] [spill slots] [pushed outgoing arguments] | 689 // [incoming arguments] [spill slots] [pushed outgoing arguments] |
| 627 | 690 |
| (...skipping 25 matching lines...) |
| 653 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 716 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); |
| 654 deoptimizations_.Add(environment, zone()); | 717 deoptimizations_.Add(environment, zone()); |
| 655 } | 718 } |
| 656 } | 719 } |
| 657 | 720 |
| 658 | 721 |
| 659 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { | 722 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { |
| 660 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 723 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 661 ASSERT(environment->HasBeenRegistered()); | 724 ASSERT(environment->HasBeenRegistered()); |
| 662 int id = environment->deoptimization_index(); | 725 int id = environment->deoptimization_index(); |
| 663 Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER); | 726 ASSERT(info()->IsOptimizing() || info()->IsStub()); |
| 727 Deoptimizer::BailoutType bailout_type = frame_is_built_ |
| 728 ? Deoptimizer::EAGER |
| 729 : Deoptimizer::LAZY; |
| 730 Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type); |
| 664 if (entry == NULL) { | 731 if (entry == NULL) { |
| 665 Abort("bailout was not prepared"); | 732 Abort("bailout was not prepared"); |
| 666 return; | 733 return; |
| 667 } | 734 } |
| 668 | 735 |
| 669 if (FLAG_deopt_every_n_times != 0) { | 736 if (FLAG_deopt_every_n_times != 0) { |
| 670 Handle<SharedFunctionInfo> shared(info_->shared_info()); | 737 Handle<SharedFunctionInfo> shared(info_->shared_info()); |
| 671 Label no_deopt; | 738 Label no_deopt; |
| 672 __ pushfd(); | 739 __ pushfd(); |
| 673 __ push(eax); | 740 __ push(eax); |
| (...skipping 13 matching lines...) |
| 687 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); | 754 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); |
| 688 | 755 |
| 689 __ bind(&no_deopt); | 756 __ bind(&no_deopt); |
| 690 __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset), | 757 __ mov(FieldOperand(ebx, SharedFunctionInfo::kStressDeoptCounterOffset), |
| 691 eax); | 758 eax); |
| 692 __ pop(ebx); | 759 __ pop(ebx); |
| 693 __ pop(eax); | 760 __ pop(eax); |
| 694 __ popfd(); | 761 __ popfd(); |
| 695 } | 762 } |
| 696 | 763 |
| 764 ASSERT(info()->IsStub() || frame_is_built_); |
| 697 if (cc == no_condition) { | 765 if (cc == no_condition) { |
| 698 if (FLAG_trap_on_deopt) __ int3(); | 766 if (FLAG_trap_on_deopt) __ int3(); |
| 699 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); | 767 if (frame_is_built_) { |
| 768 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); |
| 769 } else { |
| 770 __ call(entry, RelocInfo::RUNTIME_ENTRY); |
| 771 } |
| 700 } else { | 772 } else { |
| 701 if (FLAG_trap_on_deopt) { | 773 if (FLAG_trap_on_deopt) { |
| 702 Label done; | 774 Label done; |
| 703 __ j(NegateCondition(cc), &done, Label::kNear); | 775 __ j(NegateCondition(cc), &done, Label::kNear); |
| 704 __ int3(); | 776 __ int3(); |
| 705 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); | 777 if (frame_is_built_) { |
| 778 __ jmp(entry, RelocInfo::RUNTIME_ENTRY); |
| 779 } else { |
| 780 __ call(entry, RelocInfo::RUNTIME_ENTRY); |
| 781 } |
| 706 __ bind(&done); | 782 __ bind(&done); |
| 707 } else { | 783 } else { |
| 708 __ j(cc, entry, RelocInfo::RUNTIME_ENTRY); | 784 if (frame_is_built_) { |
| 785 __ j(cc, entry, RelocInfo::RUNTIME_ENTRY); |
| 786 } else { |
| 787 Label done; |
| 788 __ j(NegateCondition(cc), &done, Label::kNear); |
| 789 __ call(entry, RelocInfo::RUNTIME_ENTRY); |
| 790 __ bind(&done); |
| 791 } |
| 709 } | 792 } |
| 710 } | 793 } |
| 711 } | 794 } |
| 712 | 795 |
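
The new `else` branches in `DeoptimizeIf` all share one idiom: ia32 has conditional jumps but no conditional call, so when the deopt entry must be reached by `call` (no frame built yet), the condition is negated and a near jump hops over an unconditional call. A runnable toy that prints the emitted shape (names illustrative, not V8 API):

```cpp
#include <cstdio>

// Print the instruction shape of a "conditional call": jump past the
// call on the negated condition, so the call runs only when cc holds.
void EmitConditionalCall(const char* cc, const char* entry) {
  std::printf("  j not_%s, done\n", cc);
  std::printf("  call %s\n", entry);
  std::printf("done:\n");
}

int main() {
  EmitConditionalCall("overflow", "deopt_entry");
  return 0;
}
```
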
| 713 | 796 |
| 714 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { | 797 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { |
| 715 int length = deoptimizations_.length(); | 798 int length = deoptimizations_.length(); |
| 716 if (length == 0) return; | 799 if (length == 0) return; |
| 717 Handle<DeoptimizationInputData> data = | 800 Handle<DeoptimizationInputData> data = |
| 718 factory()->NewDeoptimizationInputData(length, TENURED); | 801 factory()->NewDeoptimizationInputData(length, TENURED); |
| (...skipping 681 matching lines...) |
| 1400 // Use xor to produce +0.0 in a fast and compact way, but avoid to | 1483 // Use xor to produce +0.0 in a fast and compact way, but avoid to |
| 1401 // do so if the constant is -0.0. | 1484 // do so if the constant is -0.0. |
| 1402 if (BitCast<uint64_t, double>(v) == 0) { | 1485 if (BitCast<uint64_t, double>(v) == 0) { |
| 1403 __ xorps(res, res); | 1486 __ xorps(res, res); |
| 1404 } else { | 1487 } else { |
| 1405 Register temp = ToRegister(instr->temp()); | 1488 Register temp = ToRegister(instr->temp()); |
| 1406 uint64_t int_val = BitCast<uint64_t, double>(v); | 1489 uint64_t int_val = BitCast<uint64_t, double>(v); |
| 1407 int32_t lower = static_cast<int32_t>(int_val); | 1490 int32_t lower = static_cast<int32_t>(int_val); |
| 1408 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); | 1491 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); |
| 1409 if (CpuFeatures::IsSupported(SSE4_1)) { | 1492 if (CpuFeatures::IsSupported(SSE4_1)) { |
| 1410 CpuFeatures::Scope scope(SSE4_1); | 1493 CpuFeatures::Scope scope1(SSE2); |
| 1494 CpuFeatures::Scope scope2(SSE4_1); |
| 1411 if (lower != 0) { | 1495 if (lower != 0) { |
| 1412 __ Set(temp, Immediate(lower)); | 1496 __ Set(temp, Immediate(lower)); |
| 1413 __ movd(res, Operand(temp)); | 1497 __ movd(res, Operand(temp)); |
| 1414 __ Set(temp, Immediate(upper)); | 1498 __ Set(temp, Immediate(upper)); |
| 1415 __ pinsrd(res, Operand(temp), 1); | 1499 __ pinsrd(res, Operand(temp), 1); |
| 1416 } else { | 1500 } else { |
| 1417 __ xorps(res, res); | 1501 __ xorps(res, res); |
| 1418 __ Set(temp, Immediate(upper)); | 1502 __ Set(temp, Immediate(upper)); |
| 1419 __ pinsrd(res, Operand(temp), 1); | 1503 __ pinsrd(res, Operand(temp), 1); |
| 1420 } | 1504 } |
| 1421 } else { | 1505 } else { |
| 1506 ASSERT(CpuFeatures::IsSupported(SSE2)); |
| 1507 CpuFeatures::Scope scope(SSE2); |
| 1422 __ Set(temp, Immediate(upper)); | 1508 __ Set(temp, Immediate(upper)); |
| 1423 __ movd(res, Operand(temp)); | 1509 __ movd(res, Operand(temp)); |
| 1424 __ psllq(res, 32); | 1510 __ psllq(res, 32); |
| 1425 if (lower != 0) { | 1511 if (lower != 0) { |
| 1426 __ Set(temp, Immediate(lower)); | 1512 __ Set(temp, Immediate(lower)); |
| 1427 __ movd(xmm0, Operand(temp)); | 1513 __ movd(xmm0, Operand(temp)); |
| 1428 __ por(res, xmm0); | 1514 __ por(res, xmm0); |
| 1429 } | 1515 } |
| 1430 } | 1516 } |
| 1431 } | 1517 } |
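
Both constant-materialization paths above build the same 64-bit pattern from the `lower` and `upper` halves; only the instruction mix differs (`pinsrd` under SSE4.1, `movd`/`psllq`/`por` under plain SSE2). A host-side sketch of the value being assembled:

```cpp
#include <cstdint>
#include <cstring>

// The IEEE-754 double both code paths construct: (upper << 32) | lower,
// the exact split produced by the static_cast<int32_t> extractions above.
double DoubleFromHalves(uint32_t lower, uint32_t upper) {
  uint64_t bits = (static_cast<uint64_t>(upper) << 32) | lower;
  double result;
  std::memcpy(&result, &bits, sizeof result);
  return result;
}
```
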
| (...skipping 133 matching lines...) |
| 1565 __ add(ToRegister(left), ToOperand(right)); | 1651 __ add(ToRegister(left), ToOperand(right)); |
| 1566 } | 1652 } |
| 1567 | 1653 |
| 1568 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { | 1654 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
| 1569 DeoptimizeIf(overflow, instr->environment()); | 1655 DeoptimizeIf(overflow, instr->environment()); |
| 1570 } | 1656 } |
| 1571 } | 1657 } |
| 1572 | 1658 |
| 1573 | 1659 |
| 1574 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { | 1660 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { |
| 1661 CpuFeatures::Scope scope(SSE2); |
| 1575 LOperand* left = instr->left(); | 1662 LOperand* left = instr->left(); |
| 1576 LOperand* right = instr->right(); | 1663 LOperand* right = instr->right(); |
| 1577 ASSERT(left->Equals(instr->result())); | 1664 ASSERT(left->Equals(instr->result())); |
| 1578 HMathMinMax::Operation operation = instr->hydrogen()->operation(); | 1665 HMathMinMax::Operation operation = instr->hydrogen()->operation(); |
| 1579 if (instr->hydrogen()->representation().IsInteger32()) { | 1666 if (instr->hydrogen()->representation().IsInteger32()) { |
| 1580 Label return_left; | 1667 Label return_left; |
| 1581 Condition condition = (operation == HMathMinMax::kMathMin) | 1668 Condition condition = (operation == HMathMinMax::kMathMin) |
| 1582 ? less_equal | 1669 ? less_equal |
| 1583 : greater_equal; | 1670 : greater_equal; |
| 1584 if (right->IsConstantOperand()) { | 1671 if (right->IsConstantOperand()) { |
| (...skipping 41 matching lines...) |
| 1626 __ j(parity_even, &return_left, Label::kNear); // left == NaN. | 1713 __ j(parity_even, &return_left, Label::kNear); // left == NaN. |
| 1627 __ bind(&return_right); | 1714 __ bind(&return_right); |
| 1628 __ movsd(left_reg, right_reg); | 1715 __ movsd(left_reg, right_reg); |
| 1629 | 1716 |
| 1630 __ bind(&return_left); | 1717 __ bind(&return_left); |
| 1631 } | 1718 } |
| 1632 } | 1719 } |
| 1633 | 1720 |
| 1634 | 1721 |
| 1635 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { | 1722 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { |
| 1723 ASSERT(CpuFeatures::IsSupported(SSE2)); |
| 1724 CpuFeatures::Scope scope(SSE2); |
| 1636 XMMRegister left = ToDoubleRegister(instr->left()); | 1725 XMMRegister left = ToDoubleRegister(instr->left()); |
| 1637 XMMRegister right = ToDoubleRegister(instr->right()); | 1726 XMMRegister right = ToDoubleRegister(instr->right()); |
| 1638 XMMRegister result = ToDoubleRegister(instr->result()); | 1727 XMMRegister result = ToDoubleRegister(instr->result()); |
| 1639 // Modulo uses a fixed result register. | 1728 // Modulo uses a fixed result register. |
| 1640 ASSERT(instr->op() == Token::MOD || left.is(result)); | 1729 ASSERT(instr->op() == Token::MOD || left.is(result)); |
| 1641 switch (instr->op()) { | 1730 switch (instr->op()) { |
| 1642 case Token::ADD: | 1731 case Token::ADD: |
| 1643 __ addsd(left, right); | 1732 __ addsd(left, right); |
| 1644 break; | 1733 break; |
| 1645 case Token::SUB: | 1734 case Token::SUB: |
| 1646 __ subsd(left, right); | 1735 __ subsd(left, right); |
| 1647 break; | 1736 break; |
| 1648 case Token::MUL: | 1737 case Token::MUL: |
| 1649 __ mulsd(left, right); | 1738 __ mulsd(left, right); |
| 1650 break; | 1739 break; |
| 1651 case Token::DIV: | 1740 case Token::DIV: |
| 1652 __ divsd(left, right); | 1741 __ divsd(left, right); |
| 1653 break; | 1742 break; |
| 1654 case Token::MOD: { | 1743 case Token::MOD: { |
| 1655 // Pass two doubles as arguments on the stack. | 1744 // Pass two doubles as arguments on the stack. |
| 1656 __ PrepareCallCFunction(4, eax); | 1745 __ PrepareCallCFunction(4, eax); |
| 1657 __ movdbl(Operand(esp, 0 * kDoubleSize), left); | 1746 __ movdbl(Operand(esp, 0 * kDoubleSize), left); |
| (...skipping 52 matching lines...) |
| 1710 } else { | 1799 } else { |
| 1711 __ j(cc, chunk_->GetAssemblyLabel(left_block)); | 1800 __ j(cc, chunk_->GetAssemblyLabel(left_block)); |
| 1712 __ jmp(chunk_->GetAssemblyLabel(right_block)); | 1801 __ jmp(chunk_->GetAssemblyLabel(right_block)); |
| 1713 } | 1802 } |
| 1714 } | 1803 } |
| 1715 | 1804 |
| 1716 | 1805 |
| 1717 void LCodeGen::DoBranch(LBranch* instr) { | 1806 void LCodeGen::DoBranch(LBranch* instr) { |
| 1718 int true_block = chunk_->LookupDestination(instr->true_block_id()); | 1807 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 1719 int false_block = chunk_->LookupDestination(instr->false_block_id()); | 1808 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 1809 CpuFeatures::Scope scope(SSE2); |
| 1720 | 1810 |
| 1721 Representation r = instr->hydrogen()->value()->representation(); | 1811 Representation r = instr->hydrogen()->value()->representation(); |
| 1722 if (r.IsInteger32()) { | 1812 if (r.IsInteger32()) { |
| 1723 Register reg = ToRegister(instr->value()); | 1813 Register reg = ToRegister(instr->value()); |
| 1724 __ test(reg, Operand(reg)); | 1814 __ test(reg, Operand(reg)); |
| 1725 EmitBranch(true_block, false_block, not_zero); | 1815 EmitBranch(true_block, false_block, not_zero); |
| 1726 } else if (r.IsDouble()) { | 1816 } else if (r.IsDouble()) { |
| 1727 XMMRegister reg = ToDoubleRegister(instr->value()); | 1817 XMMRegister reg = ToDoubleRegister(instr->value()); |
| 1728 __ xorps(xmm0, xmm0); | 1818 __ xorps(xmm0, xmm0); |
| 1729 __ ucomisd(reg, xmm0); | 1819 __ ucomisd(reg, xmm0); |
| (...skipping 139 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1869 return cond; | 1959 return cond; |
| 1870 } | 1960 } |
| 1871 | 1961 |
| 1872 | 1962 |
| 1873 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { | 1963 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { |
| 1874 LOperand* left = instr->left(); | 1964 LOperand* left = instr->left(); |
| 1875 LOperand* right = instr->right(); | 1965 LOperand* right = instr->right(); |
| 1876 int false_block = chunk_->LookupDestination(instr->false_block_id()); | 1966 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 1877 int true_block = chunk_->LookupDestination(instr->true_block_id()); | 1967 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 1878 Condition cc = TokenToCondition(instr->op(), instr->is_double()); | 1968 Condition cc = TokenToCondition(instr->op(), instr->is_double()); |
| 1969 CpuFeatures::Scope scope(SSE2); |
| 1879 | 1970 |
| 1880 if (left->IsConstantOperand() && right->IsConstantOperand()) { | 1971 if (left->IsConstantOperand() && right->IsConstantOperand()) { |
| 1881 // We can statically evaluate the comparison. | 1972 // We can statically evaluate the comparison. |
| 1882 double left_val = ToDouble(LConstantOperand::cast(left)); | 1973 double left_val = ToDouble(LConstantOperand::cast(left)); |
| 1883 double right_val = ToDouble(LConstantOperand::cast(right)); | 1974 double right_val = ToDouble(LConstantOperand::cast(right)); |
| 1884 int next_block = | 1975 int next_block = |
| 1885 EvalComparison(instr->op(), left_val, right_val) ? true_block | 1976 EvalComparison(instr->op(), left_val, right_val) ? true_block |
| 1886 : false_block; | 1977 : false_block; |
| 1887 EmitGoto(next_block); | 1978 EmitGoto(next_block); |
| 1888 } else { | 1979 } else { |
| (...skipping 489 matching lines...) |
| 2378 __ j(condition, &true_value, Label::kNear); | 2469 __ j(condition, &true_value, Label::kNear); |
| 2379 __ mov(ToRegister(instr->result()), factory()->false_value()); | 2470 __ mov(ToRegister(instr->result()), factory()->false_value()); |
| 2380 __ jmp(&done, Label::kNear); | 2471 __ jmp(&done, Label::kNear); |
| 2381 __ bind(&true_value); | 2472 __ bind(&true_value); |
| 2382 __ mov(ToRegister(instr->result()), factory()->true_value()); | 2473 __ mov(ToRegister(instr->result()), factory()->true_value()); |
| 2383 __ bind(&done); | 2474 __ bind(&done); |
| 2384 } | 2475 } |
| 2385 | 2476 |
| 2386 | 2477 |
| 2387 void LCodeGen::DoReturn(LReturn* instr) { | 2478 void LCodeGen::DoReturn(LReturn* instr) { |
| 2388 if (FLAG_trace) { | 2479 if (FLAG_trace && info()->IsOptimizing()) { |
| 2389 // Preserve the return value on the stack and rely on the runtime call | 2480 // Preserve the return value on the stack and rely on the runtime call |
| 2390 // to return the value in the same register. We're leaving the code | 2481 // to return the value in the same register. We're leaving the code |
| 2391 // managed by the register allocator and tearing down the frame, it's | 2482 // managed by the register allocator and tearing down the frame, it's |
| 2392 // safe to write to the context register. | 2483 // safe to write to the context register. |
| 2393 __ push(eax); | 2484 __ push(eax); |
| 2394 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); | 2485 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); |
| 2395 __ CallRuntime(Runtime::kTraceExit, 1); | 2486 __ CallRuntime(Runtime::kTraceExit, 1); |
| 2396 } | 2487 } |
| 2397 if (dynamic_frame_alignment_) { | 2488 if (dynamic_frame_alignment_) { |
| 2398 // Fetch the state of the dynamic frame alignment. | 2489 // Fetch the state of the dynamic frame alignment. |
| 2399 __ mov(edx, Operand(ebp, | 2490 __ mov(edx, Operand(ebp, |
| 2400 JavaScriptFrameConstants::kDynamicAlignmentStateOffset)); | 2491 JavaScriptFrameConstants::kDynamicAlignmentStateOffset)); |
| 2401 } | 2492 } |
| 2402 __ mov(esp, ebp); | 2493 if (NeedsEagerFrame()) { |
| 2403 __ pop(ebp); | 2494 __ mov(esp, ebp); |
| 2495 __ pop(ebp); |
| 2496 } |
| 2404 if (dynamic_frame_alignment_) { | 2497 if (dynamic_frame_alignment_) { |
| 2405 Label no_padding; | 2498 Label no_padding; |
| 2406 __ cmp(edx, Immediate(kNoAlignmentPadding)); | 2499 __ cmp(edx, Immediate(kNoAlignmentPadding)); |
| 2407 __ j(equal, &no_padding); | 2500 __ j(equal, &no_padding); |
| 2408 if (FLAG_debug_code) { | 2501 if (FLAG_debug_code) { |
| 2409 __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize), | 2502 __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize), |
| 2410 Immediate(kAlignmentZapValue)); | 2503 Immediate(kAlignmentZapValue)); |
| 2411 __ Assert(equal, "expected alignment marker"); | 2504 __ Assert(equal, "expected alignment marker"); |
| 2412 } | 2505 } |
| 2413 __ Ret((GetParameterCount() + 2) * kPointerSize, ecx); | 2506 __ Ret((GetParameterCount() + 2) * kPointerSize, ecx); |
| 2414 __ bind(&no_padding); | 2507 __ bind(&no_padding); |
| 2415 } | 2508 } |
| 2416 __ Ret((GetParameterCount() + 1) * kPointerSize, ecx); | 2509 if (info()->IsStub()) { |
| 2510 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); |
| 2511 __ Ret(); |
| 2512 } else { |
| 2513 __ Ret((GetParameterCount() + 1) * kPointerSize, ecx); |
| 2514 } |
| 2417 } | 2515 } |
| 2418 | 2516 |
| 2419 | 2517 |
| 2420 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | 2518 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { |
| 2421 Register result = ToRegister(instr->result()); | 2519 Register result = ToRegister(instr->result()); |
| 2422 __ mov(result, Operand::Cell(instr->hydrogen()->cell())); | 2520 __ mov(result, Operand::Cell(instr->hydrogen()->cell())); |
| 2423 if (instr->hydrogen()->RequiresHoleCheck()) { | 2521 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2424 __ cmp(result, factory()->the_hole_value()); | 2522 __ cmp(result, factory()->the_hole_value()); |
| 2425 DeoptimizeIf(equal, instr->environment()); | 2523 DeoptimizeIf(equal, instr->environment()); |
| 2426 } | 2524 } |
| (...skipping 355 matching lines...) |
| 2782 __ SmiUntag(ToRegister(key)); | 2880 __ SmiUntag(ToRegister(key)); |
| 2783 } | 2881 } |
| 2784 Operand operand(BuildFastArrayOperand( | 2882 Operand operand(BuildFastArrayOperand( |
| 2785 instr->elements(), | 2883 instr->elements(), |
| 2786 key, | 2884 key, |
| 2787 instr->hydrogen()->key()->representation(), | 2885 instr->hydrogen()->key()->representation(), |
| 2788 elements_kind, | 2886 elements_kind, |
| 2789 0, | 2887 0, |
| 2790 instr->additional_index())); | 2888 instr->additional_index())); |
| 2791 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { | 2889 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
| 2792 XMMRegister result(ToDoubleRegister(instr->result())); | 2890 if (CpuFeatures::IsSupported(SSE2)) { |
| 2793 __ movss(result, operand); | 2891 CpuFeatures::Scope scope(SSE2); |
| 2794 __ cvtss2sd(result, result); | 2892 XMMRegister result(ToDoubleRegister(instr->result())); |
| 2893 __ movss(result, operand); |
| 2894 __ cvtss2sd(result, result); |
| 2895 } else { |
| 2896 __ fld_s(operand); |
| 2897 HandleX87FPReturnValue(instr); |
| 2898 } |
| 2795 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { | 2899 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
| 2796 __ movdbl(ToDoubleRegister(instr->result()), operand); | 2900 if (CpuFeatures::IsSupported(SSE2)) { |
| 2901 CpuFeatures::Scope scope(SSE2); |
| 2902 __ movdbl(ToDoubleRegister(instr->result()), operand); |
| 2903 } else { |
| 2904 __ fld_d(operand); |
| 2905 HandleX87FPReturnValue(instr); |
| 2906 } |
| 2797 } else { | 2907 } else { |
| 2798 Register result(ToRegister(instr->result())); | 2908 Register result(ToRegister(instr->result())); |
| 2799 switch (elements_kind) { | 2909 switch (elements_kind) { |
| 2800 case EXTERNAL_BYTE_ELEMENTS: | 2910 case EXTERNAL_BYTE_ELEMENTS: |
| 2801 __ movsx_b(result, operand); | 2911 __ movsx_b(result, operand); |
| 2802 break; | 2912 break; |
| 2803 case EXTERNAL_PIXEL_ELEMENTS: | 2913 case EXTERNAL_PIXEL_ELEMENTS: |
| 2804 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: | 2914 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
| 2805 __ movzx_b(result, operand); | 2915 __ movzx_b(result, operand); |
| 2806 break; | 2916 break; |
| (...skipping 23 matching lines...) Expand all Loading... |
| 2830 case FAST_HOLEY_DOUBLE_ELEMENTS: | 2940 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| 2831 case DICTIONARY_ELEMENTS: | 2941 case DICTIONARY_ELEMENTS: |
| 2832 case NON_STRICT_ARGUMENTS_ELEMENTS: | 2942 case NON_STRICT_ARGUMENTS_ELEMENTS: |
| 2833 UNREACHABLE(); | 2943 UNREACHABLE(); |
| 2834 break; | 2944 break; |
| 2835 } | 2945 } |
| 2836 } | 2946 } |
| 2837 } | 2947 } |
| 2838 | 2948 |
| 2839 | 2949 |
| 2950 void LCodeGen::HandleX87FPReturnValue(LInstruction* instr) { |
| 2951 if (IsX87TopOfStack(instr->result())) { |
| 2952 // Return value is already on stack. If the value has no uses, then |
| 2953 // pop it off the FP stack. Otherwise, make sure that there are enough |
| 2954 // copies of the value on the stack to feed all of the usages, e.g. |
| 2955 // when the following instruction uses the return value in multiple |
| 2956 // inputs. |
| 2957 int count = instr->hydrogen_value()->UseCount(); |
| 2958 if (count == 0) { |
| 2959 __ fstp(0); |
| 2960 } else { |
| 2961 count--; |
| 2962 ASSERT(count <= 7); |
| 2963 while (count-- > 0) { |
| 2964 __ fld(0); |
| 2965 } |
| 2966 } |
| 2967 } else { |
| 2968 __ fstp_d(ToOperand(instr->result())); |
| 2969 } |
| 2970 } |
| 2971 |
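
When the result register is the x87 top of stack, `HandleX87FPReturnValue` leaves exactly as many copies of the value on the FP register stack as there are pending uses: zero uses pop it, N uses leave N copies via `fld(0)`. A small self-contained model of that bookkeeping (`FpStack` is a hypothetical stand-in for the 8-deep x87 stack):

```cpp
#include <cassert>

struct FpStack {
  int depth = 0;
  void Pop() { --depth; }     // fstp(0): discard the top value.
  void DupTop() { ++depth; }  // fld(0): push a copy of the top.
};

// Mirror of the use-count logic in HandleX87FPReturnValue above.
void LeaveCopiesForUses(FpStack* stack, int use_count) {
  if (use_count == 0) {
    stack->Pop();                 // Unused result: drop it.
  } else {
    assert(use_count <= 8);       // x87 stack holds at most 8 values.
    for (int i = 1; i < use_count; ++i) {
      stack->DupTop();            // One extra copy per additional use.
    }
  }
}
```
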
| 2972 |
| 2840 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { | 2973 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { |
| 2841 XMMRegister result = ToDoubleRegister(instr->result()); | |
| 2842 | |
| 2843 if (instr->hydrogen()->RequiresHoleCheck()) { | 2974 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2844 int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + | 2975 int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + |
| 2845 sizeof(kHoleNanLower32); | 2976 sizeof(kHoleNanLower32); |
| 2846 Operand hole_check_operand = BuildFastArrayOperand( | 2977 Operand hole_check_operand = BuildFastArrayOperand( |
| 2847 instr->elements(), instr->key(), | 2978 instr->elements(), instr->key(), |
| 2848 instr->hydrogen()->key()->representation(), | 2979 instr->hydrogen()->key()->representation(), |
| 2849 FAST_DOUBLE_ELEMENTS, | 2980 FAST_DOUBLE_ELEMENTS, |
| 2850 offset, | 2981 offset, |
| 2851 instr->additional_index()); | 2982 instr->additional_index()); |
| 2852 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); | 2983 __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); |
| 2853 DeoptimizeIf(equal, instr->environment()); | 2984 DeoptimizeIf(equal, instr->environment()); |
| 2854 } | 2985 } |
| 2855 | 2986 |
| 2856 Operand double_load_operand = BuildFastArrayOperand( | 2987 Operand double_load_operand = BuildFastArrayOperand( |
| 2857 instr->elements(), | 2988 instr->elements(), |
| 2858 instr->key(), | 2989 instr->key(), |
| 2859 instr->hydrogen()->key()->representation(), | 2990 instr->hydrogen()->key()->representation(), |
| 2860 FAST_DOUBLE_ELEMENTS, | 2991 FAST_DOUBLE_ELEMENTS, |
| 2861 FixedDoubleArray::kHeaderSize - kHeapObjectTag, | 2992 FixedDoubleArray::kHeaderSize - kHeapObjectTag, |
| 2862 instr->additional_index()); | 2993 instr->additional_index()); |
| 2863 __ movdbl(result, double_load_operand); | 2994 if (CpuFeatures::IsSupported(SSE2)) { |
| 2995 CpuFeatures::Scope scope(SSE2); |
| 2996 XMMRegister result = ToDoubleRegister(instr->result()); |
| 2997 __ movdbl(result, double_load_operand); |
| 2998 } else { |
| 2999 __ fld_d(double_load_operand); |
| 3000 HandleX87FPReturnValue(instr); |
| 3001 } |
| 2864 } | 3002 } |
| 2865 | 3003 |
| 2866 | 3004 |
| 2867 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 3005 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
| 2868 Register result = ToRegister(instr->result()); | 3006 Register result = ToRegister(instr->result()); |
| 2869 | 3007 |
| 2870 // Load the result. | 3008 // Load the result. |
| 2871 __ mov(result, | 3009 __ mov(result, |
| 2872 BuildFastArrayOperand(instr->elements(), | 3010 BuildFastArrayOperand(instr->elements(), |
| 2873 instr->key(), | 3011 instr->key(), |
| (...skipping 395 matching lines...) |
| 3269 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); | 3407 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); |
| 3270 } | 3408 } |
| 3271 virtual LInstruction* instr() { return instr_; } | 3409 virtual LInstruction* instr() { return instr_; } |
| 3272 private: | 3410 private: |
| 3273 LUnaryMathOperation* instr_; | 3411 LUnaryMathOperation* instr_; |
| 3274 }; | 3412 }; |
| 3275 | 3413 |
| 3276 ASSERT(instr->value()->Equals(instr->result())); | 3414 ASSERT(instr->value()->Equals(instr->result())); |
| 3277 Representation r = instr->hydrogen()->value()->representation(); | 3415 Representation r = instr->hydrogen()->value()->representation(); |
| 3278 | 3416 |
| 3417 CpuFeatures::Scope scope(SSE2); |
| 3279 if (r.IsDouble()) { | 3418 if (r.IsDouble()) { |
| 3280 XMMRegister scratch = xmm0; | 3419 XMMRegister scratch = xmm0; |
| 3281 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3420 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 3282 __ xorps(scratch, scratch); | 3421 __ xorps(scratch, scratch); |
| 3283 __ subsd(scratch, input_reg); | 3422 __ subsd(scratch, input_reg); |
| 3284 __ pand(input_reg, scratch); | 3423 __ pand(input_reg, scratch); |
| 3285 } else if (r.IsInteger32()) { | 3424 } else if (r.IsInteger32()) { |
| 3286 EmitIntegerMathAbs(instr); | 3425 EmitIntegerMathAbs(instr); |
| 3287 } else { // Tagged case. | 3426 } else { // Tagged case. |
| 3288 DeferredMathAbsTaggedHeapNumber* deferred = | 3427 DeferredMathAbsTaggedHeapNumber* deferred = |
| 3289 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); | 3428 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); |
| 3290 Register input_reg = ToRegister(instr->value()); | 3429 Register input_reg = ToRegister(instr->value()); |
| 3291 // Smi check. | 3430 // Smi check. |
| 3292 __ JumpIfNotSmi(input_reg, deferred->entry()); | 3431 __ JumpIfNotSmi(input_reg, deferred->entry()); |
| 3293 EmitIntegerMathAbs(instr); | 3432 EmitIntegerMathAbs(instr); |
| 3294 __ bind(deferred->exit()); | 3433 __ bind(deferred->exit()); |
| 3295 } | 3434 } |
| 3296 } | 3435 } |
| 3297 | 3436 |
| 3298 | 3437 |
| 3299 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { | 3438 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { |
| 3439 CpuFeatures::Scope scope(SSE2); |
| 3300 XMMRegister xmm_scratch = xmm0; | 3440 XMMRegister xmm_scratch = xmm0; |
| 3301 Register output_reg = ToRegister(instr->result()); | 3441 Register output_reg = ToRegister(instr->result()); |
| 3302 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3442 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 3303 | 3443 |
| 3304 if (CpuFeatures::IsSupported(SSE4_1)) { | 3444 if (CpuFeatures::IsSupported(SSE4_1)) { |
| 3305 CpuFeatures::Scope scope(SSE4_1); | 3445 CpuFeatures::Scope scope(SSE4_1); |
| 3306 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3446 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3307 // Deoptimize on negative zero. | 3447 // Deoptimize on negative zero. |
| 3308 Label non_zero; | 3448 Label non_zero; |
| 3309 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. | 3449 __ xorps(xmm_scratch, xmm_scratch); // Zero the register. |
| (...skipping 44 matching lines...) |
| 3354 __ ucomisd(input_reg, xmm_scratch); | 3494 __ ucomisd(input_reg, xmm_scratch); |
| 3355 __ j(equal, &done, Label::kNear); | 3495 __ j(equal, &done, Label::kNear); |
| 3356 __ sub(output_reg, Immediate(1)); | 3496 __ sub(output_reg, Immediate(1)); |
| 3357 DeoptimizeIf(overflow, instr->environment()); | 3497 DeoptimizeIf(overflow, instr->environment()); |
| 3358 | 3498 |
| 3359 __ bind(&done); | 3499 __ bind(&done); |
| 3360 } | 3500 } |
| 3361 } | 3501 } |
| 3362 | 3502 |
| 3363 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { | 3503 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { |
| 3504 CpuFeatures::Scope scope(SSE2); |
| 3364 XMMRegister xmm_scratch = xmm0; | 3505 XMMRegister xmm_scratch = xmm0; |
| 3365 Register output_reg = ToRegister(instr->result()); | 3506 Register output_reg = ToRegister(instr->result()); |
| 3366 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3507 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 3367 | 3508 |
| 3368 Label below_half, done; | 3509 Label below_half, done; |
| 3369 // xmm_scratch = 0.5 | 3510 // xmm_scratch = 0.5 |
| 3370 ExternalReference one_half = ExternalReference::address_of_one_half(); | 3511 ExternalReference one_half = ExternalReference::address_of_one_half(); |
| 3371 __ movdbl(xmm_scratch, Operand::StaticVariable(one_half)); | 3512 __ movdbl(xmm_scratch, Operand::StaticVariable(one_half)); |
| 3372 __ ucomisd(xmm_scratch, input_reg); | 3513 __ ucomisd(xmm_scratch, input_reg); |
| 3373 __ j(above, &below_half); | 3514 __ j(above, &below_half); |
| (...skipping 25 matching lines...) |
| 3399 __ cvtss2sd(xmm_scratch, xmm_scratch); | 3540 __ cvtss2sd(xmm_scratch, xmm_scratch); |
| 3400 __ ucomisd(input_reg, xmm_scratch); | 3541 __ ucomisd(input_reg, xmm_scratch); |
| 3401 DeoptimizeIf(below, instr->environment()); | 3542 DeoptimizeIf(below, instr->environment()); |
| 3402 } | 3543 } |
| 3403 __ Set(output_reg, Immediate(0)); | 3544 __ Set(output_reg, Immediate(0)); |
| 3404 __ bind(&done); | 3545 __ bind(&done); |
| 3405 } | 3546 } |
| 3406 | 3547 |
| 3407 | 3548 |
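JS Math.round is "round half toward +infinity", which the compare against the 0.5 constant above implements by effectively computing floor(x + 0.5) on the upper path (the below-half path handles -0 and small values separately). A hedged sketch of the convention, modulo double-rounding edge cases and the deopt checks:

    #include <cmath>
    #include <cstdio>

    int JsMathRound(double x) {
      // Assumes x is in int range and not NaN; real codegen deoptimizes otherwise.
      return static_cast<int>(std::floor(x + 0.5));
    }

    int main() {
      // Differs from C's round() on negative ties: JS gives -2, C gives -3.
      std::printf("%d %d\n", JsMathRound(-2.5), JsMathRound(2.5));  // -2 3
    }
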
| 3408 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { | 3549 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { |
| 3550 CpuFeatures::Scope scope(SSE2); |
| 3409 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3551 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 3410 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); | 3552 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); |
| 3411 __ sqrtsd(input_reg, input_reg); | 3553 __ sqrtsd(input_reg, input_reg); |
| 3412 } | 3554 } |
| 3413 | 3555 |
| 3414 | 3556 |
| 3415 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { | 3557 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { |
| 3558 CpuFeatures::Scope scope(SSE2); |
| 3416 XMMRegister xmm_scratch = xmm0; | 3559 XMMRegister xmm_scratch = xmm0; |
| 3417 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3560 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 3418 Register scratch = ToRegister(instr->temp()); | 3561 Register scratch = ToRegister(instr->temp()); |
| 3419 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); | 3562 ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); |
| 3420 | 3563 |
| 3421 // Note that according to ECMA-262 15.8.2.13: | 3564 // Note that according to ECMA-262 15.8.2.13: |
| 3422 // Math.pow(-Infinity, 0.5) == Infinity | 3565 // Math.pow(-Infinity, 0.5) == Infinity |
| 3423 // Math.sqrt(-Infinity) == NaN | 3566 // Math.sqrt(-Infinity) == NaN |
| 3424 Label done, sqrt; | 3567 Label done, sqrt; |
| 3425 // Check base for -Infinity. According to IEEE-754, single-precision | 3568 // Check base for -Infinity. According to IEEE-754, single-precision |
| (...skipping 56 matching lines...) |
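The explicit -Infinity check in DoMathPowHalf exists because ES5.1 15.8.2.13 and IEEE-754 disagree on this one input, so sqrtsd alone would give the wrong answer. A standalone demonstration:

    #include <cmath>
    #include <cstdio>

    int main() {
      double ninf = -INFINITY;
      // ES5.1 15.8.2.13: Math.pow(-Infinity, 0.5) is +Infinity,
      // but IEEE sqrt(-Infinity) is NaN, hence the check before the sqrt path.
      std::printf("%g %g\n", std::pow(ninf, 0.5), std::sqrt(ninf));
      // inf nan
    }
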
| 3482 DeferredDoRandom(LCodeGen* codegen, LRandom* instr) | 3625 DeferredDoRandom(LCodeGen* codegen, LRandom* instr) |
| 3483 : LDeferredCode(codegen), instr_(instr) { } | 3626 : LDeferredCode(codegen), instr_(instr) { } |
| 3484 virtual void Generate() { codegen()->DoDeferredRandom(instr_); } | 3627 virtual void Generate() { codegen()->DoDeferredRandom(instr_); } |
| 3485 virtual LInstruction* instr() { return instr_; } | 3628 virtual LInstruction* instr() { return instr_; } |
| 3486 private: | 3629 private: |
| 3487 LRandom* instr_; | 3630 LRandom* instr_; |
| 3488 }; | 3631 }; |
| 3489 | 3632 |
| 3490 DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr); | 3633 DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr); |
| 3491 | 3634 |
| 3635 CpuFeatures::Scope scope(SSE2); |
| 3492 // Having marked this instruction as a call, we can use any | 3636 // Having marked this instruction as a call, we can use any |
| 3493 // register. | 3637 // register. |
| 3494 ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); | 3638 ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); |
| 3495 ASSERT(ToRegister(instr->global_object()).is(eax)); | 3639 ASSERT(ToRegister(instr->global_object()).is(eax)); |
| 3496 // Assert that the register size is indeed the size of each seed. | 3640 // Assert that the register size is indeed the size of each seed. |
| 3497 static const int kSeedSize = sizeof(uint32_t); | 3641 static const int kSeedSize = sizeof(uint32_t); |
| 3498 STATIC_ASSERT(kPointerSize == kSeedSize); | 3642 STATIC_ASSERT(kPointerSize == kSeedSize); |
| 3499 | 3643 |
| 3500 __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset)); | 3644 __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset)); |
| 3501 static const int kRandomSeedOffset = | 3645 static const int kRandomSeedOffset = |
| (...skipping 47 matching lines...) |
| 3549 | 3693 |
| 3550 void LCodeGen::DoDeferredRandom(LRandom* instr) { | 3694 void LCodeGen::DoDeferredRandom(LRandom* instr) { |
| 3551 __ PrepareCallCFunction(1, ebx); | 3695 __ PrepareCallCFunction(1, ebx); |
| 3552 __ mov(Operand(esp, 0), eax); | 3696 __ mov(Operand(esp, 0), eax); |
| 3553 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); | 3697 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); |
| 3554 // Return value is in eax. | 3698 // Return value is in eax. |
| 3555 } | 3699 } |
| 3556 | 3700 |
| 3557 | 3701 |
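The fast path (in the elided lines above) turns the 32 random bits into a double in [0, 1) by splicing them into the mantissa of a double in [1, 2) and subtracting 1. V8 uses a variant of this standard trick; the sketch below illustrates the idea only, not the exact instruction sequence. The constant 0x3FF0000000000000 is simply the bit pattern of 1.0:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    double BitsToUnitInterval(uint32_t bits) {
      // Exponent of 1.0 plus 32 random bits in the top of the 52-bit mantissa.
      uint64_t u = 0x3FF0000000000000ull | (static_cast<uint64_t>(bits) << 20);
      double d;
      std::memcpy(&d, &u, sizeof(d));
      return d - 1.0;  // d was in [1, 2), so the result is in [0, 1)
    }

    int main() {
      std::printf("%f\n", BitsToUnitInterval(0x89ABCDEFu));
    }
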
| 3558 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { | 3702 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { |
| 3703 CpuFeatures::Scope scope(SSE2); |
| 3559 ASSERT(instr->value()->Equals(instr->result())); | 3704 ASSERT(instr->value()->Equals(instr->result())); |
| 3560 XMMRegister input_reg = ToDoubleRegister(instr->value()); | 3705 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 3561 Label positive, done, zero; | 3706 Label positive, done, zero; |
| 3562 __ xorps(xmm0, xmm0); | 3707 __ xorps(xmm0, xmm0); |
| 3563 __ ucomisd(input_reg, xmm0); | 3708 __ ucomisd(input_reg, xmm0); |
| 3564 __ j(above, &positive, Label::kNear); | 3709 __ j(above, &positive, Label::kNear); |
| 3565 __ j(equal, &zero, Label::kNear); | 3710 __ j(equal, &zero, Label::kNear); |
| 3566 ExternalReference nan = | 3711 ExternalReference nan = |
| 3567 ExternalReference::address_of_canonical_non_hole_nan(); | 3712 ExternalReference::address_of_canonical_non_hole_nan(); |
| 3568 __ movdbl(input_reg, Operand::StaticVariable(nan)); | 3713 __ movdbl(input_reg, Operand::StaticVariable(nan)); |
| (...skipping 269 matching lines...) |
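The dispatch at the top of DoMathLog handles the edge cases before the FPU computes the ordinary logarithm in the elided lines: negative inputs load the canonical NaN, and the zero case yields -Infinity, matching IEEE/libm behavior:

    #include <cmath>
    #include <cstdio>

    int main() {
      std::printf("%g %g %g\n",
                  std::log(-1.0),            // nan: negative input
                  std::log(0.0),             // -inf: zero input
                  std::log(std::exp(1.0)));  // 1: ordinary case
      // nan -inf 1
    }
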
| 3838 int constant_index = | 3983 int constant_index = |
| 3839 ToInteger32(LConstantOperand::cast(instr->index())); | 3984 ToInteger32(LConstantOperand::cast(instr->index())); |
| 3840 if (instr->hydrogen()->length()->representation().IsTagged()) { | 3985 if (instr->hydrogen()->length()->representation().IsTagged()) { |
| 3841 __ cmp(ToOperand(instr->length()), | 3986 __ cmp(ToOperand(instr->length()), |
| 3842 Immediate(Smi::FromInt(constant_index))); | 3987 Immediate(Smi::FromInt(constant_index))); |
| 3843 } else { | 3988 } else { |
| 3844 __ cmp(ToOperand(instr->length()), Immediate(constant_index)); | 3989 __ cmp(ToOperand(instr->length()), Immediate(constant_index)); |
| 3845 } | 3990 } |
| 3846 DeoptimizeIf(below_equal, instr->environment()); | 3991 DeoptimizeIf(below_equal, instr->environment()); |
| 3847 } else { | 3992 } else { |
| 3993 if (instr->hydrogen()->index()->representation().IsTagged() && |
| 3994 !instr->hydrogen()->index()->type().IsSmi()) { |
| 3995 __ test(ToRegister(instr->index()), Immediate(kSmiTagMask)); |
| 3996 DeoptimizeIf(not_zero, instr->environment()); |
| 3997 } |
| 3848 __ cmp(ToRegister(instr->index()), ToOperand(instr->length())); | 3998 __ cmp(ToRegister(instr->index()), ToOperand(instr->length())); |
| 3849 DeoptimizeIf(above_equal, instr->environment()); | 3999 DeoptimizeIf(above_equal, instr->environment()); |
| 3850 } | 4000 } |
| 3851 } | 4001 } |
| 3852 | 4002 |
| 3853 | 4003 |
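The new test/DeoptimizeIf pair added to DoBoundsCheck guards the index comparison against tagged indices that are not smis. On ia32, V8 tags a small integer by shifting it left one bit, so the low bit distinguishes smis (0) from heap-object pointers (1); `test reg, kSmiTagMask` checks exactly that bit. A standalone sketch of the encoding (kSmiTagMask = 1 and the one-bit shift match V8's ia32 constants):

    #include <cstdint>
    #include <cstdio>

    const intptr_t kSmiTagMask = 1;

    bool IsSmi(intptr_t tagged) { return (tagged & kSmiTagMask) == 0; }
    intptr_t SmiToInt(intptr_t tagged) { return tagged >> 1; }

    int main() {
      intptr_t tagged = 42 << 1;  // smi encoding of 42
      std::printf("%d %d\n", static_cast<int>(IsSmi(tagged)),
                  static_cast<int>(SmiToInt(tagged)));  // 1 42
    }
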
| 3854 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | 4004 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
| 3855 ElementsKind elements_kind = instr->elements_kind(); | 4005 ElementsKind elements_kind = instr->elements_kind(); |
| 3856 LOperand* key = instr->key(); | 4006 LOperand* key = instr->key(); |
| 3857 if (!key->IsConstantOperand() && | 4007 if (!key->IsConstantOperand() && |
| 3858 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(), | 4008 ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(), |
| 3859 elements_kind)) { | 4009 elements_kind)) { |
| 3860 __ SmiUntag(ToRegister(key)); | 4010 __ SmiUntag(ToRegister(key)); |
| 3861 } | 4011 } |
| 3862 Operand operand(BuildFastArrayOperand( | 4012 Operand operand(BuildFastArrayOperand( |
| 3863 instr->elements(), | 4013 instr->elements(), |
| 3864 key, | 4014 key, |
| 3865 instr->hydrogen()->key()->representation(), | 4015 instr->hydrogen()->key()->representation(), |
| 3866 elements_kind, | 4016 elements_kind, |
| 3867 0, | 4017 0, |
| 3868 instr->additional_index())); | 4018 instr->additional_index())); |
| 3869 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { | 4019 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
| 4020 CpuFeatures::Scope scope(SSE2); |
| 3870 __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value())); | 4021 __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value())); |
| 3871 __ movss(operand, xmm0); | 4022 __ movss(operand, xmm0); |
| 3872 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { | 4023 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
| 4024 CpuFeatures::Scope scope(SSE2); |
| 3873 __ movdbl(operand, ToDoubleRegister(instr->value())); | 4025 __ movdbl(operand, ToDoubleRegister(instr->value())); |
| 3874 } else { | 4026 } else { |
| 3875 Register value = ToRegister(instr->value()); | 4027 Register value = ToRegister(instr->value()); |
| 3876 switch (elements_kind) { | 4028 switch (elements_kind) { |
| 3877 case EXTERNAL_PIXEL_ELEMENTS: | 4029 case EXTERNAL_PIXEL_ELEMENTS: |
| 3878 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: | 4030 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
| 3879 case EXTERNAL_BYTE_ELEMENTS: | 4031 case EXTERNAL_BYTE_ELEMENTS: |
| 3880 __ mov_b(operand, value); | 4032 __ mov_b(operand, value); |
| 3881 break; | 4033 break; |
| 3882 case EXTERNAL_SHORT_ELEMENTS: | 4034 case EXTERNAL_SHORT_ELEMENTS: |
| (...skipping 15 matching lines...) |
| 3898 case DICTIONARY_ELEMENTS: | 4050 case DICTIONARY_ELEMENTS: |
| 3899 case NON_STRICT_ARGUMENTS_ELEMENTS: | 4051 case NON_STRICT_ARGUMENTS_ELEMENTS: |
| 3900 UNREACHABLE(); | 4052 UNREACHABLE(); |
| 3901 break; | 4053 break; |
| 3902 } | 4054 } |
| 3903 } | 4055 } |
| 3904 } | 4056 } |
| 3905 | 4057 |
| 3906 | 4058 |
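For EXTERNAL_FLOAT_ELEMENTS the store above narrows the double through cvtsd2ss before writing, so precision loss is expected and spec-conformant for float32 typed arrays, as this standalone snippet shows:

    #include <cstdio>

    int main() {
      double d = 0.1;
      float f = static_cast<float>(d);  // what cvtsd2ss does
      std::printf("%.17g %.17g\n", d, static_cast<double>(f));
      // 0.10000000000000001 0.10000000149011612
    }
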
| 3907 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { | 4059 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { |
| 4060 CpuFeatures::Scope scope(SSE2); |
| 3908 XMMRegister value = ToDoubleRegister(instr->value()); | 4061 XMMRegister value = ToDoubleRegister(instr->value()); |
| 3909 | 4062 |
| 3910 if (instr->NeedsCanonicalization()) { | 4063 if (instr->NeedsCanonicalization()) { |
| 3911 Label have_value; | 4064 Label have_value; |
| 3912 | 4065 |
| 3913 __ ucomisd(value, value); | 4066 __ ucomisd(value, value); |
| 3914 __ j(parity_odd, &have_value); // Not NaN. | 4067 __ j(parity_odd, &have_value); // Not NaN. |
| 3915 | 4068 |
| 3916 ExternalReference canonical_nan_reference = | 4069 ExternalReference canonical_nan_reference = |
| 3917 ExternalReference::address_of_canonical_non_hole_nan(); | 4070 ExternalReference::address_of_canonical_non_hole_nan(); |
| (...skipping 230 matching lines...) |
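Canonicalization matters here because fixed double arrays reserve one particular NaN bit pattern (the hole) as a sentinel, so arbitrary NaNs produced by user code must be rewritten to a single canonical NaN before being stored. The ucomisd self-compare above detects NaN via the parity flag, exploiting the fact that only NaN fails self-equality:

    #include <cmath>
    #include <cstdio>

    int main() {
      double nan = std::nan("");
      // Only NaN compares unequal to itself; ucomisd signals this case by
      // setting the parity flag, which the codegen branches on.
      std::printf("%d\n", static_cast<int>(nan != nan));  // 1
    }
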
| 4148 | 4301 |
| 4149 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 4302 void LCodeGen::DoStringAdd(LStringAdd* instr) { |
| 4150 EmitPushTaggedOperand(instr->left()); | 4303 EmitPushTaggedOperand(instr->left()); |
| 4151 EmitPushTaggedOperand(instr->right()); | 4304 EmitPushTaggedOperand(instr->right()); |
| 4152 StringAddStub stub(NO_STRING_CHECK_IN_STUB); | 4305 StringAddStub stub(NO_STRING_CHECK_IN_STUB); |
| 4153 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 4306 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 4154 } | 4307 } |
| 4155 | 4308 |
| 4156 | 4309 |
| 4157 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { | 4310 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { |
| 4158 LOperand* input = instr->value(); | 4311 if (CpuFeatures::IsSupported(SSE2)) { |
| 4159 ASSERT(input->IsRegister() || input->IsStackSlot()); | 4312 CpuFeatures::Scope scope(SSE2); |
| 4160 LOperand* output = instr->result(); | 4313 LOperand* input = instr->value(); |
| 4161 ASSERT(output->IsDoubleRegister()); | 4314 ASSERT(input->IsRegister() || input->IsStackSlot()); |
| 4162 __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); | 4315 LOperand* output = instr->result(); |
| 4316 ASSERT(output->IsDoubleRegister()); |
| 4317 __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); |
| 4318 } else { |
| 4319 UNREACHABLE(); |
| 4320 } |
| 4163 } | 4321 } |
| 4164 | 4322 |
| 4165 | 4323 |
| 4166 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { | 4324 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { |
| 4325 CpuFeatures::Scope scope(SSE2); |
| 4167 LOperand* input = instr->value(); | 4326 LOperand* input = instr->value(); |
| 4168 LOperand* output = instr->result(); | 4327 LOperand* output = instr->result(); |
| 4169 LOperand* temp = instr->temp(); | 4328 LOperand* temp = instr->temp(); |
| 4170 | 4329 |
| 4171 __ LoadUint32(ToDoubleRegister(output), | 4330 __ LoadUint32(ToDoubleRegister(output), |
| 4172 ToRegister(input), | 4331 ToRegister(input), |
| 4173 ToDoubleRegister(temp)); | 4332 ToDoubleRegister(temp)); |
| 4174 } | 4333 } |
| 4175 | 4334 |
| 4176 | 4335 |
| (...skipping 57 matching lines...) |
| 4234 PushSafepointRegistersScope scope(this); | 4393 PushSafepointRegistersScope scope(this); |
| 4235 | 4394 |
| 4236 Label done; | 4395 Label done; |
| 4237 | 4396 |
| 4238 if (signedness == SIGNED_INT32) { | 4397 if (signedness == SIGNED_INT32) { |
| 4239 // There was overflow, so bits 30 and 31 of the original integer | 4398 // There was overflow, so bits 30 and 31 of the original integer |
| 4240 // disagree. Try to allocate a heap number in new space and store | 4399 // disagree. Try to allocate a heap number in new space and store |
| 4241 // the value in there. If that fails, call the runtime system. | 4400 // the value in there. If that fails, call the runtime system. |
| 4242 __ SmiUntag(reg); | 4401 __ SmiUntag(reg); |
| 4243 __ xor_(reg, 0x80000000); | 4402 __ xor_(reg, 0x80000000); |
| 4244 __ cvtsi2sd(xmm0, Operand(reg)); | 4403 if (CpuFeatures::IsSupported(SSE2)) { |
| 4404 CpuFeatures::Scope feature_scope(SSE2); |
| 4405 __ cvtsi2sd(xmm0, Operand(reg)); |
| 4406 } else { |
| 4407 __ push(reg); |
| 4408 __ fild_s(Operand(esp, 0)); |
| 4409 __ pop(reg); |
| 4410 } |
| 4245 } else { | 4411 } else { |
| 4246 __ LoadUint32(xmm0, reg, xmm1); | 4412 if (CpuFeatures::IsSupported(SSE2)) { |
| 4413 CpuFeatures::Scope feature_scope(SSE2); |
| 4414 __ LoadUint32(xmm0, reg, xmm1); |
| 4415 } else { |
| 4416 UNREACHABLE(); |
| 4417 } |
| 4247 } | 4418 } |
| 4248 | 4419 |
| 4249 if (FLAG_inline_new) { | 4420 if (FLAG_inline_new) { |
| 4250 __ AllocateHeapNumber(reg, tmp, no_reg, &slow); | 4421 __ AllocateHeapNumber(reg, tmp, no_reg, &slow); |
| 4251 __ jmp(&done, Label::kNear); | 4422 __ jmp(&done, Label::kNear); |
| 4252 } | 4423 } |
| 4253 | 4424 |
| 4254 // Slow case: Call the runtime system to do the number allocation. | 4425 // Slow case: Call the runtime system to do the number allocation. |
| 4255 __ bind(&slow); | 4426 __ bind(&slow); |
| 4256 | 4427 |
| 4257 // TODO(3095996): Put a valid pointer value in the stack slot where the result | 4428 // TODO(3095996): Put a valid pointer value in the stack slot where the result |
| 4258 // register is stored, as this register is in the pointer map, but contains an | 4429 // register is stored, as this register is in the pointer map, but contains an |
| 4259 // integer value. | 4430 // integer value. |
| 4260 __ StoreToSafepointRegisterSlot(reg, Immediate(0)); | 4431 __ StoreToSafepointRegisterSlot(reg, Immediate(0)); |
| 4261 // NumberTagI and NumberTagD use the context from the frame, rather than | 4432 // NumberTagI and NumberTagD use the context from the frame, rather than |
| 4262 // the environment's HContext or HInlinedContext value. | 4433 // the environment's HContext or HInlinedContext value. |
| 4263 // They only call Runtime::kAllocateHeapNumber. | 4434 // They only call Runtime::kAllocateHeapNumber. |
| 4264 // The corresponding HChange instructions are added in a phase that does | 4435 // The corresponding HChange instructions are added in a phase that does |
| 4265 // not have easy access to the local context. | 4436 // not have easy access to the local context. |
| 4266 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); | 4437 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); |
| 4267 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); | 4438 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); |
| 4268 RecordSafepointWithRegisters( | 4439 RecordSafepointWithRegisters( |
| 4269 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); | 4440 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); |
| 4270 if (!reg.is(eax)) __ mov(reg, eax); | 4441 if (!reg.is(eax)) __ mov(reg, eax); |
| 4271 | 4442 |
| 4272 // Done. Put the value in xmm0 into the value of the allocated heap | 4443 // Done. Put the value in xmm0 into the value of the allocated heap |
| 4273 // number. | 4444 // number. |
| 4274 __ bind(&done); | 4445 __ bind(&done); |
| 4275 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0); | 4446 if (CpuFeatures::IsSupported(SSE2)) { |
| 4447 CpuFeatures::Scope feature_scope(SSE2); |
| 4448 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0); |
| 4449 } else { |
| 4450 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); |
| 4451 } |
| 4276 __ StoreToSafepointRegisterSlot(reg, reg); | 4452 __ StoreToSafepointRegisterSlot(reg, reg); |
| 4277 } | 4453 } |
| 4278 | 4454 |
| 4279 | 4455 |
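The SmiUntag/xor 0x80000000 pair at the top of DoDeferredNumberTagI recovers the original int32 after smi tagging (a left shift by one) overflowed: tagging only overflows when bits 30 and 31 disagree, and after the arithmetic shift right puts the old bit 30 into bit 31, flipping bit 31 restores the original value. A standalone check of that identity:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t value = 0x40000000u;      // bit 30 set, bit 31 clear: tagging overflows
      uint32_t tagged = value << 1;      // wraps to 0x80000000
      // SmiUntag is an arithmetic shift right (sar): 0xC0000000.
      uint32_t untagged =
          static_cast<uint32_t>(static_cast<int32_t>(tagged) >> 1);
      uint32_t restored = untagged ^ 0x80000000u;  // flip bit 31
      std::printf("%x\n", static_cast<unsigned>(restored));  // 40000000
    }
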
| 4280 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { | 4456 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { |
| 4281 class DeferredNumberTagD: public LDeferredCode { | 4457 class DeferredNumberTagD: public LDeferredCode { |
| 4282 public: | 4458 public: |
| 4283 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) | 4459 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) |
| 4284 : LDeferredCode(codegen), instr_(instr) { } | 4460 : LDeferredCode(codegen), instr_(instr) { } |
| 4285 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } | 4461 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } |
| 4286 virtual LInstruction* instr() { return instr_; } | 4462 virtual LInstruction* instr() { return instr_; } |
| 4287 private: | 4463 private: |
| 4288 LNumberTagD* instr_; | 4464 LNumberTagD* instr_; |
| 4289 }; | 4465 }; |
| 4290 | 4466 |
| 4291 XMMRegister input_reg = ToDoubleRegister(instr->value()); | |
| 4292 Register reg = ToRegister(instr->result()); | 4467 Register reg = ToRegister(instr->result()); |
| 4293 Register tmp = ToRegister(instr->temp()); | 4468 Register tmp = ToRegister(instr->temp()); |
| 4294 | 4469 |
| 4295 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); | 4470 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); |
| 4296 if (FLAG_inline_new) { | 4471 if (FLAG_inline_new) { |
| 4297 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); | 4472 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); |
| 4298 } else { | 4473 } else { |
| 4299 __ jmp(deferred->entry()); | 4474 __ jmp(deferred->entry()); |
| 4300 } | 4475 } |
| 4301 __ bind(deferred->exit()); | 4476 __ bind(deferred->exit()); |
| 4302 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); | 4477 if (CpuFeatures::IsSupported(SSE2)) { |
| 4478 CpuFeatures::Scope scope(SSE2); |
| 4479 XMMRegister input_reg = ToDoubleRegister(instr->value()); |
| 4480 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); |
| 4481 } else { |
| 4482 if (!IsX87TopOfStack(instr->value())) { |
| 4483 __ fld_d(ToOperand(instr->value())); |
| 4484 } |
| 4485 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); |
| 4486 } |
| 4303 } | 4487 } |
| 4304 | 4488 |
| 4305 | 4489 |
| 4306 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { | 4490 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { |
| 4307 // TODO(3095996): Get rid of this. For now, we need to make the | 4491 // TODO(3095996): Get rid of this. For now, we need to make the |
| 4308 // result register contain a valid pointer because it is already | 4492 // result register contain a valid pointer because it is already |
| 4309 // contained in the register pointer map. | 4493 // contained in the register pointer map. |
| 4310 Register reg = ToRegister(instr->result()); | 4494 Register reg = ToRegister(instr->result()); |
| 4311 __ Set(reg, Immediate(0)); | 4495 __ Set(reg, Immediate(0)); |
| 4312 | 4496 |
| (...skipping 134 matching lines...) |
| 4447 __ cmp(input_reg, 0x80000000u); | 4631 __ cmp(input_reg, 0x80000000u); |
| 4448 __ j(not_equal, &done); | 4632 __ j(not_equal, &done); |
| 4449 // Check if the input was 0x80000000 (kMinInt). | 4633 // Check if the input was 0x80000000 (kMinInt). |
| 4450 // If not, then we got an overflow and we deoptimize. | 4634 // If not, then we got an overflow and we deoptimize. |
| 4451 ExternalReference min_int = ExternalReference::address_of_min_int(); | 4635 ExternalReference min_int = ExternalReference::address_of_min_int(); |
| 4452 __ movdbl(xmm_temp, Operand::StaticVariable(min_int)); | 4636 __ movdbl(xmm_temp, Operand::StaticVariable(min_int)); |
| 4453 __ ucomisd(xmm_temp, xmm0); | 4637 __ ucomisd(xmm_temp, xmm0); |
| 4454 DeoptimizeIf(not_equal, instr->environment()); | 4638 DeoptimizeIf(not_equal, instr->environment()); |
| 4455 DeoptimizeIf(parity_even, instr->environment()); // NaN. | 4639 DeoptimizeIf(parity_even, instr->environment()); // NaN. |
| 4456 } | 4640 } |
| 4457 } else { | 4641 } else if (CpuFeatures::IsSupported(SSE2)) { |
| 4642 CpuFeatures::Scope scope(SSE2); |
| 4458 // Deoptimize if we don't have a heap number. | 4643 // Deoptimize if we don't have a heap number. |
| 4459 DeoptimizeIf(not_equal, instr->environment()); | 4644 DeoptimizeIf(not_equal, instr->environment()); |
| 4460 | 4645 |
| 4461 XMMRegister xmm_temp = ToDoubleRegister(instr->temp()); | 4646 XMMRegister xmm_temp = ToDoubleRegister(instr->temp()); |
| 4462 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 4647 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 4463 __ cvttsd2si(input_reg, Operand(xmm0)); | 4648 __ cvttsd2si(input_reg, Operand(xmm0)); |
| 4464 __ cvtsi2sd(xmm_temp, Operand(input_reg)); | 4649 __ cvtsi2sd(xmm_temp, Operand(input_reg)); |
| 4465 __ ucomisd(xmm0, xmm_temp); | 4650 __ ucomisd(xmm0, xmm_temp); |
| 4466 DeoptimizeIf(not_equal, instr->environment()); | 4651 DeoptimizeIf(not_equal, instr->environment()); |
| 4467 DeoptimizeIf(parity_even, instr->environment()); // NaN. | 4652 DeoptimizeIf(parity_even, instr->environment()); // NaN. |
| 4468 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4653 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 4469 __ test(input_reg, Operand(input_reg)); | 4654 __ test(input_reg, Operand(input_reg)); |
| 4470 __ j(not_zero, &done); | 4655 __ j(not_zero, &done); |
| 4471 __ movmskpd(input_reg, xmm0); | 4656 __ movmskpd(input_reg, xmm0); |
| 4472 __ and_(input_reg, 1); | 4657 __ and_(input_reg, 1); |
| 4473 DeoptimizeIf(not_zero, instr->environment()); | 4658 DeoptimizeIf(not_zero, instr->environment()); |
| 4474 } | 4659 } |
| 4660 } else { |
| 4661 UNREACHABLE(); |
| 4475 } | 4662 } |
| 4476 __ bind(&done); | 4663 __ bind(&done); |
| 4477 } | 4664 } |
| 4478 | 4665 |
| 4479 | 4666 |
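The movmskpd/and sequence in the non-truncating path above is needed because -0.0 compares equal to 0.0; only the sign bit distinguishes them, and movmskpd extracts exactly that bit. A standalone demonstration:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    int main() {
      double pz = 0.0, nz = -0.0;
      std::printf("%d\n", static_cast<int>(pz == nz));  // 1: equality can't tell
      uint64_t bits;
      std::memcpy(&bits, &nz, sizeof(bits));
      std::printf("%d\n", static_cast<int>(bits >> 63));  // 1: sign bit is set
    }
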
| 4480 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 4667 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
| 4481 class DeferredTaggedToI: public LDeferredCode { | 4668 class DeferredTaggedToI: public LDeferredCode { |
| 4482 public: | 4669 public: |
| 4483 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 4670 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
| 4484 : LDeferredCode(codegen), instr_(instr) { } | 4671 : LDeferredCode(codegen), instr_(instr) { } |
| (...skipping 22 matching lines...) |
| 4507 | 4694 |
| 4508 | 4695 |
| 4509 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { | 4696 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { |
| 4510 LOperand* input = instr->value(); | 4697 LOperand* input = instr->value(); |
| 4511 ASSERT(input->IsRegister()); | 4698 ASSERT(input->IsRegister()); |
| 4512 LOperand* temp = instr->temp(); | 4699 LOperand* temp = instr->temp(); |
| 4513 ASSERT(temp == NULL || temp->IsRegister()); | 4700 ASSERT(temp == NULL || temp->IsRegister()); |
| 4514 LOperand* result = instr->result(); | 4701 LOperand* result = instr->result(); |
| 4515 ASSERT(result->IsDoubleRegister()); | 4702 ASSERT(result->IsDoubleRegister()); |
| 4516 | 4703 |
| 4517 Register input_reg = ToRegister(input); | 4704 if (CpuFeatures::IsSupported(SSE2)) { |
| 4518 XMMRegister result_reg = ToDoubleRegister(result); | 4705 CpuFeatures::Scope scope(SSE2); |
| 4706 Register input_reg = ToRegister(input); |
| 4707 XMMRegister result_reg = ToDoubleRegister(result); |
| 4519 | 4708 |
| 4520 bool deoptimize_on_minus_zero = | 4709 bool deoptimize_on_minus_zero = |
| 4521 instr->hydrogen()->deoptimize_on_minus_zero(); | 4710 instr->hydrogen()->deoptimize_on_minus_zero(); |
| 4522 Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg; | 4711 Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg; |
| 4523 | 4712 |
| 4524 EmitNumberUntagD(input_reg, | 4713 EmitNumberUntagD(input_reg, |
| 4525 temp_reg, | 4714 temp_reg, |
| 4526 result_reg, | 4715 result_reg, |
| 4527 instr->hydrogen()->deoptimize_on_undefined(), | 4716 instr->hydrogen()->deoptimize_on_undefined(), |
| 4528 deoptimize_on_minus_zero, | 4717 deoptimize_on_minus_zero, |
| 4529 instr->environment()); | 4718 instr->environment()); |
| 4719 } else { |
| 4720 UNIMPLEMENTED(); |
| 4721 } |
| 4530 } | 4722 } |
| 4531 | 4723 |
| 4532 | 4724 |
| 4533 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { | 4725 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { |
| 4534 LOperand* input = instr->value(); | 4726 LOperand* input = instr->value(); |
| 4535 ASSERT(input->IsDoubleRegister()); | 4727 ASSERT(input->IsDoubleRegister()); |
| 4536 LOperand* result = instr->result(); | 4728 LOperand* result = instr->result(); |
| 4537 ASSERT(result->IsRegister()); | 4729 ASSERT(result->IsRegister()); |
| 4730 CpuFeatures::Scope scope(SSE2); |
| 4538 | 4731 |
| 4539 XMMRegister input_reg = ToDoubleRegister(input); | 4732 XMMRegister input_reg = ToDoubleRegister(input); |
| 4540 Register result_reg = ToRegister(result); | 4733 Register result_reg = ToRegister(result); |
| 4541 | 4734 |
| 4542 if (instr->truncating()) { | 4735 if (instr->truncating()) { |
| 4543 // Performs a truncating conversion of a floating point number as used by | 4736 // Performs a truncating conversion of a floating point number as used by |
| 4544 // the JS bitwise operations. | 4737 // the JS bitwise operations. |
| 4545 __ cvttsd2si(result_reg, Operand(input_reg)); | 4738 __ cvttsd2si(result_reg, Operand(input_reg)); |
| 4546 __ cmp(result_reg, 0x80000000u); | 4739 __ cmp(result_reg, 0x80000000u); |
| 4547 if (CpuFeatures::IsSupported(SSE3)) { | 4740 if (CpuFeatures::IsSupported(SSE3)) { |
| (...skipping 169 matching lines...) |
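The cmp against 0x80000000u in DoDoubleToI works because cvttsd2si reports both overflow and NaN by producing the "integer indefinite" value 0x80000000, so a single compare detects every failure of the fast truncation. A standalone demonstration (assumes GCC or Clang targeting x86, since it uses inline assembly to pin down the instruction):

    #include <cmath>
    #include <cstdio>

    int main() {
      double inputs[] = {1e12, std::nan(""), 2.5};
      for (double d : inputs) {
        int r;
        asm("cvttsd2si %1, %0" : "=r"(r) : "x"(d));
        std::printf("%08x\n", static_cast<unsigned>(r));
      }
      // 80000000 (overflow), 80000000 (NaN), 00000002
    }
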
| 4717 Operand operand = ToOperand(instr->value()); | 4910 Operand operand = ToOperand(instr->value()); |
| 4718 __ cmp(operand, target); | 4911 __ cmp(operand, target); |
| 4719 } | 4912 } |
| 4720 DeoptimizeIf(not_equal, instr->environment()); | 4913 DeoptimizeIf(not_equal, instr->environment()); |
| 4721 } | 4914 } |
| 4722 | 4915 |
| 4723 | 4916 |
| 4724 void LCodeGen::DoCheckMapCommon(Register reg, | 4917 void LCodeGen::DoCheckMapCommon(Register reg, |
| 4725 Handle<Map> map, | 4918 Handle<Map> map, |
| 4726 CompareMapMode mode, | 4919 CompareMapMode mode, |
| 4727 LEnvironment* env) { | 4920 LInstruction* instr) { |
| 4728 Label success; | 4921 Label success; |
| 4729 __ CompareMap(reg, map, &success, mode); | 4922 __ CompareMap(reg, map, &success, mode); |
| 4730 DeoptimizeIf(not_equal, env); | 4923 DeoptimizeIf(not_equal, instr->environment()); |
| 4731 __ bind(&success); | 4924 __ bind(&success); |
| 4732 } | 4925 } |
| 4733 | 4926 |
| 4734 | 4927 |
| 4735 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 4928 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
| 4736 LOperand* input = instr->value(); | 4929 LOperand* input = instr->value(); |
| 4737 ASSERT(input->IsRegister()); | 4930 ASSERT(input->IsRegister()); |
| 4738 Register reg = ToRegister(input); | 4931 Register reg = ToRegister(input); |
| 4739 | 4932 |
| 4740 Label success; | 4933 Label success; |
| 4741 SmallMapList* map_set = instr->hydrogen()->map_set(); | 4934 SmallMapList* map_set = instr->hydrogen()->map_set(); |
| 4742 for (int i = 0; i < map_set->length() - 1; i++) { | 4935 for (int i = 0; i < map_set->length() - 1; i++) { |
| 4743 Handle<Map> map = map_set->at(i); | 4936 Handle<Map> map = map_set->at(i); |
| 4744 __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP); | 4937 __ CompareMap(reg, map, &success, REQUIRE_EXACT_MAP); |
| 4745 __ j(equal, &success); | 4938 __ j(equal, &success); |
| 4746 } | 4939 } |
| 4747 Handle<Map> map = map_set->last(); | 4940 Handle<Map> map = map_set->last(); |
| 4748 DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment()); | 4941 DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr); |
| 4749 __ bind(&success); | 4942 __ bind(&success); |
| 4750 } | 4943 } |
| 4751 | 4944 |
| 4752 | 4945 |
| 4753 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 4946 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
| 4947 CpuFeatures::Scope scope(SSE2); |
| 4754 XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); | 4948 XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); |
| 4755 Register result_reg = ToRegister(instr->result()); | 4949 Register result_reg = ToRegister(instr->result()); |
| 4756 __ ClampDoubleToUint8(value_reg, xmm0, result_reg); | 4950 __ ClampDoubleToUint8(value_reg, xmm0, result_reg); |
| 4757 } | 4951 } |
| 4758 | 4952 |
| 4759 | 4953 |
| 4760 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { | 4954 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { |
| 4761 ASSERT(instr->unclamped()->Equals(instr->result())); | 4955 ASSERT(instr->unclamped()->Equals(instr->result())); |
| 4762 Register value_reg = ToRegister(instr->result()); | 4956 Register value_reg = ToRegister(instr->result()); |
| 4763 __ ClampUint8(value_reg); | 4957 __ ClampUint8(value_reg); |
| 4764 } | 4958 } |
| 4765 | 4959 |
| 4766 | 4960 |
| 4767 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { | 4961 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { |
| 4962 CpuFeatures::Scope scope(SSE2); |
| 4963 |
| 4768 ASSERT(instr->unclamped()->Equals(instr->result())); | 4964 ASSERT(instr->unclamped()->Equals(instr->result())); |
| 4769 Register input_reg = ToRegister(instr->unclamped()); | 4965 Register input_reg = ToRegister(instr->unclamped()); |
| 4770 Label is_smi, done, heap_number; | 4966 Label is_smi, done, heap_number; |
| 4771 | 4967 |
| 4772 __ JumpIfSmi(input_reg, &is_smi); | 4968 __ JumpIfSmi(input_reg, &is_smi); |
| 4773 | 4969 |
| 4774 // Check for heap number | 4970 // Check for heap number |
| 4775 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), | 4971 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), |
| 4776 factory()->heap_number_map()); | 4972 factory()->heap_number_map()); |
| 4777 __ j(equal, &heap_number, Label::kNear); | 4973 __ j(equal, &heap_number, Label::kNear); |
| (...skipping 26 matching lines...) |
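All three DoClamp* handlers above implement the pixel-array clamping contract: NaN and negatives clamp to 0, large values to 255, and doubles round half to even. A hedged standalone sketch of that contract (not the V8 helper itself):

    #include <cmath>
    #include <cstdio>

    int ClampDoubleToUint8(double d) {
      if (!(d > 0)) return 0;    // NaN and non-positive values clamp to 0
      if (d >= 255) return 255;
      return static_cast<int>(std::nearbyint(d));  // round half to even
    }

    int main() {
      std::printf("%d %d %d %d\n",
                  ClampDoubleToUint8(-5.0),        // 0
                  ClampDoubleToUint8(300.0),       // 255
                  ClampDoubleToUint8(2.5),         // 2 (half to even)
                  ClampDoubleToUint8(std::nan(""))); // 0
    }
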
| 4804 | 5000 |
| 4805 Handle<JSObject> holder = instr->holder(); | 5001 Handle<JSObject> holder = instr->holder(); |
| 4806 Handle<JSObject> current_prototype = instr->prototype(); | 5002 Handle<JSObject> current_prototype = instr->prototype(); |
| 4807 | 5003 |
| 4808 // Load prototype object. | 5004 // Load prototype object. |
| 4809 __ LoadHeapObject(reg, current_prototype); | 5005 __ LoadHeapObject(reg, current_prototype); |
| 4810 | 5006 |
| 4811 // Check prototype maps up to the holder. | 5007 // Check prototype maps up to the holder. |
| 4812 while (!current_prototype.is_identical_to(holder)) { | 5008 while (!current_prototype.is_identical_to(holder)) { |
| 4813 DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()), | 5009 DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()), |
| 4814 ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); | 5010 ALLOW_ELEMENT_TRANSITION_MAPS, instr); |
| 4815 | 5011 |
| 4816 current_prototype = | 5012 current_prototype = |
| 4817 Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype())); | 5013 Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype())); |
| 4818 // Load next prototype object. | 5014 // Load next prototype object. |
| 4819 __ LoadHeapObject(reg, current_prototype); | 5015 __ LoadHeapObject(reg, current_prototype); |
| 4820 } | 5016 } |
| 4821 | 5017 |
| 4822 // Check the holder map. | 5018 // Check the holder map. |
| 4823 DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()), | 5019 DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()), |
| 4824 ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); | 5020 ALLOW_ELEMENT_TRANSITION_MAPS, instr); |
| 4825 } | 5021 } |
| 4826 | 5022 |
| 4827 | 5023 |
| 4828 void LCodeGen::DoAllocateObject(LAllocateObject* instr) { | 5024 void LCodeGen::DoAllocateObject(LAllocateObject* instr) { |
| 4829 class DeferredAllocateObject: public LDeferredCode { | 5025 class DeferredAllocateObject: public LDeferredCode { |
| 4830 public: | 5026 public: |
| 4831 DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr) | 5027 DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr) |
| 4832 : LDeferredCode(codegen), instr_(instr) { } | 5028 : LDeferredCode(codegen), instr_(instr) { } |
| 4833 virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); } | 5029 virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); } |
| 4834 virtual LInstruction* instr() { return instr_; } | 5030 virtual LInstruction* instr() { return instr_; } |
| (...skipping 516 matching lines...) |
| 5351 __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset)); | 5547 __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset)); |
| 5352 | 5548 |
| 5353 // Check the marker in the calling frame. | 5549 // Check the marker in the calling frame. |
| 5354 __ bind(&check_frame_marker); | 5550 __ bind(&check_frame_marker); |
| 5355 __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset), | 5551 __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset), |
| 5356 Immediate(Smi::FromInt(StackFrame::CONSTRUCT))); | 5552 Immediate(Smi::FromInt(StackFrame::CONSTRUCT))); |
| 5357 } | 5553 } |
| 5358 | 5554 |
| 5359 | 5555 |
| 5360 void LCodeGen::EnsureSpaceForLazyDeopt() { | 5556 void LCodeGen::EnsureSpaceForLazyDeopt() { |
| 5361 // Ensure that we have enough space after the previous lazy-bailout | 5557 if (!info()->IsStub()) { |
| 5362 // instruction for patching the code here. | 5558 // Ensure that we have enough space after the previous lazy-bailout |
| 5363 int current_pc = masm()->pc_offset(); | 5559 // instruction for patching the code here. |
| 5364 int patch_size = Deoptimizer::patch_size(); | 5560 int current_pc = masm()->pc_offset(); |
| 5365 if (current_pc < last_lazy_deopt_pc_ + patch_size) { | 5561 int patch_size = Deoptimizer::patch_size(); |
| 5366 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc; | 5562 if (current_pc < last_lazy_deopt_pc_ + patch_size) { |
| 5367 __ Nop(padding_size); | 5563 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc; |
| 5564 __ Nop(padding_size); |
| 5565 } |
| 5368 } | 5566 } |
| 5369 last_lazy_deopt_pc_ = masm()->pc_offset(); | 5567 last_lazy_deopt_pc_ = masm()->pc_offset(); |
| 5370 } | 5568 } |
| 5371 | 5569 |
| 5372 | 5570 |
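The padding logic in EnsureSpaceForLazyDeopt guarantees that the deoptimizer's patch sequence for one lazy-deopt site can never overwrite the next: if fewer than patch_size bytes have been emitted since the previous site, NOPs make up the difference. The arithmetic, extracted as a standalone sketch:

    #include <cstdio>

    int PaddingNeeded(int current_pc, int last_lazy_deopt_pc, int patch_size) {
      // Mirrors the check above: pad only when the gap since the last
      // lazy-deopt point is smaller than the patch the deoptimizer writes.
      if (current_pc < last_lazy_deopt_pc + patch_size) {
        return last_lazy_deopt_pc + patch_size - current_pc;
      }
      return 0;
    }

    int main() {
      std::printf("%d\n", PaddingNeeded(10, 8, 5));  // 3 bytes of NOPs
    }
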
| 5373 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { | 5571 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { |
| 5374 EnsureSpaceForLazyDeopt(); | 5572 EnsureSpaceForLazyDeopt(); |
| 5375 ASSERT(instr->HasEnvironment()); | 5573 ASSERT(instr->HasEnvironment()); |
| 5376 LEnvironment* env = instr->environment(); | 5574 LEnvironment* env = instr->environment(); |
| 5377 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); | 5575 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); |
| (...skipping 203 matching lines...) |
| 5581 FixedArray::kHeaderSize - kPointerSize)); | 5779 FixedArray::kHeaderSize - kPointerSize)); |
| 5582 __ bind(&done); | 5780 __ bind(&done); |
| 5583 } | 5781 } |
| 5584 | 5782 |
| 5585 | 5783 |
| 5586 #undef __ | 5784 #undef __ |
| 5587 | 5785 |
| 5588 } } // namespace v8::internal | 5786 } } // namespace v8::internal |
| 5589 | 5787 |
| 5590 #endif // V8_TARGET_ARCH_IA32 | 5788 #endif // V8_TARGET_ARCH_IA32 |