OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
70 CodeStub::GenerateFPStubs(); | 70 CodeStub::GenerateFPStubs(); |
71 | 71 |
72 // Open a frame scope to indicate that there is a frame on the stack. The | 72 // Open a frame scope to indicate that there is a frame on the stack. The |
73 // NONE indicates that the scope shouldn't actually generate code to set up | 73 // NONE indicates that the scope shouldn't actually generate code to set up |
74 // the frame (that is done in GeneratePrologue). | 74 // the frame (that is done in GeneratePrologue). |
75 FrameScope frame_scope(masm_, StackFrame::NONE); | 75 FrameScope frame_scope(masm_, StackFrame::NONE); |
76 | 76 |
77 return GeneratePrologue() && | 77 return GeneratePrologue() && |
78 GenerateBody() && | 78 GenerateBody() && |
79 GenerateDeferredCode() && | 79 GenerateDeferredCode() && |
| 80 GenerateDeoptJumpTable() && |
80 GenerateSafepointTable(); | 81 GenerateSafepointTable(); |
81 } | 82 } |
82 | 83 |
83 | 84 |
84 void LCodeGen::FinishCode(Handle<Code> code) { | 85 void LCodeGen::FinishCode(Handle<Code> code) { |
85 ASSERT(is_done()); | 86 ASSERT(is_done()); |
86 code->set_stack_slots(GetStackSlotCount()); | 87 code->set_stack_slots(GetStackSlotCount()); |
87 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); | 88 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); |
88 PopulateDeoptimizationData(code); | 89 PopulateDeoptimizationData(code); |
89 } | 90 } |
(...skipping 19 matching lines...) Expand all Loading... |
109 size_t length = builder.position(); | 110 size_t length = builder.position(); |
110 Vector<char> copy = Vector<char>::New(length + 1); | 111 Vector<char> copy = Vector<char>::New(length + 1); |
111 memcpy(copy.start(), builder.Finalize(), copy.length()); | 112 memcpy(copy.start(), builder.Finalize(), copy.length()); |
112 masm()->RecordComment(copy.start()); | 113 masm()->RecordComment(copy.start()); |
113 } | 114 } |
114 | 115 |
115 | 116 |
116 bool LCodeGen::GeneratePrologue() { | 117 bool LCodeGen::GeneratePrologue() { |
117 ASSERT(is_generating()); | 118 ASSERT(is_generating()); |
118 | 119 |
119 ProfileEntryHookStub::MaybeCallEntryHook(masm_); | 120 if (info()->IsOptimizing()) { |
| 121 ProfileEntryHookStub::MaybeCallEntryHook(masm_); |
120 | 122 |
121 #ifdef DEBUG | 123 #ifdef DEBUG |
122 if (strlen(FLAG_stop_at) > 0 && | 124 if (strlen(FLAG_stop_at) > 0 && |
123 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { | 125 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { |
124 __ stop("stop_at"); | 126 __ stop("stop_at"); |
125 } | 127 } |
126 #endif | 128 #endif |
127 | 129 |
128 // a1: Callee's JS function. | 130 // a1: Callee's JS function. |
129 // cp: Callee's context. | 131 // cp: Callee's context. |
130 // fp: Caller's frame pointer. | 132 // fp: Caller's frame pointer. |
131 // lr: Caller's pc. | 133 // lr: Caller's pc. |
132 | 134 |
133 // Strict mode functions and builtins need to replace the receiver | 135 // Strict mode functions and builtins need to replace the receiver |
134 // with undefined when called as functions (without an explicit | 136 // with undefined when called as functions (without an explicit |
135 // receiver object). r5 is zero for method calls and non-zero for | 137 // receiver object). r5 is zero for method calls and non-zero for |
136 // function calls. | 138 // function calls. |
137 if (!info_->is_classic_mode() || info_->is_native()) { | 139 if (!info_->is_classic_mode() || info_->is_native()) { |
138 Label ok; | 140 Label ok; |
139 __ Branch(&ok, eq, t1, Operand(zero_reg)); | 141 __ Branch(&ok, eq, t1, Operand(zero_reg)); |
140 | 142 |
141 int receiver_offset = scope()->num_parameters() * kPointerSize; | 143 int receiver_offset = scope()->num_parameters() * kPointerSize; |
142 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); | 144 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); |
143 __ sw(a2, MemOperand(sp, receiver_offset)); | 145 __ sw(a2, MemOperand(sp, receiver_offset)); |
144 __ bind(&ok); | 146 __ bind(&ok); |
| 147 } |
145 } | 148 } |
146 | 149 |
147 info()->set_prologue_offset(masm_->pc_offset()); | 150 info()->set_prologue_offset(masm_->pc_offset()); |
148 // The following three instructions must remain together and unmodified for | 151 if (NeedsEagerFrame()) { |
149 // code aging to work properly. | 152 // The following three instructions must remain together and unmodified for |
150 __ Push(ra, fp, cp, a1); | 153 // code aging to work properly. |
151 // Add unused load of ip to ensure prologue sequence is identical for | 154 __ Push(ra, fp, cp, a1); |
152 // full-codegen and lithium-codegen. | 155 // Add unused load of ip to ensure prologue sequence is identical for |
153 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); | 156 // full-codegen and lithium-codegen. |
154 __ Addu(fp, sp, Operand(2 * kPointerSize)); // Adj. FP to point to saved FP. | 157 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
| 158 // Adj. FP to point to saved FP. |
| 159 __ Addu(fp, sp, Operand(2 * kPointerSize)); |
| 160 frame_is_built_ = true; |
| 161 } |
155 | 162 |
156 // Reserve space for the stack slots needed by the code. | 163 // Reserve space for the stack slots needed by the code. |
157 int slots = GetStackSlotCount(); | 164 int slots = GetStackSlotCount(); |
158 if (slots > 0) { | 165 if (slots > 0) { |
159 if (FLAG_debug_code) { | 166 if (FLAG_debug_code) { |
160 __ li(a0, Operand(slots)); | 167 __ li(a0, Operand(slots)); |
161 __ li(a2, Operand(kSlotsZapValue)); | 168 __ li(a2, Operand(kSlotsZapValue)); |
162 Label loop; | 169 Label loop; |
163 __ bind(&loop); | 170 __ bind(&loop); |
164 __ push(a2); | 171 __ push(a2); |
165 __ Subu(a0, a0, 1); | 172 __ Subu(a0, a0, 1); |
166 __ Branch(&loop, ne, a0, Operand(zero_reg)); | 173 __ Branch(&loop, ne, a0, Operand(zero_reg)); |
167 } else { | 174 } else { |
168 __ Subu(sp, sp, Operand(slots * kPointerSize)); | 175 __ Subu(sp, sp, Operand(slots * kPointerSize)); |
169 } | 176 } |
170 } | 177 } |
171 | 178 |
172 // Possibly allocate a local context. | 179 // Possibly allocate a local context. |
173 int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; | 180 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; |
174 if (heap_slots > 0) { | 181 if (heap_slots > 0) { |
175 Comment(";;; Allocate local context"); | 182 Comment(";;; Allocate local context"); |
176 // Argument to NewContext is the function, which is in a1. | 183 // Argument to NewContext is the function, which is in a1. |
177 __ push(a1); | 184 __ push(a1); |
178 if (heap_slots <= FastNewContextStub::kMaximumSlots) { | 185 if (heap_slots <= FastNewContextStub::kMaximumSlots) { |
179 FastNewContextStub stub(heap_slots); | 186 FastNewContextStub stub(heap_slots); |
180 __ CallStub(&stub); | 187 __ CallStub(&stub); |
181 } else { | 188 } else { |
182 __ CallRuntime(Runtime::kNewFunctionContext, 1); | 189 __ CallRuntime(Runtime::kNewFunctionContext, 1); |
183 } | 190 } |
(...skipping 15 matching lines...) Expand all Loading... |
199 __ sw(a0, target); | 206 __ sw(a0, target); |
200 // Update the write barrier. This clobbers a3 and a0. | 207 // Update the write barrier. This clobbers a3 and a0. |
201 __ RecordWriteContextSlot( | 208 __ RecordWriteContextSlot( |
202 cp, target.offset(), a0, a3, kRAHasBeenSaved, kSaveFPRegs); | 209 cp, target.offset(), a0, a3, kRAHasBeenSaved, kSaveFPRegs); |
203 } | 210 } |
204 } | 211 } |
205 Comment(";;; End allocate local context"); | 212 Comment(";;; End allocate local context"); |
206 } | 213 } |
207 | 214 |
208 // Trace the call. | 215 // Trace the call. |
209 if (FLAG_trace) { | 216 if (FLAG_trace && info()->IsOptimizing()) { |
210 __ CallRuntime(Runtime::kTraceEnter, 0); | 217 __ CallRuntime(Runtime::kTraceEnter, 0); |
211 } | 218 } |
212 EnsureSpaceForLazyDeopt(); | 219 EnsureSpaceForLazyDeopt(); |
213 return !is_aborted(); | 220 return !is_aborted(); |
214 } | 221 } |
215 | 222 |
216 | 223 |
217 bool LCodeGen::GenerateBody() { | 224 bool LCodeGen::GenerateBody() { |
218 ASSERT(is_generating()); | 225 ASSERT(is_generating()); |
219 bool emit_instructions = true; | 226 bool emit_instructions = true; |
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
257 return !is_aborted(); | 264 return !is_aborted(); |
258 } | 265 } |
259 | 266 |
260 | 267 |
261 bool LCodeGen::GenerateDeferredCode() { | 268 bool LCodeGen::GenerateDeferredCode() { |
262 ASSERT(is_generating()); | 269 ASSERT(is_generating()); |
263 if (deferred_.length() > 0) { | 270 if (deferred_.length() > 0) { |
264 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { | 271 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { |
265 LDeferredCode* code = deferred_[i]; | 272 LDeferredCode* code = deferred_[i]; |
266 __ bind(code->entry()); | 273 __ bind(code->entry()); |
| 274 if (NeedsDeferredFrame()) { |
| 275 Comment(";;; Deferred build frame @%d: %s.", |
| 276 code->instruction_index(), |
| 277 code->instr()->Mnemonic()); |
| 278 ASSERT(!frame_is_built_); |
| 279 ASSERT(info()->IsStub()); |
| 280 frame_is_built_ = true; |
| 281 __ MultiPush(cp.bit() | fp.bit() | ra.bit()); |
| 282 __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); |
| 283 __ push(scratch0()); |
| 284 __ Addu(fp, sp, Operand(2 * kPointerSize)); |
| 285 } |
267 Comment(";;; Deferred code @%d: %s.", | 286 Comment(";;; Deferred code @%d: %s.", |
268 code->instruction_index(), | 287 code->instruction_index(), |
269 code->instr()->Mnemonic()); | 288 code->instr()->Mnemonic()); |
270 code->Generate(); | 289 code->Generate(); |
| 290 if (NeedsDeferredFrame()) { |
| 291 Comment(";;; Deferred destroy frame @%d: %s.", |
| 292 code->instruction_index(), |
| 293 code->instr()->Mnemonic()); |
| 294 ASSERT(frame_is_built_); |
| 295 __ pop(at); |
| 296 __ MultiPop(cp.bit() | fp.bit() | ra.bit()); |
| 297 frame_is_built_ = false; |
| 298 } |
271 __ jmp(code->exit()); | 299 __ jmp(code->exit()); |
272 } | 300 } |
273 } | 301 } |
274 // Deferred code is the last part of the instruction sequence. Mark | 302 // Deferred code is the last part of the instruction sequence. Mark |
275 // the generated code as done unless we bailed out. | 303 // the generated code as done unless we bailed out. |
276 if (!is_aborted()) status_ = DONE; | 304 if (!is_aborted()) status_ = DONE; |
277 return !is_aborted(); | 305 return !is_aborted(); |
278 } | 306 } |
279 | 307 |
280 | 308 |
281 bool LCodeGen::GenerateDeoptJumpTable() { | 309 bool LCodeGen::GenerateDeoptJumpTable() { |
282 // TODO(plind): not clear that this will have advantage for MIPS. | 310 // Check that the jump table is accessible from everywhere in the function |
283 // Skipping it for now. Raised issue #100 for this. | 311 // code, i.e. that offsets to the table can be encoded in the 16bit signed |
284 Abort("Unimplemented: GenerateDeoptJumpTable"); | 312 // immediate of a branch instruction. |
285 return false; | 313 // To simplify we consider the code size from the first instruction to the |
| 314 // end of the jump table. |
| 315 if (!is_int16((masm()->pc_offset() / Assembler::kInstrSize) + |
| 316 deopt_jump_table_.length() * 12)) { |
| 317 Abort("Generated code is too large"); |
| 318 } |
| 319 |
| 320 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); |
| 321 __ RecordComment("[ Deoptimization jump table"); |
| 322 Label table_start; |
| 323 __ bind(&table_start); |
| 324 Label needs_frame_not_call; |
| 325 Label needs_frame_is_call; |
| 326 for (int i = 0; i < deopt_jump_table_.length(); i++) { |
| 327 __ bind(&deopt_jump_table_[i].label); |
| 328 Address entry = deopt_jump_table_[i].address; |
| 329 __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry))); |
| 330 if (deopt_jump_table_[i].needs_frame) { |
| 331 if (deopt_jump_table_[i].is_lazy_deopt) { |
| 332 if (needs_frame_is_call.is_bound()) { |
| 333 __ Branch(&needs_frame_is_call); |
| 334 } else { |
| 335 __ bind(&needs_frame_is_call); |
| 336 __ MultiPush(cp.bit() | fp.bit() | ra.bit()); |
| 337 // This variant of deopt can only be used with stubs. Since we don't |
| 338 // have a function pointer to install in the stack frame that we're |
| 339 // building, install a special marker there instead. |
| 340 ASSERT(info()->IsStub()); |
| 341 __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); |
| 342 __ push(scratch0()); |
| 343 __ Addu(fp, sp, Operand(2 * kPointerSize)); |
| 344 __ Call(t9); |
| 345 } |
| 346 } else { |
| 347 if (needs_frame_not_call.is_bound()) { |
| 348 __ Branch(&needs_frame_not_call); |
| 349 } else { |
| 350 __ bind(&needs_frame_not_call); |
| 351 __ MultiPush(cp.bit() | fp.bit() | ra.bit()); |
| 352 // This variant of deopt can only be used with stubs. Since we don't |
| 353 // have a function pointer to install in the stack frame that we're |
| 354 // building, install a special marker there instead. |
| 355 ASSERT(info()->IsStub()); |
| 356 __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); |
| 357 __ push(scratch0()); |
| 358 __ Addu(fp, sp, Operand(2 * kPointerSize)); |
| 359 __ Jump(t9); |
| 360 } |
| 361 } |
| 362 } else { |
| 363 if (deopt_jump_table_[i].is_lazy_deopt) { |
| 364 __ Call(t9); |
| 365 } else { |
| 366 __ Jump(t9); |
| 367 } |
| 368 } |
| 369 } |
| 370 __ RecordComment("]"); |
| 371 |
| 372 // The deoptimization jump table is the last part of the instruction |
| 373 // sequence. Mark the generated code as done unless we bailed out. |
| 374 if (!is_aborted()) status_ = DONE; |
| 375 return !is_aborted(); |
286 } | 376 } |
287 | 377 |
288 | 378 |
289 bool LCodeGen::GenerateSafepointTable() { | 379 bool LCodeGen::GenerateSafepointTable() { |
290 ASSERT(is_done()); | 380 ASSERT(is_done()); |
291 safepoints_.Emit(masm(), GetStackSlotCount()); | 381 safepoints_.Emit(masm(), GetStackSlotCount()); |
292 return !is_aborted(); | 382 return !is_aborted(); |
293 } | 383 } |
294 | 384 |
295 | 385 |
(...skipping 179 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
475 // arguments index points to the first element of a sequence of tagged | 565 // arguments index points to the first element of a sequence of tagged |
476 // values on the stack that represent the arguments. This needs to be | 566 // values on the stack that represent the arguments. This needs to be |
477 // kept in sync with the LArgumentsElements implementation. | 567 // kept in sync with the LArgumentsElements implementation. |
478 *arguments_index = -environment->parameter_count(); | 568 *arguments_index = -environment->parameter_count(); |
479 *arguments_count = environment->parameter_count(); | 569 *arguments_count = environment->parameter_count(); |
480 | 570 |
481 WriteTranslation(environment->outer(), | 571 WriteTranslation(environment->outer(), |
482 translation, | 572 translation, |
483 arguments_index, | 573 arguments_index, |
484 arguments_count); | 574 arguments_count); |
485 int closure_id = *info()->closure() != *environment->closure() | 575 bool has_closure_id = !info()->closure().is_null() && |
| 576 *info()->closure() != *environment->closure(); |
| 577 int closure_id = has_closure_id |
486 ? DefineDeoptimizationLiteral(environment->closure()) | 578 ? DefineDeoptimizationLiteral(environment->closure()) |
487 : Translation::kSelfLiteralId; | 579 : Translation::kSelfLiteralId; |
488 | 580 |
489 switch (environment->frame_type()) { | 581 switch (environment->frame_type()) { |
490 case JS_FUNCTION: | 582 case JS_FUNCTION: |
491 translation->BeginJSFrame(environment->ast_id(), closure_id, height); | 583 translation->BeginJSFrame(environment->ast_id(), closure_id, height); |
492 break; | 584 break; |
493 case JS_CONSTRUCT: | 585 case JS_CONSTRUCT: |
494 translation->BeginConstructStubFrame(closure_id, translation_size); | 586 translation->BeginConstructStubFrame(closure_id, translation_size); |
495 break; | 587 break; |
496 case JS_GETTER: | 588 case JS_GETTER: |
497 ASSERT(translation_size == 1); | 589 ASSERT(translation_size == 1); |
498 ASSERT(height == 0); | 590 ASSERT(height == 0); |
499 translation->BeginGetterStubFrame(closure_id); | 591 translation->BeginGetterStubFrame(closure_id); |
500 break; | 592 break; |
501 case JS_SETTER: | 593 case JS_SETTER: |
502 ASSERT(translation_size == 2); | 594 ASSERT(translation_size == 2); |
503 ASSERT(height == 0); | 595 ASSERT(height == 0); |
504 translation->BeginSetterStubFrame(closure_id); | 596 translation->BeginSetterStubFrame(closure_id); |
505 break; | 597 break; |
| 598 case STUB: |
| 599 translation->BeginCompiledStubFrame(); |
| 600 break; |
506 case ARGUMENTS_ADAPTOR: | 601 case ARGUMENTS_ADAPTOR: |
507 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); | 602 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); |
508 break; | 603 break; |
509 } | 604 } |
510 | 605 |
511 // Inlined frames which push their arguments cause the index to be | 606 // Inlined frames which push their arguments cause the index to be |
512 // bumped and a new stack area to be used for materialization. | 607 // bumped and a new stack area to be used for materialization. |
513 if (environment->entry() != NULL && | 608 if (environment->entry() != NULL && |
514 environment->entry()->arguments_pushed()) { | 609 environment->entry()->arguments_pushed()) { |
515 *arguments_index = *arguments_index < 0 | 610 *arguments_index = *arguments_index < 0 |
(...skipping 166 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
682 } | 777 } |
683 | 778 |
684 | 779 |
685 void LCodeGen::DeoptimizeIf(Condition cc, | 780 void LCodeGen::DeoptimizeIf(Condition cc, |
686 LEnvironment* environment, | 781 LEnvironment* environment, |
687 Register src1, | 782 Register src1, |
688 const Operand& src2) { | 783 const Operand& src2) { |
689 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 784 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
690 ASSERT(environment->HasBeenRegistered()); | 785 ASSERT(environment->HasBeenRegistered()); |
691 int id = environment->deoptimization_index(); | 786 int id = environment->deoptimization_index(); |
692 Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER); | 787 |
| 788 Deoptimizer::BailoutType bailout_type = info()->IsStub() |
| 789 ? Deoptimizer::LAZY |
| 790 : Deoptimizer::EAGER; |
| 791 Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type); |
693 if (entry == NULL) { | 792 if (entry == NULL) { |
694 Abort("bailout was not prepared"); | 793 Abort("bailout was not prepared"); |
695 return; | 794 return; |
696 } | 795 } |
697 | 796 |
698 ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on MIPS. | 797 ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on MIPS. |
699 | 798 |
700 if (FLAG_deopt_every_n_times == 1 && | 799 if (FLAG_deopt_every_n_times == 1 && |
701 info_->shared_info()->opt_count() == id) { | 800 info_->shared_info()->opt_count() == id) { |
702 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); | 801 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); |
703 return; | 802 return; |
704 } | 803 } |
705 | 804 |
706 if (FLAG_trap_on_deopt) { | 805 if (FLAG_trap_on_deopt) { |
707 Label skip; | 806 Label skip; |
708 if (cc != al) { | 807 if (cc != al) { |
709 __ Branch(&skip, NegateCondition(cc), src1, src2); | 808 __ Branch(&skip, NegateCondition(cc), src1, src2); |
710 } | 809 } |
711 __ stop("trap_on_deopt"); | 810 __ stop("trap_on_deopt"); |
712 __ bind(&skip); | 811 __ bind(&skip); |
713 } | 812 } |
714 | 813 |
715 // TODO(plind): The Arm port is a little different here, due to their | 814 bool needs_lazy_deopt = info()->IsStub(); |
716 // DeOpt jump table, which is not used for Mips yet. | 815 ASSERT(info()->IsStub() || frame_is_built_); |
717 __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2); | 816 if (cc == al && !needs_lazy_deopt) { |
| 817 __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2); |
| 818 } else { |
| 819 // We often have several deopts to the same entry, reuse the last |
| 820 // jump entry if this is the case. |
| 821 if (deopt_jump_table_.is_empty() || |
| 822 (deopt_jump_table_.last().address != entry) || |
| 823 (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) || |
| 824 (deopt_jump_table_.last().needs_frame != !frame_is_built_)) { |
| 825 JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt); |
| 826 deopt_jump_table_.Add(table_entry, zone()); |
| 827 } |
| 828 __ Branch(&deopt_jump_table_.last().label, cc, src1, src2); |
| 829 } |
718 } | 830 } |
719 | 831 |
720 | 832 |
721 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { | 833 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { |
722 int length = deoptimizations_.length(); | 834 int length = deoptimizations_.length(); |
723 if (length == 0) return; | 835 if (length == 0) return; |
724 Handle<DeoptimizationInputData> data = | 836 Handle<DeoptimizationInputData> data = |
725 factory()->NewDeoptimizationInputData(length, TENURED); | 837 factory()->NewDeoptimizationInputData(length, TENURED); |
726 | 838 |
727 Handle<ByteArray> translations = translations_.CreateByteArray(); | 839 Handle<ByteArray> translations = translations_.CreateByteArray(); |
(...skipping 546 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1274 | 1386 |
1275 void LCodeGen::DoConstantI(LConstantI* instr) { | 1387 void LCodeGen::DoConstantI(LConstantI* instr) { |
1276 ASSERT(instr->result()->IsRegister()); | 1388 ASSERT(instr->result()->IsRegister()); |
1277 __ li(ToRegister(instr->result()), Operand(instr->value())); | 1389 __ li(ToRegister(instr->result()), Operand(instr->value())); |
1278 } | 1390 } |
1279 | 1391 |
1280 | 1392 |
1281 void LCodeGen::DoConstantD(LConstantD* instr) { | 1393 void LCodeGen::DoConstantD(LConstantD* instr) { |
1282 ASSERT(instr->result()->IsDoubleRegister()); | 1394 ASSERT(instr->result()->IsDoubleRegister()); |
1283 DoubleRegister result = ToDoubleRegister(instr->result()); | 1395 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 1396 CpuFeatures::Scope scope(FPU); |
1284 double v = instr->value(); | 1397 double v = instr->value(); |
1285 __ Move(result, v); | 1398 __ Move(result, v); |
1286 } | 1399 } |
1287 | 1400 |
1288 | 1401 |
1289 void LCodeGen::DoConstantT(LConstantT* instr) { | 1402 void LCodeGen::DoConstantT(LConstantT* instr) { |
1290 Handle<Object> value = instr->value(); | 1403 Handle<Object> value = instr->value(); |
1291 if (value->IsSmi()) { | 1404 if (value->IsSmi()) { |
1292 __ li(ToRegister(instr->result()), Operand(value)); | 1405 __ li(ToRegister(instr->result()), Operand(value)); |
1293 } else { | 1406 } else { |
(...skipping 177 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1471 __ Branch(&return_right, NegateCondition(condition), left_reg, right_op); | 1584 __ Branch(&return_right, NegateCondition(condition), left_reg, right_op); |
1472 __ mov(result_reg, left_reg); | 1585 __ mov(result_reg, left_reg); |
1473 __ Branch(&done); | 1586 __ Branch(&done); |
1474 } | 1587 } |
1475 __ Branch(&done, condition, left_reg, right_op); | 1588 __ Branch(&done, condition, left_reg, right_op); |
1476 __ bind(&return_right); | 1589 __ bind(&return_right); |
1477 __ Addu(result_reg, zero_reg, right_op); | 1590 __ Addu(result_reg, zero_reg, right_op); |
1478 __ bind(&done); | 1591 __ bind(&done); |
1479 } else { | 1592 } else { |
1480 ASSERT(instr->hydrogen()->representation().IsDouble()); | 1593 ASSERT(instr->hydrogen()->representation().IsDouble()); |
| 1594 CpuFeatures::Scope scope(FPU); |
1481 FPURegister left_reg = ToDoubleRegister(left); | 1595 FPURegister left_reg = ToDoubleRegister(left); |
1482 FPURegister right_reg = ToDoubleRegister(right); | 1596 FPURegister right_reg = ToDoubleRegister(right); |
1483 FPURegister result_reg = ToDoubleRegister(instr->result()); | 1597 FPURegister result_reg = ToDoubleRegister(instr->result()); |
1484 Label check_nan_left, check_zero, return_left, return_right, done; | 1598 Label check_nan_left, check_zero, return_left, return_right, done; |
1485 __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg); | 1599 __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg); |
1486 __ BranchF(&return_left, NULL, condition, left_reg, right_reg); | 1600 __ BranchF(&return_left, NULL, condition, left_reg, right_reg); |
1487 __ Branch(&return_right); | 1601 __ Branch(&return_right); |
1488 | 1602 |
1489 __ bind(&check_zero); | 1603 __ bind(&check_zero); |
1490 // left == right != 0. | 1604 // left == right != 0. |
(...skipping 20 matching lines...) Expand all Loading... |
1511 __ bind(&return_left); | 1625 __ bind(&return_left); |
1512 if (!left_reg.is(result_reg)) { | 1626 if (!left_reg.is(result_reg)) { |
1513 __ mov_d(result_reg, left_reg); | 1627 __ mov_d(result_reg, left_reg); |
1514 } | 1628 } |
1515 __ bind(&done); | 1629 __ bind(&done); |
1516 } | 1630 } |
1517 } | 1631 } |
1518 | 1632 |
1519 | 1633 |
1520 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { | 1634 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { |
| 1635 CpuFeatures::Scope scope(FPU); |
1521 DoubleRegister left = ToDoubleRegister(instr->left()); | 1636 DoubleRegister left = ToDoubleRegister(instr->left()); |
1522 DoubleRegister right = ToDoubleRegister(instr->right()); | 1637 DoubleRegister right = ToDoubleRegister(instr->right()); |
1523 DoubleRegister result = ToDoubleRegister(instr->result()); | 1638 DoubleRegister result = ToDoubleRegister(instr->result()); |
1524 switch (instr->op()) { | 1639 switch (instr->op()) { |
1525 case Token::ADD: | 1640 case Token::ADD: |
1526 __ add_d(result, left, right); | 1641 __ add_d(result, left, right); |
1527 break; | 1642 break; |
1528 case Token::SUB: | 1643 case Token::SUB: |
1529 __ sub_d(result, left, right); | 1644 __ sub_d(result, left, right); |
1530 break; | 1645 break; |
(...skipping 89 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1620 | 1735 |
1621 void LCodeGen::DoBranch(LBranch* instr) { | 1736 void LCodeGen::DoBranch(LBranch* instr) { |
1622 int true_block = chunk_->LookupDestination(instr->true_block_id()); | 1737 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
1623 int false_block = chunk_->LookupDestination(instr->false_block_id()); | 1738 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
1624 | 1739 |
1625 Representation r = instr->hydrogen()->value()->representation(); | 1740 Representation r = instr->hydrogen()->value()->representation(); |
1626 if (r.IsInteger32()) { | 1741 if (r.IsInteger32()) { |
1627 Register reg = ToRegister(instr->value()); | 1742 Register reg = ToRegister(instr->value()); |
1628 EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg)); | 1743 EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg)); |
1629 } else if (r.IsDouble()) { | 1744 } else if (r.IsDouble()) { |
| 1745 CpuFeatures::Scope scope(FPU); |
1630 DoubleRegister reg = ToDoubleRegister(instr->value()); | 1746 DoubleRegister reg = ToDoubleRegister(instr->value()); |
1631 // Test the double value. Zero and NaN are false. | 1747 // Test the double value. Zero and NaN are false. |
1632 EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero); | 1748 EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero); |
1633 } else { | 1749 } else { |
1634 ASSERT(r.IsTagged()); | 1750 ASSERT(r.IsTagged()); |
1635 Register reg = ToRegister(instr->value()); | 1751 Register reg = ToRegister(instr->value()); |
1636 HType type = instr->hydrogen()->value()->type(); | 1752 HType type = instr->hydrogen()->value()->type(); |
1637 if (type.IsBoolean()) { | 1753 if (type.IsBoolean()) { |
1638 __ LoadRoot(at, Heap::kTrueValueRootIndex); | 1754 __ LoadRoot(at, Heap::kTrueValueRootIndex); |
1639 EmitBranch(true_block, false_block, eq, reg, Operand(at)); | 1755 EmitBranch(true_block, false_block, eq, reg, Operand(at)); |
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1697 Label not_string; | 1813 Label not_string; |
1698 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 1814 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
1699 __ Branch(¬_string, ge , at, Operand(FIRST_NONSTRING_TYPE)); | 1815 __ Branch(¬_string, ge , at, Operand(FIRST_NONSTRING_TYPE)); |
1700 __ lw(at, FieldMemOperand(reg, String::kLengthOffset)); | 1816 __ lw(at, FieldMemOperand(reg, String::kLengthOffset)); |
1701 __ Branch(true_label, ne, at, Operand(zero_reg)); | 1817 __ Branch(true_label, ne, at, Operand(zero_reg)); |
1702 __ Branch(false_label); | 1818 __ Branch(false_label); |
1703 __ bind(¬_string); | 1819 __ bind(¬_string); |
1704 } | 1820 } |
1705 | 1821 |
1706 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { | 1822 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { |
| 1823 CpuFeatures::Scope scope(FPU); |
1707 // heap number -> false iff +0, -0, or NaN. | 1824 // heap number -> false iff +0, -0, or NaN. |
1708 DoubleRegister dbl_scratch = double_scratch0(); | 1825 DoubleRegister dbl_scratch = double_scratch0(); |
1709 Label not_heap_number; | 1826 Label not_heap_number; |
1710 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 1827 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
1711 __ Branch(¬_heap_number, ne, map, Operand(at)); | 1828 __ Branch(¬_heap_number, ne, map, Operand(at)); |
1712 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); | 1829 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); |
1713 __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero); | 1830 __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero); |
1714 // Falls through if dbl_scratch == 0. | 1831 // Falls through if dbl_scratch == 0. |
1715 __ Branch(false_label); | 1832 __ Branch(false_label); |
1716 __ bind(¬_heap_number); | 1833 __ bind(¬_heap_number); |
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1776 if (left->IsConstantOperand() && right->IsConstantOperand()) { | 1893 if (left->IsConstantOperand() && right->IsConstantOperand()) { |
1777 // We can statically evaluate the comparison. | 1894 // We can statically evaluate the comparison. |
1778 double left_val = ToDouble(LConstantOperand::cast(left)); | 1895 double left_val = ToDouble(LConstantOperand::cast(left)); |
1779 double right_val = ToDouble(LConstantOperand::cast(right)); | 1896 double right_val = ToDouble(LConstantOperand::cast(right)); |
1780 int next_block = | 1897 int next_block = |
1781 EvalComparison(instr->op(), left_val, right_val) ? true_block | 1898 EvalComparison(instr->op(), left_val, right_val) ? true_block |
1782 : false_block; | 1899 : false_block; |
1783 EmitGoto(next_block); | 1900 EmitGoto(next_block); |
1784 } else { | 1901 } else { |
1785 if (instr->is_double()) { | 1902 if (instr->is_double()) { |
| 1903 CpuFeatures::Scope scope(FPU); |
1786 // Compare left and right as doubles and load the | 1904 // Compare left and right as doubles and load the |
1787 // resulting flags into the normal status register. | 1905 // resulting flags into the normal status register. |
1788 FPURegister left_reg = ToDoubleRegister(left); | 1906 FPURegister left_reg = ToDoubleRegister(left); |
1789 FPURegister right_reg = ToDoubleRegister(right); | 1907 FPURegister right_reg = ToDoubleRegister(right); |
1790 | 1908 |
1791 // If a NaN is involved, i.e. the result is unordered, | 1909 // If a NaN is involved, i.e. the result is unordered, |
1792 // jump to false block label. | 1910 // jump to false block label. |
1793 __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq, | 1911 __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq, |
1794 left_reg, right_reg); | 1912 left_reg, right_reg); |
1795 | 1913 |
(...skipping 527 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2323 Label done; | 2441 Label done; |
2324 __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg)); | 2442 __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg)); |
2325 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); | 2443 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); |
2326 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); | 2444 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); |
2327 ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done)); | 2445 ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done)); |
2328 __ bind(&done); | 2446 __ bind(&done); |
2329 } | 2447 } |
2330 | 2448 |
2331 | 2449 |
2332 void LCodeGen::DoReturn(LReturn* instr) { | 2450 void LCodeGen::DoReturn(LReturn* instr) { |
2333 if (FLAG_trace) { | 2451 if (FLAG_trace && info()->IsOptimizing()) { |
2334 // Push the return value on the stack as the parameter. | 2452 // Push the return value on the stack as the parameter. |
2335 // Runtime::TraceExit returns its parameter in v0. | 2453 // Runtime::TraceExit returns its parameter in v0. |
2336 __ push(v0); | 2454 __ push(v0); |
2337 __ CallRuntime(Runtime::kTraceExit, 1); | 2455 __ CallRuntime(Runtime::kTraceExit, 1); |
2338 } | 2456 } |
2339 int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize; | 2457 if (NeedsEagerFrame()) { |
2340 __ mov(sp, fp); | 2458 int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize; |
2341 __ Pop(ra, fp); | 2459 __ mov(sp, fp); |
2342 __ Addu(sp, sp, Operand(sp_delta)); | 2460 __ Pop(ra, fp); |
| 2461 __ Addu(sp, sp, Operand(sp_delta)); |
| 2462 } |
| 2463 if (info()->IsStub()) { |
| 2464 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 2465 } |
2343 __ Jump(ra); | 2466 __ Jump(ra); |
2344 } | 2467 } |
2345 | 2468 |
2346 | 2469 |
2347 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | 2470 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { |
2348 Register result = ToRegister(instr->result()); | 2471 Register result = ToRegister(instr->result()); |
2349 __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell()))); | 2472 __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell()))); |
2350 __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset)); | 2473 __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset)); |
2351 if (instr->hydrogen()->RequiresHoleCheck()) { | 2474 if (instr->hydrogen()->RequiresHoleCheck()) { |
2352 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 2475 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
(...skipping 338 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2691 | 2814 |
2692 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || | 2815 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || |
2693 elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { | 2816 elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
2694 FPURegister result = ToDoubleRegister(instr->result()); | 2817 FPURegister result = ToDoubleRegister(instr->result()); |
2695 if (key_is_constant) { | 2818 if (key_is_constant) { |
2696 __ Addu(scratch0(), external_pointer, constant_key << element_size_shift); | 2819 __ Addu(scratch0(), external_pointer, constant_key << element_size_shift); |
2697 } else { | 2820 } else { |
2698 __ sll(scratch0(), key, shift_size); | 2821 __ sll(scratch0(), key, shift_size); |
2699 __ Addu(scratch0(), scratch0(), external_pointer); | 2822 __ Addu(scratch0(), scratch0(), external_pointer); |
2700 } | 2823 } |
| 2824 if (CpuFeatures::IsSupported(FPU)) { |
| 2825 CpuFeatures::Scope scope(FPU); |
| 2826 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
| 2827 __ lwc1(result, MemOperand(scratch0(), additional_offset)); |
| 2828 __ cvt_d_s(result, result); |
| 2829 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS |
| 2830 __ ldc1(result, MemOperand(scratch0(), additional_offset)); |
| 2831 } |
| 2832 } else { |
| 2833 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
| 2834 Register value = external_pointer; |
| 2835 __ lw(value, MemOperand(scratch0(), additional_offset)); |
| 2836 __ And(sfpd_lo, value, Operand(kBinary32MantissaMask)); |
2701 | 2837 |
2702 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { | 2838 __ srl(scratch0(), value, kBinary32MantissaBits); |
2703 __ lwc1(result, MemOperand(scratch0(), additional_offset)); | 2839 __ And(scratch0(), scratch0(), |
2704 __ cvt_d_s(result, result); | 2840 Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); |
2705 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS | 2841 |
2706 __ ldc1(result, MemOperand(scratch0(), additional_offset)); | 2842 Label exponent_rebiased; |
| 2843 __ Xor(at, scratch0(), Operand(0x00)); |
| 2844 __ Branch(&exponent_rebiased, eq, at, Operand(zero_reg)); |
| 2845 |
| 2846 __ Xor(at, scratch0(), Operand(0xff)); |
| 2847 Label skip; |
| 2848 __ Branch(&skip, ne, at, Operand(zero_reg)); |
| 2849 __ li(scratch0(), Operand(0x7ff)); |
| 2850 __ bind(&skip); |
| 2851 __ Branch(&exponent_rebiased, eq, at, Operand(zero_reg)); |
| 2852 |
| 2853 // Rebias exponent. |
| 2854 __ Addu(scratch0(), |
| 2855 scratch0(), |
| 2856 Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); |
| 2857 |
| 2858 __ bind(&exponent_rebiased); |
| 2859 __ And(sfpd_hi, value, Operand(kBinary32SignMask)); |
| 2860 __ sll(at, scratch0(), HeapNumber::kMantissaBitsInTopWord); |
| 2861 __ Or(sfpd_hi, sfpd_hi, at); |
| 2862 |
| 2863 // Shift mantissa. |
| 2864 static const int kMantissaShiftForHiWord = |
| 2865 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; |
| 2866 |
| 2867 static const int kMantissaShiftForLoWord = |
| 2868 kBitsPerInt - kMantissaShiftForHiWord; |
| 2869 |
| 2870 __ srl(at, sfpd_lo, kMantissaShiftForHiWord); |
| 2871 __ Or(sfpd_hi, sfpd_hi, at); |
| 2872 __ sll(sfpd_lo, sfpd_lo, kMantissaShiftForLoWord); |
| 2873 |
| 2874 } else { |
| 2875 __ lw(sfpd_lo, MemOperand(scratch0(), additional_offset)); |
| 2876 __ lw(sfpd_hi, MemOperand(scratch0(), |
| 2877 additional_offset + kPointerSize)); |
| 2878 } |
2707 } | 2879 } |
2708 } else { | 2880 } else { |
2709 Register result = ToRegister(instr->result()); | 2881 Register result = ToRegister(instr->result()); |
2710 MemOperand mem_operand = PrepareKeyedOperand( | 2882 MemOperand mem_operand = PrepareKeyedOperand( |
2711 key, external_pointer, key_is_constant, constant_key, | 2883 key, external_pointer, key_is_constant, constant_key, |
2712 element_size_shift, shift_size, | 2884 element_size_shift, shift_size, |
2713 instr->additional_index(), additional_offset); | 2885 instr->additional_index(), additional_offset); |
2714 switch (elements_kind) { | 2886 switch (elements_kind) { |
2715 case EXTERNAL_BYTE_ELEMENTS: | 2887 case EXTERNAL_BYTE_ELEMENTS: |
2716 __ lb(result, mem_operand); | 2888 __ lb(result, mem_operand); |
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2765 int constant_key = 0; | 2937 int constant_key = 0; |
2766 if (key_is_constant) { | 2938 if (key_is_constant) { |
2767 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | 2939 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); |
2768 if (constant_key & 0xF0000000) { | 2940 if (constant_key & 0xF0000000) { |
2769 Abort("array index constant value too big."); | 2941 Abort("array index constant value too big."); |
2770 } | 2942 } |
2771 } else { | 2943 } else { |
2772 key = ToRegister(instr->key()); | 2944 key = ToRegister(instr->key()); |
2773 } | 2945 } |
2774 | 2946 |
2775 if (key_is_constant) { | 2947 int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) + |
2776 __ Addu(elements, elements, | 2948 ((constant_key + instr->additional_index()) << element_size_shift); |
2777 Operand(((constant_key + instr->additional_index()) << | 2949 if (!key_is_constant) { |
2778 element_size_shift) + | 2950 __ sll(scratch, key, shift_size); |
2779 FixedDoubleArray::kHeaderSize - kHeapObjectTag)); | 2951 __ Addu(elements, elements, scratch); |
| 2952 } |
| 2953 if (CpuFeatures::IsSupported(FPU)) { |
| 2954 CpuFeatures::Scope scope(FPU); |
| 2955 __ Addu(elements, elements, Operand(base_offset)); |
| 2956 __ ldc1(result, MemOperand(elements)); |
| 2957 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2958 __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); |
| 2959 DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32)); |
| 2960 } |
2780 } else { | 2961 } else { |
2781 __ sll(scratch, key, shift_size); | 2962 __ lw(sfpd_hi, MemOperand(elements, base_offset + kPointerSize)); |
2782 __ Addu(elements, elements, Operand(scratch)); | 2963 __ lw(sfpd_lo, MemOperand(elements, base_offset)); |
2783 __ Addu(elements, elements, | 2964 if (instr->hydrogen()->RequiresHoleCheck()) { |
2784 Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) + | 2965 ASSERT(kPointerSize == sizeof(kHoleNanLower32)); |
2785 (instr->additional_index() << element_size_shift))); | 2966 DeoptimizeIf(eq, instr->environment(), sfpd_hi, Operand(kHoleNanUpper32)); |
| 2967 } |
2786 } | 2968 } |
2787 | |
2788 if (instr->hydrogen()->RequiresHoleCheck()) { | |
2789 __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); | |
2790 DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32)); | |
2791 } | |
2792 | |
2793 __ ldc1(result, MemOperand(elements)); | |
2794 } | 2969 } |
2795 | 2970 |
2796 | 2971 |
2797 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 2972 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
2798 Register elements = ToRegister(instr->elements()); | 2973 Register elements = ToRegister(instr->elements()); |
2799 Register result = ToRegister(instr->result()); | 2974 Register result = ToRegister(instr->result()); |
2800 Register scratch = scratch0(); | 2975 Register scratch = scratch0(); |
2801 Register store_base = scratch; | 2976 Register store_base = scratch; |
2802 int offset = 0; | 2977 int offset = 0; |
2803 | 2978 |
(...skipping 422 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3226 __ mov(result, input); | 3401 __ mov(result, input); |
3227 ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done)); | 3402 ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done)); |
3228 __ subu(result, zero_reg, input); | 3403 __ subu(result, zero_reg, input); |
3229 // Overflow if result is still negative, i.e. 0x80000000. | 3404 // Overflow if result is still negative, i.e. 0x80000000. |
3230 DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg)); | 3405 DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg)); |
3231 __ bind(&done); | 3406 __ bind(&done); |
3232 } | 3407 } |
3233 | 3408 |
3234 | 3409 |
3235 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { | 3410 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { |
| 3411 CpuFeatures::Scope scope(FPU); |
3236 // Class for deferred case. | 3412 // Class for deferred case. |
3237 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { | 3413 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { |
3238 public: | 3414 public: |
3239 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, | 3415 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, |
3240 LUnaryMathOperation* instr) | 3416 LUnaryMathOperation* instr) |
3241 : LDeferredCode(codegen), instr_(instr) { } | 3417 : LDeferredCode(codegen), instr_(instr) { } |
3242 virtual void Generate() { | 3418 virtual void Generate() { |
3243 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); | 3419 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); |
3244 } | 3420 } |
3245 virtual LInstruction* instr() { return instr_; } | 3421 virtual LInstruction* instr() { return instr_; } |
(...skipping 16 matching lines...) Expand all Loading... |
3262 // Smi check. | 3438 // Smi check. |
3263 __ JumpIfNotSmi(input, deferred->entry()); | 3439 __ JumpIfNotSmi(input, deferred->entry()); |
3264 // If smi, handle it directly. | 3440 // If smi, handle it directly. |
3265 EmitIntegerMathAbs(instr); | 3441 EmitIntegerMathAbs(instr); |
3266 __ bind(deferred->exit()); | 3442 __ bind(deferred->exit()); |
3267 } | 3443 } |
3268 } | 3444 } |
3269 | 3445 |
3270 | 3446 |
3271 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { | 3447 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { |
| 3448 CpuFeatures::Scope scope(FPU); |
3272 DoubleRegister input = ToDoubleRegister(instr->value()); | 3449 DoubleRegister input = ToDoubleRegister(instr->value()); |
3273 Register result = ToRegister(instr->result()); | 3450 Register result = ToRegister(instr->result()); |
3274 Register scratch1 = scratch0(); | 3451 Register scratch1 = scratch0(); |
3275 Register except_flag = ToRegister(instr->temp()); | 3452 Register except_flag = ToRegister(instr->temp()); |
3276 | 3453 |
3277 __ EmitFPUTruncate(kRoundToMinusInf, | 3454 __ EmitFPUTruncate(kRoundToMinusInf, |
3278 result, | 3455 result, |
3279 input, | 3456 input, |
3280 scratch1, | 3457 scratch1, |
3281 double_scratch0(), | 3458 double_scratch0(), |
3282 except_flag); | 3459 except_flag); |
3283 | 3460 |
3284 // Deopt if the operation did not succeed. | 3461 // Deopt if the operation did not succeed. |
3285 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg)); | 3462 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg)); |
3286 | 3463 |
3287 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3464 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
3288 // Test for -0. | 3465 // Test for -0. |
3289 Label done; | 3466 Label done; |
3290 __ Branch(&done, ne, result, Operand(zero_reg)); | 3467 __ Branch(&done, ne, result, Operand(zero_reg)); |
3291 __ mfc1(scratch1, input.high()); | 3468 __ mfc1(scratch1, input.high()); |
3292 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); | 3469 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); |
3293 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg)); | 3470 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg)); |
3294 __ bind(&done); | 3471 __ bind(&done); |
3295 } | 3472 } |
3296 } | 3473 } |
3297 | 3474 |
3298 | 3475 |
3299 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { | 3476 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { |
| 3477 CpuFeatures::Scope scope(FPU); |
3300 DoubleRegister input = ToDoubleRegister(instr->value()); | 3478 DoubleRegister input = ToDoubleRegister(instr->value()); |
3301 Register result = ToRegister(instr->result()); | 3479 Register result = ToRegister(instr->result()); |
3302 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); | 3480 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); |
3303 Register scratch = scratch0(); | 3481 Register scratch = scratch0(); |
3304 Label done, check_sign_on_zero; | 3482 Label done, check_sign_on_zero; |
3305 | 3483 |
3306 // Extract exponent bits. | 3484 // Extract exponent bits. |
3307 __ mfc1(result, input.high()); | 3485 __ mfc1(result, input.high()); |
3308 __ Ext(scratch, | 3486 __ Ext(scratch, |
3309 result, | 3487 result, |
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3366 __ bind(&check_sign_on_zero); | 3544 __ bind(&check_sign_on_zero); |
3367 __ mfc1(scratch, input.high()); | 3545 __ mfc1(scratch, input.high()); |
3368 __ And(scratch, scratch, Operand(HeapNumber::kSignMask)); | 3546 __ And(scratch, scratch, Operand(HeapNumber::kSignMask)); |
3369 DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg)); | 3547 DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg)); |
3370 } | 3548 } |
3371 __ bind(&done); | 3549 __ bind(&done); |
3372 } | 3550 } |
3373 | 3551 |
3374 | 3552 |
3375 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { | 3553 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { |
| 3554 CpuFeatures::Scope scope(FPU); |
3376 DoubleRegister input = ToDoubleRegister(instr->value()); | 3555 DoubleRegister input = ToDoubleRegister(instr->value()); |
3377 DoubleRegister result = ToDoubleRegister(instr->result()); | 3556 DoubleRegister result = ToDoubleRegister(instr->result()); |
3378 __ sqrt_d(result, input); | 3557 __ sqrt_d(result, input); |
3379 } | 3558 } |
3380 | 3559 |
3381 | 3560 |
3382 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { | 3561 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { |
| 3562 CpuFeatures::Scope scope(FPU); |
3383 DoubleRegister input = ToDoubleRegister(instr->value()); | 3563 DoubleRegister input = ToDoubleRegister(instr->value()); |
3384 DoubleRegister result = ToDoubleRegister(instr->result()); | 3564 DoubleRegister result = ToDoubleRegister(instr->result()); |
3385 DoubleRegister temp = ToDoubleRegister(instr->temp()); | 3565 DoubleRegister temp = ToDoubleRegister(instr->temp()); |
3386 | 3566 |
3387 ASSERT(!input.is(result)); | 3567 ASSERT(!input.is(result)); |
3388 | 3568 |
3389 // Note that according to ECMA-262 15.8.2.13: | 3569 // Note that according to ECMA-262 15.8.2.13: |
3390 // Math.pow(-Infinity, 0.5) == Infinity | 3570 // Math.pow(-Infinity, 0.5) == Infinity |
3391 // Math.sqrt(-Infinity) == NaN | 3571 // Math.sqrt(-Infinity) == NaN |
3392 Label done; | 3572 Label done; |
3393 __ Move(temp, -V8_INFINITY); | 3573 __ Move(temp, -V8_INFINITY); |
3394 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input); | 3574 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input); |
3395 // Set up Infinity in the delay slot. | 3575 // Set up Infinity in the delay slot. |
3396 // result is overwritten if the branch is not taken. | 3576 // result is overwritten if the branch is not taken. |
3397 __ neg_d(result, temp); | 3577 __ neg_d(result, temp); |
3398 | 3578 |
3399 // Add +0 to convert -0 to +0. | 3579 // Add +0 to convert -0 to +0. |
3400 __ add_d(result, input, kDoubleRegZero); | 3580 __ add_d(result, input, kDoubleRegZero); |
3401 __ sqrt_d(result, result); | 3581 __ sqrt_d(result, result); |
3402 __ bind(&done); | 3582 __ bind(&done); |
3403 } | 3583 } |
3404 | 3584 |
3405 | 3585 |
3406 void LCodeGen::DoPower(LPower* instr) { | 3586 void LCodeGen::DoPower(LPower* instr) { |
| 3587 CpuFeatures::Scope scope(FPU); |
3407 Representation exponent_type = instr->hydrogen()->right()->representation(); | 3588 Representation exponent_type = instr->hydrogen()->right()->representation(); |
3408 // Having marked this as a call, we can use any registers. | 3589 // Having marked this as a call, we can use any registers. |
3409 // Just make sure that the input/output registers are the expected ones. | 3590 // Just make sure that the input/output registers are the expected ones. |
3410 ASSERT(!instr->right()->IsDoubleRegister() || | 3591 ASSERT(!instr->right()->IsDoubleRegister() || |
3411 ToDoubleRegister(instr->right()).is(f4)); | 3592 ToDoubleRegister(instr->right()).is(f4)); |
3412 ASSERT(!instr->right()->IsRegister() || | 3593 ASSERT(!instr->right()->IsRegister() || |
3413 ToRegister(instr->right()).is(a2)); | 3594 ToRegister(instr->right()).is(a2)); |
3414 ASSERT(ToDoubleRegister(instr->left()).is(f2)); | 3595 ASSERT(ToDoubleRegister(instr->left()).is(f2)); |
3415 ASSERT(ToDoubleRegister(instr->result()).is(f0)); | 3596 ASSERT(ToDoubleRegister(instr->result()).is(f0)); |
3416 | 3597 |
(...skipping 10 matching lines...) Expand all Loading... |
3427 __ CallStub(&stub); | 3608 __ CallStub(&stub); |
3428 } else { | 3609 } else { |
3429 ASSERT(exponent_type.IsDouble()); | 3610 ASSERT(exponent_type.IsDouble()); |
3430 MathPowStub stub(MathPowStub::DOUBLE); | 3611 MathPowStub stub(MathPowStub::DOUBLE); |
3431 __ CallStub(&stub); | 3612 __ CallStub(&stub); |
3432 } | 3613 } |
3433 } | 3614 } |
3434 | 3615 |
3435 | 3616 |
3436 void LCodeGen::DoRandom(LRandom* instr) { | 3617 void LCodeGen::DoRandom(LRandom* instr) { |
| 3618 CpuFeatures::Scope scope(FPU); |
3437 class DeferredDoRandom: public LDeferredCode { | 3619 class DeferredDoRandom: public LDeferredCode { |
3438 public: | 3620 public: |
3439 DeferredDoRandom(LCodeGen* codegen, LRandom* instr) | 3621 DeferredDoRandom(LCodeGen* codegen, LRandom* instr) |
3440 : LDeferredCode(codegen), instr_(instr) { } | 3622 : LDeferredCode(codegen), instr_(instr) { } |
3441 virtual void Generate() { codegen()->DoDeferredRandom(instr_); } | 3623 virtual void Generate() { codegen()->DoDeferredRandom(instr_); } |
3442 virtual LInstruction* instr() { return instr_; } | 3624 virtual LInstruction* instr() { return instr_; } |
3443 private: | 3625 private: |
3444 LRandom* instr_; | 3626 LRandom* instr_; |
3445 }; | 3627 }; |
3446 | 3628 |
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3503 } | 3685 } |
3504 | 3686 |
3505 void LCodeGen::DoDeferredRandom(LRandom* instr) { | 3687 void LCodeGen::DoDeferredRandom(LRandom* instr) { |
3506 __ PrepareCallCFunction(1, scratch0()); | 3688 __ PrepareCallCFunction(1, scratch0()); |
3507 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); | 3689 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); |
3508 // Return value is in v0. | 3690 // Return value is in v0. |
3509 } | 3691 } |
3510 | 3692 |
3511 | 3693 |
3512 void LCodeGen::DoMathExp(LMathExp* instr) { | 3694 void LCodeGen::DoMathExp(LMathExp* instr) { |
| 3695 CpuFeatures::Scope scope(FPU); |
3513 DoubleRegister input = ToDoubleRegister(instr->value()); | 3696 DoubleRegister input = ToDoubleRegister(instr->value()); |
3514 DoubleRegister result = ToDoubleRegister(instr->result()); | 3697 DoubleRegister result = ToDoubleRegister(instr->result()); |
3515 DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); | 3698 DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); |
3516 DoubleRegister double_scratch2 = double_scratch0(); | 3699 DoubleRegister double_scratch2 = double_scratch0(); |
3517 Register temp1 = ToRegister(instr->temp1()); | 3700 Register temp1 = ToRegister(instr->temp1()); |
3518 Register temp2 = ToRegister(instr->temp2()); | 3701 Register temp2 = ToRegister(instr->temp2()); |
3519 | 3702 |
3520 MathExpGenerator::EmitMathExp( | 3703 MathExpGenerator::EmitMathExp( |
3521 masm(), input, result, double_scratch1, double_scratch2, | 3704 masm(), input, result, double_scratch1, double_scratch2, |
3522 temp1, temp2, scratch0()); | 3705 temp1, temp2, scratch0()); |
(...skipping 275 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3798 } else { | 3981 } else { |
3799 DeoptimizeIf(hs, | 3982 DeoptimizeIf(hs, |
3800 instr->environment(), | 3983 instr->environment(), |
3801 ToRegister(instr->index()), | 3984 ToRegister(instr->index()), |
3802 Operand(ToRegister(instr->length()))); | 3985 Operand(ToRegister(instr->length()))); |
3803 } | 3986 } |
3804 } | 3987 } |
3805 | 3988 |
3806 | 3989 |
3807 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | 3990 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
| 3991 CpuFeatures::Scope scope(FPU); |
3808 Register external_pointer = ToRegister(instr->elements()); | 3992 Register external_pointer = ToRegister(instr->elements()); |
3809 Register key = no_reg; | 3993 Register key = no_reg; |
3810 ElementsKind elements_kind = instr->elements_kind(); | 3994 ElementsKind elements_kind = instr->elements_kind(); |
3811 bool key_is_constant = instr->key()->IsConstantOperand(); | 3995 bool key_is_constant = instr->key()->IsConstantOperand(); |
3812 int constant_key = 0; | 3996 int constant_key = 0; |
3813 if (key_is_constant) { | 3997 if (key_is_constant) { |
3814 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | 3998 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); |
3815 if (constant_key & 0xF0000000) { | 3999 if (constant_key & 0xF0000000) { |
3816 Abort("array index constant value too big."); | 4000 Abort("array index constant value too big."); |
3817 } | 4001 } |
(...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3871 case DICTIONARY_ELEMENTS: | 4055 case DICTIONARY_ELEMENTS: |
3872 case NON_STRICT_ARGUMENTS_ELEMENTS: | 4056 case NON_STRICT_ARGUMENTS_ELEMENTS: |
3873 UNREACHABLE(); | 4057 UNREACHABLE(); |
3874 break; | 4058 break; |
3875 } | 4059 } |
3876 } | 4060 } |
3877 } | 4061 } |
3878 | 4062 |
3879 | 4063 |
3880 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { | 4064 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { |
| 4065 CpuFeatures::Scope scope(FPU); |
3881 DoubleRegister value = ToDoubleRegister(instr->value()); | 4066 DoubleRegister value = ToDoubleRegister(instr->value()); |
3882 Register elements = ToRegister(instr->elements()); | 4067 Register elements = ToRegister(instr->elements()); |
3883 Register key = no_reg; | 4068 Register key = no_reg; |
3884 Register scratch = scratch0(); | 4069 Register scratch = scratch0(); |
3885 bool key_is_constant = instr->key()->IsConstantOperand(); | 4070 bool key_is_constant = instr->key()->IsConstantOperand(); |
3886 int constant_key = 0; | 4071 int constant_key = 0; |
3887 Label not_nan; | 4072 Label not_nan; |
3888 | 4073 |
3889 // Calculate the effective address of the slot in the array to store the | 4074 // Calculate the effective address of the slot in the array to store the |
3890 // double value. | 4075 // double value. |
(...skipping 263 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4154 | 4339 |
4155 | 4340 |
4156 void LCodeGen::DoStringLength(LStringLength* instr) { | 4341 void LCodeGen::DoStringLength(LStringLength* instr) { |
4157 Register string = ToRegister(instr->string()); | 4342 Register string = ToRegister(instr->string()); |
4158 Register result = ToRegister(instr->result()); | 4343 Register result = ToRegister(instr->result()); |
4159 __ lw(result, FieldMemOperand(string, String::kLengthOffset)); | 4344 __ lw(result, FieldMemOperand(string, String::kLengthOffset)); |
4160 } | 4345 } |
4161 | 4346 |
4162 | 4347 |
4163 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { | 4348 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { |
| 4349 CpuFeatures::Scope scope(FPU); |
4164 LOperand* input = instr->value(); | 4350 LOperand* input = instr->value(); |
4165 ASSERT(input->IsRegister() || input->IsStackSlot()); | 4351 ASSERT(input->IsRegister() || input->IsStackSlot()); |
4166 LOperand* output = instr->result(); | 4352 LOperand* output = instr->result(); |
4167 ASSERT(output->IsDoubleRegister()); | 4353 ASSERT(output->IsDoubleRegister()); |
4168 FPURegister single_scratch = double_scratch0().low(); | 4354 FPURegister single_scratch = double_scratch0().low(); |
4169 if (input->IsStackSlot()) { | 4355 if (input->IsStackSlot()) { |
4170 Register scratch = scratch0(); | 4356 Register scratch = scratch0(); |
4171 __ lw(scratch, ToMemOperand(input)); | 4357 __ lw(scratch, ToMemOperand(input)); |
4172 __ mtc1(scratch, single_scratch); | 4358 __ mtc1(scratch, single_scratch); |
4173 } else { | 4359 } else { |
4174 __ mtc1(ToRegister(input), single_scratch); | 4360 __ mtc1(ToRegister(input), single_scratch); |
4175 } | 4361 } |
4176 __ cvt_d_w(ToDoubleRegister(output), single_scratch); | 4362 __ cvt_d_w(ToDoubleRegister(output), single_scratch); |
4177 } | 4363 } |
4178 | 4364 |
4179 | 4365 |
4180 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { | 4366 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { |
| 4367 CpuFeatures::Scope scope(FPU); |
4181 LOperand* input = instr->value(); | 4368 LOperand* input = instr->value(); |
4182 LOperand* output = instr->result(); | 4369 LOperand* output = instr->result(); |
4183 | 4370 |
4184 FPURegister dbl_scratch = double_scratch0(); | 4371 FPURegister dbl_scratch = double_scratch0(); |
4185 __ mtc1(ToRegister(input), dbl_scratch); | 4372 __ mtc1(ToRegister(input), dbl_scratch); |
4186 __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22); | 4373 __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22); |
4187 } | 4374 } |
4188 | 4375 |
4189 | 4376 |
4190 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { | 4377 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { |
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4232 ASSERT(input->IsRegister() && input->Equals(instr->result())); | 4419 ASSERT(input->IsRegister() && input->Equals(instr->result())); |
4233 Register reg = ToRegister(input); | 4420 Register reg = ToRegister(input); |
4234 | 4421 |
4235 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); | 4422 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); |
4236 __ Branch(deferred->entry(), hi, reg, Operand(Smi::kMaxValue)); | 4423 __ Branch(deferred->entry(), hi, reg, Operand(Smi::kMaxValue)); |
4237 __ SmiTag(reg, reg); | 4424 __ SmiTag(reg, reg); |
4238 __ bind(deferred->exit()); | 4425 __ bind(deferred->exit()); |
4239 } | 4426 } |
4240 | 4427 |
4241 | 4428 |
| 4429 // Convert unsigned integer with specified number of leading zeroes in binary |
| 4430 // representation to IEEE 754 double. |
| 4431 // Integer to convert is passed in register hiword. |
| 4432 // Resulting double is returned in registers hiword:loword. |
| 4433 // This functions does not work correctly for 0. |
| 4434 static void GenerateUInt2Double(MacroAssembler* masm, |
| 4435 Register hiword, |
| 4436 Register loword, |
| 4437 Register scratch, |
| 4438 int leading_zeroes) { |
| 4439 const int meaningful_bits = kBitsPerInt - leading_zeroes - 1; |
| 4440 const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits; |
| 4441 |
| 4442 const int mantissa_shift_for_hi_word = |
| 4443 meaningful_bits - HeapNumber::kMantissaBitsInTopWord; |
| 4444 const int mantissa_shift_for_lo_word = |
| 4445 kBitsPerInt - mantissa_shift_for_hi_word; |
| 4446 masm->li(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); |
| 4447 if (mantissa_shift_for_hi_word > 0) { |
| 4448 masm->sll(loword, hiword, mantissa_shift_for_lo_word); |
| 4449 masm->srl(hiword, hiword, mantissa_shift_for_hi_word); |
| 4450 masm->Or(hiword, scratch, hiword); |
| 4451 } else { |
| 4452 masm->mov(loword, zero_reg); |
| 4453 masm->sll(hiword, hiword, mantissa_shift_for_hi_word); |
| 4454 masm->Or(hiword, scratch, hiword); |
| 4455 } |
| 4456 |
| 4457 // If least significant bit of biased exponent was not 1 it was corrupted |
| 4458 // by most significant bit of mantissa so we should fix that. |
| 4459 if (!(biased_exponent & 1)) { |
| 4460 masm->li(scratch, 1 << HeapNumber::kExponentShift); |
| 4461 masm->nor(scratch, scratch, scratch); |
| 4462 masm->and_(hiword, hiword, scratch); |
| 4463 } |
| 4464 } |
| 4465 |
| 4466 |
4242 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, | 4467 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, |
4243 LOperand* value, | 4468 LOperand* value, |
4244 IntegerSignedness signedness) { | 4469 IntegerSignedness signedness) { |
4245 Label slow; | 4470 Label slow; |
4246 Register src = ToRegister(value); | 4471 Register src = ToRegister(value); |
4247 Register dst = ToRegister(instr->result()); | 4472 Register dst = ToRegister(instr->result()); |
4248 FPURegister dbl_scratch = double_scratch0(); | 4473 DoubleRegister dbl_scratch = double_scratch0(); |
4249 | 4474 |
4250 // Preserve the value of all registers. | 4475 // Preserve the value of all registers. |
4251 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | 4476 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
4252 | 4477 |
4253 Label done; | 4478 Label done; |
4254 if (signedness == SIGNED_INT32) { | 4479 if (signedness == SIGNED_INT32) { |
4255 // There was overflow, so bits 30 and 31 of the original integer | 4480 // There was overflow, so bits 30 and 31 of the original integer |
4256 // disagree. Try to allocate a heap number in new space and store | 4481 // disagree. Try to allocate a heap number in new space and store |
4257 // the value in there. If that fails, call the runtime system. | 4482 // the value in there. If that fails, call the runtime system. |
4258 if (dst.is(src)) { | 4483 if (dst.is(src)) { |
4259 __ SmiUntag(src, dst); | 4484 __ SmiUntag(src, dst); |
4260 __ Xor(src, src, Operand(0x80000000)); | 4485 __ Xor(src, src, Operand(0x80000000)); |
4261 } | 4486 } |
4262 __ mtc1(src, dbl_scratch); | 4487 if (CpuFeatures::IsSupported(FPU)) { |
4263 __ cvt_d_w(dbl_scratch, dbl_scratch); | 4488 CpuFeatures::Scope scope(FPU); |
| 4489 __ mtc1(src, dbl_scratch); |
| 4490 __ cvt_d_w(dbl_scratch, dbl_scratch); |
| 4491 } else { |
| 4492 FloatingPointHelper::Destination dest = |
| 4493 FloatingPointHelper::kCoreRegisters; |
| 4494 FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, f0, |
| 4495 sfpd_lo, sfpd_hi, |
| 4496 scratch0(), f2); |
| 4497 } |
4264 } else { | 4498 } else { |
4265 __ mtc1(src, dbl_scratch); | 4499 if (CpuFeatures::IsSupported(FPU)) { |
4266 __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22); | 4500 CpuFeatures::Scope scope(FPU); |
| 4501 __ mtc1(src, dbl_scratch); |
| 4502 __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22); |
| 4503 } else { |
| 4504 Label no_leading_zero, done; |
| 4505 __ And(at, src, Operand(0x80000000)); |
| 4506 __ Branch(&no_leading_zero, ne, at, Operand(zero_reg)); |
| 4507 |
| 4508 // Integer has one leading zeros. |
| 4509 GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, t0, 1); |
| 4510 __ Branch(&done); |
| 4511 |
| 4512 __ bind(&no_leading_zero); |
| 4513 GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, t0, 0); |
| 4514 __ Branch(&done); |
| 4515 } |
4267 } | 4516 } |
4268 | 4517 |
4269 if (FLAG_inline_new) { | 4518 if (FLAG_inline_new) { |
4270 __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex); | 4519 __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex); |
4271 __ AllocateHeapNumber(t1, a3, t0, t2, &slow, DONT_TAG_RESULT); | 4520 __ AllocateHeapNumber(t1, a3, t0, scratch0(), &slow, DONT_TAG_RESULT); |
4272 __ Move(dst, t1); | 4521 __ Move(dst, t1); |
4273 __ Branch(&done); | 4522 __ Branch(&done); |
4274 } | 4523 } |
4275 | 4524 |
4276 // Slow case: Call the runtime system to do the number allocation. | 4525 // Slow case: Call the runtime system to do the number allocation. |
4277 __ bind(&slow); | 4526 __ bind(&slow); |
4278 | 4527 |
4279 // TODO(3095996): Put a valid pointer value in the stack slot where the result | 4528 // TODO(3095996): Put a valid pointer value in the stack slot where the result |
4280 // register is stored, as this register is in the pointer map, but contains an | 4529 // register is stored, as this register is in the pointer map, but contains an |
4281 // integer value. | 4530 // integer value. |
4282 __ StoreToSafepointRegisterSlot(zero_reg, dst); | 4531 __ StoreToSafepointRegisterSlot(zero_reg, dst); |
4283 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); | 4532 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); |
4284 __ Move(dst, v0); | 4533 __ Move(dst, v0); |
4285 __ Subu(dst, dst, kHeapObjectTag); | 4534 __ Subu(dst, dst, kHeapObjectTag); |
4286 | 4535 |
4287 // Done. Put the value in dbl_scratch into the value of the allocated heap | 4536 // Done. Put the value in dbl_scratch into the value of the allocated heap |
4288 // number. | 4537 // number. |
4289 __ bind(&done); | 4538 __ bind(&done); |
4290 __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset)); | 4539 if (CpuFeatures::IsSupported(FPU)) { |
| 4540 CpuFeatures::Scope scope(FPU); |
| 4541 __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset)); |
| 4542 } else { |
| 4543 __ sw(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset)); |
| 4544 __ sw(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset)); |
| 4545 } |
4291 __ Addu(dst, dst, kHeapObjectTag); | 4546 __ Addu(dst, dst, kHeapObjectTag); |
4292 __ StoreToSafepointRegisterSlot(dst, dst); | 4547 __ StoreToSafepointRegisterSlot(dst, dst); |
4293 } | 4548 } |
4294 | 4549 |
4295 | 4550 |
4296 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { | 4551 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { |
4297 class DeferredNumberTagD: public LDeferredCode { | 4552 class DeferredNumberTagD: public LDeferredCode { |
4298 public: | 4553 public: |
4299 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) | 4554 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) |
4300 : LDeferredCode(codegen), instr_(instr) { } | 4555 : LDeferredCode(codegen), instr_(instr) { } |
(...skipping 12 matching lines...) Expand all Loading... |
4313 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); | 4568 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); |
4314 if (FLAG_inline_new) { | 4569 if (FLAG_inline_new) { |
4315 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); | 4570 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); |
4316 // We want the untagged address first for performance | 4571 // We want the untagged address first for performance |
4317 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(), | 4572 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(), |
4318 DONT_TAG_RESULT); | 4573 DONT_TAG_RESULT); |
4319 } else { | 4574 } else { |
4320 __ Branch(deferred->entry()); | 4575 __ Branch(deferred->entry()); |
4321 } | 4576 } |
4322 __ bind(deferred->exit()); | 4577 __ bind(deferred->exit()); |
4323 __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset)); | 4578 if (CpuFeatures::IsSupported(FPU)) { |
| 4579 CpuFeatures::Scope scope(FPU); |
| 4580 __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset)); |
| 4581 } else { |
| 4582 __ sw(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset)); |
| 4583 __ sw(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize)); |
| 4584 } |
4324 // Now that we have finished with the object's real address tag it | 4585 // Now that we have finished with the object's real address tag it |
4325 __ Addu(reg, reg, kHeapObjectTag); | 4586 __ Addu(reg, reg, kHeapObjectTag); |
4326 } | 4587 } |
4327 | 4588 |
4328 | 4589 |
4329 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { | 4590 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { |
4330 // TODO(3095996): Get rid of this. For now, we need to make the | 4591 // TODO(3095996): Get rid of this. For now, we need to make the |
4331 // result register contain a valid pointer because it is already | 4592 // result register contain a valid pointer because it is already |
4332 // contained in the register pointer map. | 4593 // contained in the register pointer map. |
4333 Register reg = ToRegister(instr->result()); | 4594 Register reg = ToRegister(instr->result()); |
(...skipping 27 matching lines...) Expand all Loading... |
4361 } | 4622 } |
4362 } | 4623 } |
4363 | 4624 |
4364 | 4625 |
4365 void LCodeGen::EmitNumberUntagD(Register input_reg, | 4626 void LCodeGen::EmitNumberUntagD(Register input_reg, |
4366 DoubleRegister result_reg, | 4627 DoubleRegister result_reg, |
4367 bool deoptimize_on_undefined, | 4628 bool deoptimize_on_undefined, |
4368 bool deoptimize_on_minus_zero, | 4629 bool deoptimize_on_minus_zero, |
4369 LEnvironment* env) { | 4630 LEnvironment* env) { |
4370 Register scratch = scratch0(); | 4631 Register scratch = scratch0(); |
| 4632 CpuFeatures::Scope scope(FPU); |
4371 | 4633 |
4372 Label load_smi, heap_number, done; | 4634 Label load_smi, heap_number, done; |
4373 | 4635 |
4374 // Smi check. | 4636 // Smi check. |
4375 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); | 4637 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); |
4376 | 4638 |
4377 // Heap number map check. | 4639 // Heap number map check. |
4378 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 4640 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
4379 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 4641 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
4380 if (deoptimize_on_undefined) { | 4642 if (deoptimize_on_undefined) { |
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4425 Label done; | 4687 Label done; |
4426 | 4688 |
4427 // The input is a tagged HeapObject. | 4689 // The input is a tagged HeapObject. |
4428 // Heap number map check. | 4690 // Heap number map check. |
4429 __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 4691 __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
4430 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); | 4692 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
4431 // This 'at' value and scratch1 map value are used for tests in both clauses | 4693 // This 'at' value and scratch1 map value are used for tests in both clauses |
4432 // of the if. | 4694 // of the if. |
4433 | 4695 |
4434 if (instr->truncating()) { | 4696 if (instr->truncating()) { |
| 4697 CpuFeatures::Scope scope(FPU); |
4435 Register scratch3 = ToRegister(instr->temp2()); | 4698 Register scratch3 = ToRegister(instr->temp2()); |
4436 FPURegister single_scratch = double_scratch.low(); | 4699 FPURegister single_scratch = double_scratch.low(); |
4437 ASSERT(!scratch3.is(input_reg) && | 4700 ASSERT(!scratch3.is(input_reg) && |
4438 !scratch3.is(scratch1) && | 4701 !scratch3.is(scratch1) && |
4439 !scratch3.is(scratch2)); | 4702 !scratch3.is(scratch2)); |
4440 // Performs a truncating conversion of a floating point number as used by | 4703 // Performs a truncating conversion of a floating point number as used by |
4441 // the JS bitwise operations. | 4704 // the JS bitwise operations. |
4442 Label heap_number; | 4705 Label heap_number; |
4443 __ Branch(&heap_number, eq, scratch1, Operand(at)); // HeapNumber map? | 4706 __ Branch(&heap_number, eq, scratch1, Operand(at)); // HeapNumber map? |
4444 // Check for undefined. Undefined is converted to zero for truncating | 4707 // Check for undefined. Undefined is converted to zero for truncating |
(...skipping 216 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4661 __ CompareMapAndBranch( | 4924 __ CompareMapAndBranch( |
4662 reg, scratch, map, &success, eq, &success, REQUIRE_EXACT_MAP); | 4925 reg, scratch, map, &success, eq, &success, REQUIRE_EXACT_MAP); |
4663 } | 4926 } |
4664 Handle<Map> map = map_set->last(); | 4927 Handle<Map> map = map_set->last(); |
4665 DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment()); | 4928 DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment()); |
4666 __ bind(&success); | 4929 __ bind(&success); |
4667 } | 4930 } |
4668 | 4931 |
4669 | 4932 |
4670 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 4933 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
| 4934 CpuFeatures::Scope vfp_scope(FPU); |
4671 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); | 4935 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); |
4672 Register result_reg = ToRegister(instr->result()); | 4936 Register result_reg = ToRegister(instr->result()); |
4673 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); | 4937 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); |
4674 __ ClampDoubleToUint8(result_reg, value_reg, temp_reg); | 4938 __ ClampDoubleToUint8(result_reg, value_reg, temp_reg); |
4675 } | 4939 } |
4676 | 4940 |
4677 | 4941 |
4678 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { | 4942 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { |
| 4943 CpuFeatures::Scope vfp_scope(FPU); |
4679 Register unclamped_reg = ToRegister(instr->unclamped()); | 4944 Register unclamped_reg = ToRegister(instr->unclamped()); |
4680 Register result_reg = ToRegister(instr->result()); | 4945 Register result_reg = ToRegister(instr->result()); |
4681 __ ClampUint8(result_reg, unclamped_reg); | 4946 __ ClampUint8(result_reg, unclamped_reg); |
4682 } | 4947 } |
4683 | 4948 |
4684 | 4949 |
4685 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { | 4950 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { |
| 4951 CpuFeatures::Scope vfp_scope(FPU); |
4686 Register scratch = scratch0(); | 4952 Register scratch = scratch0(); |
4687 Register input_reg = ToRegister(instr->unclamped()); | 4953 Register input_reg = ToRegister(instr->unclamped()); |
4688 Register result_reg = ToRegister(instr->result()); | 4954 Register result_reg = ToRegister(instr->result()); |
4689 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); | 4955 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); |
4690 Label is_smi, done, heap_number; | 4956 Label is_smi, done, heap_number; |
4691 | 4957 |
4692 // Both smi and heap number cases are handled. | 4958 // Both smi and heap number cases are handled. |
4693 __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi); | 4959 __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi); |
4694 | 4960 |
4695 // Check for heap number | 4961 // Check for heap number |
(...skipping 601 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5297 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 5563 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
5298 __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset)); | 5564 __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset)); |
5299 | 5565 |
5300 // Check the marker in the calling frame. | 5566 // Check the marker in the calling frame. |
5301 __ bind(&check_frame_marker); | 5567 __ bind(&check_frame_marker); |
5302 __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); | 5568 __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); |
5303 } | 5569 } |
5304 | 5570 |
5305 | 5571 |
5306 void LCodeGen::EnsureSpaceForLazyDeopt() { | 5572 void LCodeGen::EnsureSpaceForLazyDeopt() { |
| 5573 if (info()->IsStub()) return; |
5307 // Ensure that we have enough space after the previous lazy-bailout | 5574 // Ensure that we have enough space after the previous lazy-bailout |
5308 // instruction for patching the code here. | 5575 // instruction for patching the code here. |
5309 int current_pc = masm()->pc_offset(); | 5576 int current_pc = masm()->pc_offset(); |
5310 int patch_size = Deoptimizer::patch_size(); | 5577 int patch_size = Deoptimizer::patch_size(); |
5311 if (current_pc < last_lazy_deopt_pc_ + patch_size) { | 5578 if (current_pc < last_lazy_deopt_pc_ + patch_size) { |
5312 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc; | 5579 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc; |
5313 ASSERT_EQ(0, padding_size % Assembler::kInstrSize); | 5580 ASSERT_EQ(0, padding_size % Assembler::kInstrSize); |
5314 while (padding_size > 0) { | 5581 while (padding_size > 0) { |
5315 __ nop(); | 5582 __ nop(); |
5316 padding_size -= Assembler::kInstrSize; | 5583 padding_size -= Assembler::kInstrSize; |
(...skipping 204 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5521 __ Subu(scratch, result, scratch); | 5788 __ Subu(scratch, result, scratch); |
5522 __ lw(result, FieldMemOperand(scratch, | 5789 __ lw(result, FieldMemOperand(scratch, |
5523 FixedArray::kHeaderSize - kPointerSize)); | 5790 FixedArray::kHeaderSize - kPointerSize)); |
5524 __ bind(&done); | 5791 __ bind(&done); |
5525 } | 5792 } |
5526 | 5793 |
5527 | 5794 |
5528 #undef __ | 5795 #undef __ |
5529 | 5796 |
5530 } } // namespace v8::internal | 5797 } } // namespace v8::internal |
OLD | NEW |