// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "a64/lithium-codegen-a64.h"
#include "a64/lithium-gap-resolver-a64.h"
#include "code-stubs.h"
#include "stub-cache.h"
#include "hydrogen-osr.h"

namespace v8 {
namespace internal {


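// A CallWrapper that records a safepoint (from the given pointer map) right
// after the wrapped call has been emitted.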
class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const { }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

// Emit code to branch if the given condition holds.
// The code generated here doesn't modify the flags; they must have been set
// by some prior instructions.
//
// The EmitInverted function simply inverts the condition.
class BranchOnCondition : public BranchGenerator {
 public:
  BranchOnCondition(LCodeGen* codegen, Condition cond)
    : BranchGenerator(codegen),
      cond_(cond) { }

  virtual void Emit(Label* label) const {
    __ B(cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    if (cond_ != al) {
      __ B(InvertCondition(cond_), label);
    }
  }

 private:
  Condition cond_;
};
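
// For example, with cond_ == eq, Emit emits "B.eq label" while EmitInverted
// emits "B.ne label". An unconditional branch (al) has no inverse, so
// EmitInverted emits nothing and execution falls through.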


// Emit code to compare lhs and rhs and branch if the condition holds.
// This uses MacroAssembler's CompareAndBranch function so it will handle
// converting the comparison to Cbz/Cbnz if the right-hand side is 0.
//
// EmitInverted still compares the two operands but inverts the condition.
class CompareAndBranch : public BranchGenerator {
 public:
  CompareAndBranch(LCodeGen* codegen,
                   Condition cond,
                   const Register& lhs,
                   const Operand& rhs)
    : BranchGenerator(codegen),
      cond_(cond),
      lhs_(lhs),
      rhs_(rhs) { }

  virtual void Emit(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, InvertCondition(cond_), label);
  }

 private:
  Condition cond_;
  const Register& lhs_;
  const Operand& rhs_;
};
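
// For example, comparing a register against zero for equality can be emitted
// as a single instruction:
//   CompareAndBranch(x0, 0, eq, label)  ->  "Cbz x0, label"
// instead of the two-instruction sequence "Cmp x0, #0; B.eq label".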


// Test the input with the given mask and branch if the condition holds.
// If the condition is 'eq' or 'ne' this will use MacroAssembler's
// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
// conversion to Tbz/Tbnz when possible.
class TestAndBranch : public BranchGenerator {
 public:
  TestAndBranch(LCodeGen* codegen,
                Condition cond,
                const Register& value,
                uint64_t mask)
    : BranchGenerator(codegen),
      cond_(cond),
      value_(value),
      mask_(mask) { }

  virtual void Emit(Label* label) const {
    switch (cond_) {
      case eq:
        __ TestAndBranchIfAllClear(value_, mask_, label);
        break;
      case ne:
        __ TestAndBranchIfAnySet(value_, mask_, label);
        break;
      default:
        __ Tst(value_, mask_);
        __ B(cond_, label);
    }
  }

  virtual void EmitInverted(Label* label) const {
    // The inverse of "all clear" is "any set" and vice versa.
    switch (cond_) {
      case eq:
        __ TestAndBranchIfAnySet(value_, mask_, label);
        break;
      case ne:
        __ TestAndBranchIfAllClear(value_, mask_, label);
        break;
      default:
        __ Tst(value_, mask_);
        __ B(InvertCondition(cond_), label);
    }
  }

 private:
  Condition cond_;
  const Register& value_;
  uint64_t mask_;
};
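
// For example, with the single-bit mask 0x8 (bit 3):
//   eq ("all clear") can become "Tbz value, #3, label"
//   ne ("any set")   can become "Tbnz value, #3, label"
// either of which tests and branches in a single instruction, without
// touching the flags.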


// Test the input and branch if it is non-zero and not a NaN.
class BranchIfNonZeroNumber : public BranchGenerator {
 public:
  BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
                        const FPRegister& scratch)
    : BranchGenerator(codegen), value_(value), scratch_(scratch) { }

  virtual void Emit(Label* label) const {
    __ Fabs(scratch_, value_);
    // Compare with 0.0. Because scratch_ is positive, the result can be one
    // of nZCv (equal), nzCv (greater) or nzCV (unordered).
    __ Fcmp(scratch_, 0.0);
    __ B(gt, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ Fabs(scratch_, value_);
    __ Fcmp(scratch_, 0.0);
    __ B(le, label);
  }

 private:
  const FPRegister& value_;
  const FPRegister& scratch_;
};
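
// An unordered result (nzCV) fails the "gt" test, so NaNs do not branch in
// Emit; conversely, "le" is taken for unordered results, so NaNs do branch
// in EmitInverted. This matches "non-zero and not a NaN".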


// Test the input and branch if it is a heap number.
class BranchIfHeapNumber : public BranchGenerator {
 public:
  BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
      : BranchGenerator(codegen), value_(value) { }

  virtual void Emit(Label* label) const {
    __ JumpIfHeapNumber(value_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotHeapNumber(value_, label);
  }

 private:
  const Register& value_;
};


// Test the input and branch if it is the specified root value.
class BranchIfRoot : public BranchGenerator {
 public:
  BranchIfRoot(LCodeGen* codegen, const Register& value,
               Heap::RootListIndex index)
      : BranchGenerator(codegen), value_(value), index_(index) { }

  virtual void Emit(Label* label) const {
    __ JumpIfRoot(value_, index_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotRoot(value_, index_, label);
  }

 private:
  const Register& value_;
  const Heap::RootListIndex index_;
};


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    default:
      UNREACHABLE();
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);

    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
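    // The values of dematerialized objects are stored after the regular
    // environment values, so env_offset indexes into that tail of the
    // environment.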
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  ASSERT(instr != NULL);

  Assembler::BlockPoolsScope scope(masm_);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  if ((code->kind() == Code::BINARY_OP_IC) ||
      (code->kind() == Code::COMPARE_IC)) {
    // Signal that we don't inline smi code before these stubs in the
    // optimizing code generator.
    InlineSmiCheckInfo::EmitNotInlined(masm());
  }
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->function()).Is(x1));
  ASSERT(ToRegister(instr->result()).Is(x0));

  int arity = instr->arity();
  CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(instr->IsMarkedAsCall());
  ASSERT(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, instr->arity());
  // No cell in x2 for construct type feedback in optimized code.
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);

  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);

  ASSERT(ToRegister(instr->result()).is(x0));
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  ASSERT(instr->IsMarkedAsCall());
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, Operand(instr->arity()));
  __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);

  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;

      // We might need to create a holey array; look at the first argument.
      __ Peek(x10, 0);
      __ Cbz(x10, &packed_case);

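      // A non-zero first argument is the requested length, and a single-
      // argument constructor call with a non-zero length creates an array of
      // holes, so the holey variant of the elements kind is needed.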
      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
      CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
      __ B(&done);
      __ Bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
    __ Bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  }

  ASSERT(ToRegister(instr->result()).is(x0));
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Mov(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ Ldr(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadHeapObject(cp,
                      Handle<HeapObject>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::Kind kind,
                               int arguments,
                               Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(
      masm(), kind, arguments, deopt_mode);

  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }

  if (kind & Safepoint::kWithRegisters) {
    // Register cp always contains a pointer to the context.
    safepoint.DefinePointerRegister(cp, zone());
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers, int arguments, Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
}


bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() &&
         GenerateBody() &&
         GenerateDeferredCode() &&
         GenerateDeoptJumpTable() &&
         GenerateSafepointTable();
}


void LCodeGen::SaveCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to save just the callee-saved doubles? It
    // looks like it's saving all of them.
    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
    __ Poke(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to restore just the callee-saved doubles?
    // It looks like it's restoring all of them.
    FPRegister value = FPRegister::FromAllocationIndex(iterator.Current());
    __ Peek(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    // TODO(all): Add support for stop_t FLAG in DEBUG mode.

    // Sloppy mode functions and builtins need to replace the receiver with
    // the global proxy when called as functions (without an explicit
    // receiver object).
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
      __ Peek(x10, receiver_offset);
      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);

      __ Ldr(x10, GlobalObjectMemOperand());
      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
      __ Poke(x10, receiver_offset);

      __ Bind(&ok);
    }
  }

  ASSERT(__ StackPointer().Is(jssp));
  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    __ Claim(slots, kPointerSize);
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Allocate a local context if needed.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is in x1.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ Push(x1);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in x0. It replaces the context passed to us. It's
    // saved in the stack and kept live in cp.
    __ Mov(cp, x0);
    __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        Register value = x0;
        Register scratch = x3;

        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ Ldr(value, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ Str(value, target);
        // Update the write barrier. This clobbers value and scratch.
        __ RecordWriteContextSlot(cp, target.offset(), value, scratch,
                                  GetLinkRegisterState(), kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }

  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 0);
  __ Claim(slots);
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());

      __ Bind(code->entry());

      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        __ Push(lr, fp, cp);
        __ Mov(fp, Smi::FromInt(StackFrame::STUB));
        __ Push(fp);
        __ Add(fp, __ StackPointer(),
               StandardFrameConstants::kFixedFrameSizeFromFp);
        Comment(";;; Deferred code");
      }

      code->Generate();

      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        __ Pop(xzr, cp, fp, lr);
        frame_is_built_ = false;
      }

      __ B(code->exit());
    }
  }

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after deferred code because
  // deferred code generation is the last step which generates code. The two
  // following steps will only output data used by Crankshaft.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}


bool LCodeGen::GenerateDeoptJumpTable() {
  if (deopt_jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  Label table_start;
  __ bind(&table_start);
  Label needs_frame;
  for (int i = 0; i < deopt_jump_table_.length(); i++) {
    __ Bind(&deopt_jump_table_[i]->label);
    Address entry = deopt_jump_table_[i]->address;
    Deoptimizer::BailoutType type = deopt_jump_table_[i]->bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (deopt_jump_table_[i]->needs_frame) {
      ASSERT(!info()->saves_caller_doubles());

      UseScratchRegisterScope temps(masm());
      Register stub_deopt_entry = temps.AcquireX();
      Register stub_marker = temps.AcquireX();

      __ Mov(stub_deopt_entry, ExternalReference::ForDeoptEntry(entry));
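      // All entries that need a frame share one frame-building sequence: it
      // is emitted at the first such entry and branched to from later ones.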
      if (needs_frame.is_bound()) {
        __ B(&needs_frame);
      } else {
        __ Bind(&needs_frame);
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
        __ Push(lr, fp, cp, stub_marker);
        __ Add(fp, __ StackPointer(), 2 * kPointerSize);
        __ Call(stub_deopt_entry);
      }
    } else {
      if (info()->saves_caller_doubles()) {
        ASSERT(info()->IsStub());
        RestoreCallerDoubles();
      }
      __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    }
    masm()->CheckConstPool(false, false);
  }

  // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  // We do not know how much data will be emitted for the safepoint table, so
  // force emission of the veneer pool.
  masm()->CheckVeneerPool(true, true);
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
  info()->CommitDependencies(code);
}


void LCodeGen::Abort(BailoutReason reason) {
  info()->set_bailout_reason(reason);
  status_ = ABORTED;
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;

  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }

  code->set_deoptimization_data(*data);
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length(); i < length; i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::DeoptimizeBranch(
    LEnvironment* environment,
    BranchType branch_type, Register reg, int bit,
    Deoptimizer::BailoutType* override_bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  Deoptimizer::BailoutType bailout_type =
      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;

  if (override_bailout_type != NULL) {
    bailout_type = *override_bailout_type;
  }

  ASSERT(environment->HasBeenRegistered());
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);

  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
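    // Stress mode: decrement the stress_deopt_count cell and force a deopt
    // when it reaches zero, resetting it to FLAG_deopt_every_n_times.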
    Label not_zero;
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());

    __ Push(x0, x1, x2);
    __ Mrs(x2, NZCV);
    __ Mov(x0, count);
    __ Ldr(w1, MemOperand(x0));
    __ Subs(x1, x1, 1);
    __ B(gt, &not_zero);
    __ Mov(w1, FLAG_deopt_every_n_times);
    __ Str(w1, MemOperand(x0));
    __ Pop(x2, x1, x0);
    ASSERT(frame_is_built_);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ Unreachable();

    __ Bind(&not_zero);
    __ Str(w1, MemOperand(x0));
    __ Msr(NZCV, x2);
    __ Pop(x2, x1, x0);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label dont_trap;
    __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
    __ Debug("trap_on_deopt", __LINE__, BREAK);
    __ Bind(&dont_trap);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  // Go through the jump table if we need to build a frame, or if we need to
  // restore caller doubles.
  if (branch_type == always &&
      frame_is_built_ && !info()->saves_caller_doubles()) {
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (deopt_jump_table_.is_empty() ||
        (deopt_jump_table_.last()->address != entry) ||
        (deopt_jump_table_.last()->bailout_type != bailout_type) ||
        (deopt_jump_table_.last()->needs_frame != !frame_is_built_)) {
      Deoptimizer::JumpTableEntry* table_entry =
          new(zone()) Deoptimizer::JumpTableEntry(entry,
                                                  bailout_type,
                                                  !frame_is_built_);
      deopt_jump_table_.Add(table_entry, zone());
    }
    __ B(&deopt_jump_table_.last()->label,
         branch_type, reg, bit);
  }
}


void LCodeGen::Deoptimize(LEnvironment* environment,
                          Deoptimizer::BailoutType* override_bailout_type) {
  DeoptimizeBranch(environment, always, NoReg, -1, override_bailout_type);
}


void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) {
  DeoptimizeBranch(environment, static_cast<BranchType>(cond));
}


void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) {
  DeoptimizeBranch(environment, reg_zero, rt);
}


void LCodeGen::DeoptimizeIfNotZero(Register rt, LEnvironment* environment) {
  DeoptimizeBranch(environment, reg_not_zero, rt);
}


void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) {
  int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
  DeoptimizeBranch(environment, reg_bit_set, rt, sign_bit);
}


void LCodeGen::DeoptimizeIfSmi(Register rt,
                               LEnvironment* environment) {
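  // A smi has a clear tag bit, so deoptimize when the tag bit is clear.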
  DeoptimizeBranch(environment, reg_bit_clear, rt, MaskToBit(kSmiTagMask));
}


void LCodeGen::DeoptimizeIfNotSmi(Register rt, LEnvironment* environment) {
  DeoptimizeBranch(environment, reg_bit_set, rt, MaskToBit(kSmiTagMask));
}


void LCodeGen::DeoptimizeIfRoot(Register rt,
                                Heap::RootListIndex index,
                                LEnvironment* environment) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(eq, environment);
}


void LCodeGen::DeoptimizeIfNotRoot(Register rt,
                                   Heap::RootListIndex index,
                                   LEnvironment* environment) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(ne, environment);
}


void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input,
                                     LEnvironment* environment) {
  __ TestForMinusZero(input);
  DeoptimizeIf(vs, environment);
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
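    // For example, if space_needed is 4 * kInstructionSize and only one
    // instruction has been emitted since last_lazy_deopt_pc_, three nops are
    // emitted below to pad the remaining space.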
    intptr_t current_pc = masm()->pc_offset();

    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
      ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      ASSERT((padding_size % kInstructionSize) == 0);
      InstructionAccurateScope instruction_accurate(
          masm(), padding_size / kInstructionSize);

      while (padding_size > 0) {
        __ nop();
        padding_size -= kInstructionSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


Register LCodeGen::ToRegister(LOperand* op) const {
  // TODO(all): support zero register results, as ToRegister32.
  ASSERT((op != NULL) && op->IsRegister());
  return Register::FromAllocationIndex(op->index());
}


Register LCodeGen::ToRegister32(LOperand* op) const {
  ASSERT(op != NULL);
  if (op->IsConstantOperand()) {
    // If this is a constant operand, the result must be the zero register.
    ASSERT(ToInteger32(LConstantOperand::cast(op)) == 0);
    return wzr;
  } else {
    return ToRegister(op).W();
  }
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT((op != NULL) && op->IsDoubleRegister());
  return DoubleRegister::FromAllocationIndex(op->index());
}


Operand LCodeGen::ToOperand(LOperand* op) {
  ASSERT(op != NULL);
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      ASSERT(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    ASSERT(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}


Operand LCodeGen::ToOperand32I(LOperand* op) {
  return ToOperand32(op, SIGNED_INT32);
}


Operand LCodeGen::ToOperand32U(LOperand* op) {
  return ToOperand32(op, UNSIGNED_INT32);
}


Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
  ASSERT(op != NULL);
  if (op->IsRegister()) {
    return Operand(ToRegister32(op));
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand(signedness == SIGNED_INT32
                     ? constant->Integer32Value()
                     : static_cast<uint32_t>(constant->Integer32Value()));
    } else {
      // Other constants not implemented.
      Abort(kToOperand32UnsupportedImmediate);
    }
  }
  // Other cases are not implemented.
  UNREACHABLE();
  return Operand(0);
}


static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) {
  ASSERT(index < 0);
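  // Arguments use negative indices: index -1 maps to offset 0, index -2 to
  // kPointerSize, and so on up the stack.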
  return -(index + 1) * kPointerSize;
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  ASSERT(op != NULL);
  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // There is no eager frame; retrieve the parameter relative to the stack
    // pointer.
    return MemOperand(masm()->StackPointer(),
                      ArgumentsOffsetWithoutFrame(op->index()));
  }
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return op->IsConstantOperand() &&
      chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = nv;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = ne;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


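// Pick the cheapest branch layout for a two-way control-flow split: if both
// destinations are the same block, just jump there; if one destination is
// the next emitted block, emit only the branch for the other destination
// and fall through.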
template<class InstrType>
void LCodeGen::EmitBranchGeneric(InstrType instr,
                                 const BranchGenerator& branch) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    branch.Emit(chunk_->GetAssemblyLabel(left_block));
  } else {
    branch.Emit(chunk_->GetAssemblyLabel(left_block));
    __ B(chunk_->GetAssemblyLabel(right_block));
  }
}


template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
  ASSERT((condition != al) && (condition != nv));
  BranchOnCondition branch(this, condition);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitCompareAndBranch(InstrType instr,
                                    Condition condition,
                                    const Register& lhs,
                                    const Operand& rhs) {
  ASSERT((condition != al) && (condition != nv));
  CompareAndBranch branch(this, condition, lhs, rhs);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitTestAndBranch(InstrType instr,
                                 Condition condition,
                                 const Register& value,
                                 uint64_t mask) {
  ASSERT((condition != al) && (condition != nv));
  TestAndBranch branch(this, condition, value, mask);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
                                         const FPRegister& value,
                                         const FPRegister& scratch) {
  BranchIfNonZeroNumber branch(this, value, scratch);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
                                      const Register& value) {
  BranchIfHeapNumber branch(this, value);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfRoot(InstrType instr,
                                const Register& value,
                                Heap::RootListIndex index) {
  BranchIfRoot branch(this, value, index);
  EmitBranchGeneric(instr, branch);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) {
      resolver_.Resolve(move);
    }
  }
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());

  // The pointer to the arguments array comes from DoArgumentsElements.
  // It does not point directly to the arguments and there is an offset of
  // two words that we must take into account when accessing an argument.
  // Subtracting the index from length accounts for one, so we add one more.
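  // For example, with length 2 and index 0, the argument is loaded from
  // offset ((2 - 0) + 1) * kPointerSize, i.e. three words past the arguments
  // pointer.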
1415 | |
1416 if (instr->length()->IsConstantOperand() && | |
1417 instr->index()->IsConstantOperand()) { | |
1418 int index = ToInteger32(LConstantOperand::cast(instr->index())); | |
1419 int length = ToInteger32(LConstantOperand::cast(instr->length())); | |
1420 int offset = ((length - index) + 1) * kPointerSize; | |
1421 __ Ldr(result, MemOperand(arguments, offset)); | |
1422 } else if (instr->index()->IsConstantOperand()) { | |
1423 Register length = ToRegister32(instr->length()); | |
1424 int index = ToInteger32(LConstantOperand::cast(instr->index())); | |
1425 int loc = index - 1; | |
1426 if (loc != 0) { | |
1427 __ Sub(result.W(), length, loc); | |
1428 __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2)); | |
1429 } else { | |
1430 __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2)); | |
1431 } | |
1432 } else { | |
1433 Register length = ToRegister32(instr->length()); | |
1434 Operand index = ToOperand32I(instr->index()); | |
1435 __ Sub(result.W(), length, index); | |
1436 __ Add(result.W(), result.W(), 1); | |
1437 __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2)); | |
1438 } | |
1439 } | |
1440 | |
1441 | |
1442 void LCodeGen::DoAddE(LAddE* instr) { | |
1443 Register result = ToRegister(instr->result()); | |
1444 Register left = ToRegister(instr->left()); | |
1445 Operand right = (instr->right()->IsConstantOperand()) | |
1446 ? ToInteger32(LConstantOperand::cast(instr->right())) | |
1447 : Operand(ToRegister32(instr->right()), SXTW); | |
1448 | |
1449 ASSERT(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)); | |
1450 __ Add(result, left, right); | |
1451 } | |
1452 | |
1453 | |
1454 void LCodeGen::DoAddI(LAddI* instr) { | |
1455 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | |
1456 Register result = ToRegister32(instr->result()); | |
1457 Register left = ToRegister32(instr->left()); | |
1458 Operand right = ToOperand32I(instr->right()); | |
1459 if (can_overflow) { | |
1460 __ Adds(result, left, right); | |
1461 DeoptimizeIf(vs, instr->environment()); | |
1462 } else { | |
1463 __ Add(result, left, right); | |
1464 } | |
1465 } | |
1466 | |
1467 | |
1468 void LCodeGen::DoAddS(LAddS* instr) { | |
1469 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | |
1470 Register result = ToRegister(instr->result()); | |
1471 Register left = ToRegister(instr->left()); | |
1472 Operand right = ToOperand(instr->right()); | |
1473 if (can_overflow) { | |
1474 __ Adds(result, left, right); | |
1475 DeoptimizeIf(vs, instr->environment()); | |
1476 } else { | |
1477 __ Add(result, left, right); | |
1478 } | |
1479 } | |
1480 | |
1481 | |
1482 void LCodeGen::DoAllocate(LAllocate* instr) { | |
1483 class DeferredAllocate: public LDeferredCode { | |
1484 public: | |
1485 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) | |
1486 : LDeferredCode(codegen), instr_(instr) { } | |
1487 virtual void Generate() { codegen()->DoDeferredAllocate(instr_); } | |
1488 virtual LInstruction* instr() { return instr_; } | |
1489 private: | |
1490 LAllocate* instr_; | |
1491 }; | |
1492 | |
1493 DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr); | |
1494 | |
1495 Register result = ToRegister(instr->result()); | |
1496 Register temp1 = ToRegister(instr->temp1()); | |
1497 Register temp2 = ToRegister(instr->temp2()); | |
1498 | |
1499 // Allocate memory for the object. | |
1500 AllocationFlags flags = TAG_OBJECT; | |
1501 if (instr->hydrogen()->MustAllocateDoubleAligned()) { | |
1502 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); | |
1503 } | |
1504 | |
1505 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { | |
1506 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); | |
1507 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); | |
1508 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); | |
1509 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { | |
1510 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); | |
1511 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE); | |
1512 } | |
1513 | |
1514 if (instr->size()->IsConstantOperand()) { | |
1515 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); | |
1516 if (size <= Page::kMaxRegularHeapObjectSize) { | |
1517 __ Allocate(size, result, temp1, temp2, deferred->entry(), flags); | |
1518 } else { | |
1519 __ B(deferred->entry()); | |
1520 } | |
1521 } else { | |
1522 Register size = ToRegister32(instr->size()); | |
1523 __ Sxtw(size.X(), size); | |
1524 __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags); | |
1525 } | |
1526 | |
1527 __ Bind(deferred->exit()); | |
1528 | |
1529 if (instr->hydrogen()->MustPrefillWithFiller()) { | |
1530 Register filler_count = temp1; | |
1531 Register filler = temp2; | |
1532 Register untagged_result = ToRegister(instr->temp3()); | |
1533 | |
1534 if (instr->size()->IsConstantOperand()) { | |
1535 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); | |
1536 __ Mov(filler_count, size / kPointerSize); | |
1537 } else { | |
1538 __ Lsr(filler_count.W(), ToRegister32(instr->size()), kPointerSizeLog2); | |
1539 } | |
1540 | |
1541 __ Sub(untagged_result, result, kHeapObjectTag); | |
1542 __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map())); | |
1543 __ FillFields(untagged_result, filler_count, filler); | |
1544 } else { | |
1545 ASSERT(instr->temp3() == NULL); | |
1546 } | |
1547 } | |
1548 | |
1549 | |
1550 void LCodeGen::DoDeferredAllocate(LAllocate* instr) { | |
1551 // TODO(3095996): Get rid of this. For now, we need to make the | |
1552 // result register contain a valid pointer because it is already | |
1553 // contained in the register pointer map. | |
1554 __ Mov(ToRegister(instr->result()), Smi::FromInt(0)); | |
1555 | |
1556 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | |
1557 // We're in a SafepointRegistersScope so we can use any scratch registers. | |
1558 Register size = x0; | |
1559 if (instr->size()->IsConstantOperand()) { | |
1560 __ Mov(size, ToSmi(LConstantOperand::cast(instr->size()))); | |
1561 } else { | |
1562 __ SmiTag(size, ToRegister32(instr->size()).X()); | |
1563 } | |
1564 int flags = AllocateDoubleAlignFlag::encode( | |
1565 instr->hydrogen()->MustAllocateDoubleAligned()); | |
1566 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) { | |
1567 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation()); | |
1568 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); | |
1569 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE); | |
1570 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) { | |
1571 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation()); | |
1572 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE); | |
1573 } else { | |
1574 flags = AllocateTargetSpace::update(flags, NEW_SPACE); | |
1575 } | |
1576 __ Mov(x10, Smi::FromInt(flags)); | |
1577 __ Push(size, x10); | |
1578 | |
1579 CallRuntimeFromDeferred( | |
1580 Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); | |
1581 __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result())); | |
1582 } | |
1583 | |
1584 | |
1585 void LCodeGen::DoApplyArguments(LApplyArguments* instr) { | |
1586 Register receiver = ToRegister(instr->receiver()); | |
1587 Register function = ToRegister(instr->function()); | |
1588 Register length = ToRegister32(instr->length()); | |
1589 | |
1590 Register elements = ToRegister(instr->elements()); | |
1591 Register scratch = x5; | |
1592 ASSERT(receiver.Is(x0)); // Used for parameter count. | |
1593 ASSERT(function.Is(x1)); // Required by InvokeFunction. | |
1594 ASSERT(ToRegister(instr->result()).Is(x0)); | |
1595 ASSERT(instr->IsMarkedAsCall()); | |
1596 | |
1597 // Copy the arguments to this function possibly from the | |
1598 // adaptor frame below it. | |
1599 const uint32_t kArgumentsLimit = 1 * KB; | |
1600 __ Cmp(length, kArgumentsLimit); | |
1601 DeoptimizeIf(hi, instr->environment()); | |
1602 | |
1603 // Push the receiver and use the register to keep the original | |
1604 // number of arguments. | |
1605 __ Push(receiver); | |
1606 Register argc = receiver; | |
1607 receiver = NoReg; | |
1608 __ Sxtw(argc, length); | |
1609 // The arguments are at a one pointer size offset from elements. | |
1610 __ Add(elements, elements, 1 * kPointerSize); | |
1611 | |
1612 // Loop through the arguments pushing them onto the execution | |
1613 // stack. | |
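// Roughly, the loop below performs (a sketch; arguments are pushed from
// the last to the first, so the first argument ends up on top):
//
//   for (int i = length; i != 0; i--) {
//     Push(arguments[i]);
//   }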
1614 Label invoke, loop; | |
1615 // length is a small non-negative integer, due to the test above. | |
1616 __ Cbz(length, &invoke); | |
1617 __ Bind(&loop); | |
1618 __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2)); | |
1619 __ Push(scratch); | |
1620 __ Subs(length, length, 1); | |
1621 __ B(ne, &loop); | |
1622 | |
1623 __ Bind(&invoke); | |
1624 ASSERT(instr->HasPointerMap()); | |
1625 LPointerMap* pointers = instr->pointer_map(); | |
1626 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt); | |
1627 // The number of arguments is stored in argc (receiver) which is x0, as | |
1628 // expected by InvokeFunction. | |
1629 ParameterCount actual(argc); | |
1630 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator); | |
1631 } | |
1632 | |
1633 | |
1634 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { | |
1635 Register result = ToRegister(instr->result()); | |
1636 | |
1637 if (instr->hydrogen()->from_inlined()) { | |
1638 // When we are inside an inlined function, the arguments are the last things | |
1639 // that have been pushed on the stack. Therefore the arguments array can be | |
1640 // accessed directly from jssp. | |
1641 // However, in the normal case, it is accessed via fp but there are two words | |
1642 // on the stack between fp and the arguments (the saved lr and fp), and the | |
1643 // LAccessArgumentsAt implementation takes that into account. | |
1644 // In the inlined case we need to subtract the size of 2 words from jssp to | |
1645 // get a pointer which will work well with LAccessArgumentsAt. | |
1646 ASSERT(masm()->StackPointer().Is(jssp)); | |
1647 __ Sub(result, jssp, 2 * kPointerSize); | |
1648 } else { | |
1649 ASSERT(instr->temp() != NULL); | |
1650 Register previous_fp = ToRegister(instr->temp()); | |
1651 | |
1652 __ Ldr(previous_fp, | |
1653 MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
1654 __ Ldr(result, | |
1655 MemOperand(previous_fp, StandardFrameConstants::kContextOffset)); | |
1656 __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); | |
1657 __ Csel(result, fp, previous_fp, ne); | |
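// In C++ terms (a sketch), the Csel selects
//   result = (caller frame is an arguments adaptor) ? previous_fp : fp;
// since the compare above sets 'ne' exactly when no adaptor is present.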
1658 } | |
1659 } | |
1660 | |
1661 | |
1662 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { | |
1663 Register elements = ToRegister(instr->elements()); | |
1664 Register result = ToRegister32(instr->result()); | |
1665 Label done; | |
1666 | |
1667 // If there is no arguments adaptor frame, the number of arguments is fixed. | |
1668 __ Cmp(fp, elements); | |
1669 __ Mov(result, scope()->num_parameters()); | |
1670 __ B(eq, &done); | |
1671 | |
1672 // Arguments adaptor frame present. Get argument length from there. | |
1673 __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
1674 __ Ldr(result, | |
1675 UntagSmiMemOperand(result.X(), | |
1676 ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
1677 | |
1678 // Argument length is in result register. | |
1679 __ Bind(&done); | |
1680 } | |
1681 | |
1682 | |
1683 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { | |
1684 DoubleRegister left = ToDoubleRegister(instr->left()); | |
1685 DoubleRegister right = ToDoubleRegister(instr->right()); | |
1686 DoubleRegister result = ToDoubleRegister(instr->result()); | |
1687 | |
1688 switch (instr->op()) { | |
1689 case Token::ADD: __ Fadd(result, left, right); break; | |
1690 case Token::SUB: __ Fsub(result, left, right); break; | |
1691 case Token::MUL: __ Fmul(result, left, right); break; | |
1692 case Token::DIV: __ Fdiv(result, left, right); break; | |
1693 case Token::MOD: { | |
1694 // The ECMA-262 remainder operator is the remainder from a truncating | |
1695 // (round-towards-zero) division. Note that this differs from IEEE-754. | |
1696 // | |
1697 // TODO(jbramley): See if it's possible to do this inline, rather than by | |
1698 // calling a helper function. With frintz (to produce the intermediate | |
1699 // quotient) and fmsub (to calculate the remainder without loss of | |
1700 // precision), it should be possible. However, we would need support for | |
1701 // fdiv in round-towards-zero mode, and the A64 simulator doesn't support | |
1702 // that yet. | |
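// A worked example (not from the original comment): a truncating
// division gives 7 % 2 == 1, whereas the IEEE-754 remainder operation,
// which rounds the quotient to nearest, gives remainder(7, 2) == -1.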
1703 ASSERT(left.Is(d0)); | |
1704 ASSERT(right.Is(d1)); | |
1705 __ CallCFunction( | |
1706 ExternalReference::mod_two_doubles_operation(isolate()), | |
1707 0, 2); | |
1708 ASSERT(result.Is(d0)); | |
1709 break; | |
1710 } | |
1711 default: | |
1712 UNREACHABLE(); | |
1713 break; | |
1714 } | |
1715 } | |
1716 | |
1717 | |
1718 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { | |
1719 ASSERT(ToRegister(instr->context()).is(cp)); | |
1720 ASSERT(ToRegister(instr->left()).is(x1)); | |
1721 ASSERT(ToRegister(instr->right()).is(x0)); | |
1722 ASSERT(ToRegister(instr->result()).is(x0)); | |
1723 | |
1724 BinaryOpICStub stub(instr->op(), NO_OVERWRITE); | |
1725 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); | |
1726 } | |
1727 | |
1728 | |
1729 void LCodeGen::DoBitI(LBitI* instr) { | |
1730 Register result = ToRegister32(instr->result()); | |
1731 Register left = ToRegister32(instr->left()); | |
1732 Operand right = ToOperand32U(instr->right()); | |
1733 | |
1734 switch (instr->op()) { | |
1735 case Token::BIT_AND: __ And(result, left, right); break; | |
1736 case Token::BIT_OR: __ Orr(result, left, right); break; | |
1737 case Token::BIT_XOR: __ Eor(result, left, right); break; | |
1738 default: | |
1739 UNREACHABLE(); | |
1740 break; | |
1741 } | |
1742 } | |
1743 | |
1744 | |
1745 void LCodeGen::DoBitS(LBitS* instr) { | |
1746 Register result = ToRegister(instr->result()); | |
1747 Register left = ToRegister(instr->left()); | |
1748 Operand right = ToOperand(instr->right()); | |
1749 | |
1750 switch (instr->op()) { | |
1751 case Token::BIT_AND: __ And(result, left, right); break; | |
1752 case Token::BIT_OR: __ Orr(result, left, right); break; | |
1753 case Token::BIT_XOR: __ Eor(result, left, right); break; | |
1754 default: | |
1755 UNREACHABLE(); | |
1756 break; | |
1757 } | |
1758 } | |
1759 | |
1760 | |
1761 void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) { | |
1762 if (FLAG_debug_code && check->hydrogen()->skip_check()) { | |
1763 __ Assert(InvertCondition(cc), kEliminatedBoundsCheckFailed); | |
1764 } else { | |
1765 DeoptimizeIf(cc, check->environment()); | |
1766 } | |
1767 } | |
1768 | |
1769 | |
1770 void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) { | |
1771 if (instr->hydrogen()->skip_check()) return; | |
1772 | |
1773 ASSERT(instr->hydrogen()->length()->representation().IsInteger32()); | |
1774 Register length = ToRegister32(instr->length()); | |
1775 | |
1776 if (instr->index()->IsConstantOperand()) { | |
1777 int constant_index = | |
1778 ToInteger32(LConstantOperand::cast(instr->index())); | |
1779 | |
1780 if (instr->hydrogen()->length()->representation().IsSmi()) { | |
1781 __ Cmp(length, Smi::FromInt(constant_index)); | |
1782 } else { | |
1783 __ Cmp(length, constant_index); | |
1784 } | |
1785 } else { | |
1786 ASSERT(instr->hydrogen()->index()->representation().IsInteger32()); | |
1787 __ Cmp(length, ToRegister32(instr->index())); | |
1788 } | |
1789 Condition condition = instr->hydrogen()->allow_equality() ? lo : ls; | |
1790 ApplyCheckIf(condition, instr); | |
1791 } | |
1792 | |
1793 | |
1794 void LCodeGen::DoBranch(LBranch* instr) { | |
1795 Representation r = instr->hydrogen()->value()->representation(); | |
1796 Label* true_label = instr->TrueLabel(chunk_); | |
1797 Label* false_label = instr->FalseLabel(chunk_); | |
1798 | |
1799 if (r.IsInteger32()) { | |
1800 ASSERT(!info()->IsStub()); | |
1801 EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0); | |
1802 } else if (r.IsSmi()) { | |
1803 ASSERT(!info()->IsStub()); | |
1804 STATIC_ASSERT(kSmiTag == 0); | |
1805 EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0); | |
1806 } else if (r.IsDouble()) { | |
1807 DoubleRegister value = ToDoubleRegister(instr->value()); | |
1808 // Test the double value. Zero and NaN are false. | |
1809 EmitBranchIfNonZeroNumber(instr, value, double_scratch()); | |
1810 } else { | |
1811 ASSERT(r.IsTagged()); | |
1812 Register value = ToRegister(instr->value()); | |
1813 HType type = instr->hydrogen()->value()->type(); | |
1814 | |
1815 if (type.IsBoolean()) { | |
1816 ASSERT(!info()->IsStub()); | |
1817 __ CompareRoot(value, Heap::kTrueValueRootIndex); | |
1818 EmitBranch(instr, eq); | |
1819 } else if (type.IsSmi()) { | |
1820 ASSERT(!info()->IsStub()); | |
1821 EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0)); | |
1822 } else if (type.IsJSArray()) { | |
1823 ASSERT(!info()->IsStub()); | |
1824 EmitGoto(instr->TrueDestination(chunk())); | |
1825 } else if (type.IsHeapNumber()) { | |
1826 ASSERT(!info()->IsStub()); | |
1827 __ Ldr(double_scratch(), FieldMemOperand(value, | |
1828 HeapNumber::kValueOffset)); | |
1829 // Test the double value. Zero and NaN are false. | |
1830 EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch()); | |
1831 } else if (type.IsString()) { | |
1832 ASSERT(!info()->IsStub()); | |
1833 Register temp = ToRegister(instr->temp1()); | |
1834 __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset)); | |
1835 EmitCompareAndBranch(instr, ne, temp, 0); | |
1836 } else { | |
1837 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); | |
1838 // Avoid deopts in the case where we've never executed this path before. | |
1839 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); | |
1840 | |
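// The checks below approximate ECMAScript ToBoolean (a summary, not part
// of the original code): undefined and null are false; booleans map to
// themselves; a number is false only for +0, -0 and NaN; a string is
// false only when empty; other objects are true, unless undetectable.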
1841 if (expected.Contains(ToBooleanStub::UNDEFINED)) { | |
1842 // undefined -> false. | |
1843 __ JumpIfRoot( | |
1844 value, Heap::kUndefinedValueRootIndex, false_label); | |
1845 } | |
1846 | |
1847 if (expected.Contains(ToBooleanStub::BOOLEAN)) { | |
1848 // Boolean -> its value. | |
1849 __ JumpIfRoot( | |
1850 value, Heap::kTrueValueRootIndex, true_label); | |
1851 __ JumpIfRoot( | |
1852 value, Heap::kFalseValueRootIndex, false_label); | |
1853 } | |
1854 | |
1855 if (expected.Contains(ToBooleanStub::NULL_TYPE)) { | |
1856 // 'null' -> false. | |
1857 __ JumpIfRoot( | |
1858 value, Heap::kNullValueRootIndex, false_label); | |
1859 } | |
1860 | |
1861 if (expected.Contains(ToBooleanStub::SMI)) { | |
1862 // Smis: 0 -> false, all other -> true. | |
1863 ASSERT(Smi::FromInt(0) == 0); | |
1864 __ Cbz(value, false_label); | |
1865 __ JumpIfSmi(value, true_label); | |
1866 } else if (expected.NeedsMap()) { | |
1867 // If we need a map later and have a smi, deopt. | |
1868 DeoptimizeIfSmi(value, instr->environment()); | |
1869 } | |
1870 | |
1871 Register map = NoReg; | |
1872 Register scratch = NoReg; | |
1873 | |
1874 if (expected.NeedsMap()) { | |
1875 ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL)); | |
1876 map = ToRegister(instr->temp1()); | |
1877 scratch = ToRegister(instr->temp2()); | |
1878 | |
1879 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); | |
1880 | |
1881 if (expected.CanBeUndetectable()) { | |
1882 // Undetectable -> false. | |
1883 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); | |
1884 __ TestAndBranchIfAnySet( | |
1885 scratch, 1 << Map::kIsUndetectable, false_label); | |
1886 } | |
1887 } | |
1888 | |
1889 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) { | |
1890 // spec object -> true. | |
1891 __ CompareInstanceType(map, scratch, FIRST_SPEC_OBJECT_TYPE); | |
1892 __ B(ge, true_label); | |
1893 } | |
1894 | |
1895 if (expected.Contains(ToBooleanStub::STRING)) { | |
1896 // String value -> false iff empty. | |
1897 Label not_string; | |
1898 __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE); | |
1899 __ B(ge, ¬_string); | |
1900 __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset)); | |
1901 __ Cbz(scratch, false_label); | |
1902 __ B(true_label); | |
1903 __ Bind(¬_string); | |
1904 } | |
1905 | |
1906 if (expected.Contains(ToBooleanStub::SYMBOL)) { | |
1907 // Symbol value -> true. | |
1908 __ CompareInstanceType(map, scratch, SYMBOL_TYPE); | |
1909 __ B(eq, true_label); | |
1910 } | |
1911 | |
1912 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { | |
1913 Label not_heap_number; | |
1914 __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, ¬_heap_number); | |
1915 | |
1916 __ Ldr(double_scratch(), | |
1917 FieldMemOperand(value, HeapNumber::kValueOffset)); | |
1918 __ Fcmp(double_scratch(), 0.0); | |
1919 // If we got a NaN (overflow bit is set), jump to the false branch. | |
1920 __ B(vs, false_label); | |
1921 __ B(eq, false_label); | |
1922 __ B(true_label); | |
1923 __ Bind(¬_heap_number); | |
1924 } | |
1925 | |
1926 if (!expected.IsGeneric()) { | |
1927 // We've seen something for the first time -> deopt. | |
1928 // This can only happen if we are not generic already. | |
1929 Deoptimize(instr->environment()); | |
1930 } | |
1931 } | |
1932 } | |
1933 } | |
1934 | |
1935 | |
1936 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, | |
1937 int formal_parameter_count, | |
1938 int arity, | |
1939 LInstruction* instr, | |
1940 Register function_reg) { | |
1941 bool dont_adapt_arguments = | |
1942 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; | |
1943 bool can_invoke_directly = | |
1944 dont_adapt_arguments || formal_parameter_count == arity; | |
1945 | |
1946 // The function interface relies on the following register assignments. | |
1947 ASSERT(function_reg.Is(x1) || function_reg.IsNone()); | |
1948 Register arity_reg = x0; | |
1949 | |
1950 LPointerMap* pointers = instr->pointer_map(); | |
1951 | |
1952 // If necessary, load the function object. | |
1953 if (function_reg.IsNone()) { | |
1954 function_reg = x1; | |
1955 __ LoadObject(function_reg, function); | |
1956 } | |
1957 | |
1958 if (FLAG_debug_code) { | |
1959 Label is_not_smi; | |
1960 // Try to confirm that function_reg (x1) is a tagged pointer. | |
1961 __ JumpIfNotSmi(function_reg, &is_not_smi); | |
1962 __ Abort(kExpectedFunctionObject); | |
1963 __ Bind(&is_not_smi); | |
1964 } | |
1965 | |
1966 if (can_invoke_directly) { | |
1967 // Change context. | |
1968 __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset)); | |
1969 | |
1970 // Set the arguments count if adaptation is not needed. Assumes that x0 is | |
1971 // available to write to at this point. | |
1972 if (dont_adapt_arguments) { | |
1973 __ Mov(arity_reg, arity); | |
1974 } | |
1975 | |
1976 // Invoke function. | |
1977 __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset)); | |
1978 __ Call(x10); | |
1979 | |
1980 // Set up deoptimization. | |
1981 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); | |
1982 } else { | |
1983 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); | |
1984 ParameterCount count(arity); | |
1985 ParameterCount expected(formal_parameter_count); | |
1986 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator); | |
1987 } | |
1988 } | |
1989 | |
1990 | |
1991 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { | |
1992 ASSERT(instr->IsMarkedAsCall()); | |
1993 ASSERT(ToRegister(instr->result()).Is(x0)); | |
1994 | |
1995 LPointerMap* pointers = instr->pointer_map(); | |
1996 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); | |
1997 | |
1998 if (instr->target()->IsConstantOperand()) { | |
1999 LConstantOperand* target = LConstantOperand::cast(instr->target()); | |
2000 Handle<Code> code = Handle<Code>::cast(ToHandle(target)); | |
2001 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); | |
2002 // TODO(all): on ARM we use a call descriptor to specify a storage mode | |
2003 // but on A64 we only have one storage mode, so it isn't necessary. Check | |
2004 // that this understanding is correct. | |
2005 __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None()); | |
2006 } else { | |
2007 ASSERT(instr->target()->IsRegister()); | |
2008 Register target = ToRegister(instr->target()); | |
2009 generator.BeforeCall(__ CallSize(target)); | |
2010 __ Add(target, target, Code::kHeaderSize - kHeapObjectTag); | |
2011 __ Call(target); | |
2012 } | |
2013 generator.AfterCall(); | |
2014 } | |
2015 | |
2016 | |
2017 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { | |
2018 ASSERT(instr->IsMarkedAsCall()); | |
2019 ASSERT(ToRegister(instr->function()).is(x1)); | |
2020 | |
2021 if (instr->hydrogen()->pass_argument_count()) { | |
2022 __ Mov(x0, Operand(instr->arity())); | |
2023 } | |
2024 | |
2025 // Change context. | |
2026 __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset)); | |
2027 | |
2028 // Load the code entry address. | |
2029 __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset)); | |
2030 __ Call(x10); | |
2031 | |
2032 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); | |
2033 } | |
2034 | |
2035 | |
2036 void LCodeGen::DoCallRuntime(LCallRuntime* instr) { | |
2037 CallRuntime(instr->function(), instr->arity(), instr); | |
2038 } | |
2039 | |
2040 | |
2041 void LCodeGen::DoCallStub(LCallStub* instr) { | |
2042 ASSERT(ToRegister(instr->context()).is(cp)); | |
2043 ASSERT(ToRegister(instr->result()).is(x0)); | |
2044 switch (instr->hydrogen()->major_key()) { | |
2045 case CodeStub::RegExpExec: { | |
2046 RegExpExecStub stub; | |
2047 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); | |
2048 break; | |
2049 } | |
2050 case CodeStub::SubString: { | |
2051 SubStringStub stub; | |
2052 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); | |
2053 break; | |
2054 } | |
2055 case CodeStub::StringCompare: { | |
2056 StringCompareStub stub; | |
2057 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); | |
2058 break; | |
2059 } | |
2060 default: | |
2061 UNREACHABLE(); | |
2062 } | |
2063 } | |
2064 | |
2065 | |
2066 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { | |
2067 GenerateOsrPrologue(); | |
2068 } | |
2069 | |
2070 | |
2071 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { | |
2072 Register temp = ToRegister(instr->temp()); | |
2073 { | |
2074 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | |
2075 __ Push(object); | |
2076 __ Mov(cp, 0); | |
2077 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); | |
2078 RecordSafepointWithRegisters( | |
2079 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); | |
2080 __ StoreToSafepointRegisterSlot(x0, temp); | |
2081 } | |
2082 DeoptimizeIfSmi(temp, instr->environment()); | |
2083 } | |
2084 | |
2085 | |
2086 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | |
2087 class DeferredCheckMaps: public LDeferredCode { | |
2088 public: | |
2089 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | |
2090 : LDeferredCode(codegen), instr_(instr), object_(object) { | |
2091 SetExit(check_maps()); | |
2092 } | |
2093 virtual void Generate() { | |
2094 codegen()->DoDeferredInstanceMigration(instr_, object_); | |
2095 } | |
2096 Label* check_maps() { return &check_maps_; } | |
2097 virtual LInstruction* instr() { return instr_; } | |
2098 private: | |
2099 LCheckMaps* instr_; | |
2100 Label check_maps_; | |
2101 Register object_; | |
2102 }; | |
2103 | |
2104 if (instr->hydrogen()->CanOmitMapChecks()) { | |
2105 ASSERT(instr->value() == NULL); | |
2106 ASSERT(instr->temp() == NULL); | |
2107 return; | |
2108 } | |
2109 | |
2110 Register object = ToRegister(instr->value()); | |
2111 Register map_reg = ToRegister(instr->temp()); | |
2112 | |
2113 __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset)); | |
2114 | |
2115 DeferredCheckMaps* deferred = NULL; | |
2116 if (instr->hydrogen()->has_migration_target()) { | |
2117 deferred = new(zone()) DeferredCheckMaps(this, instr, object); | |
2118 __ Bind(deferred->check_maps()); | |
2119 } | |
2120 | |
2121 UniqueSet<Map> map_set = instr->hydrogen()->map_set(); | |
2122 Label success; | |
2123 for (int i = 0; i < map_set.size(); i++) { | |
2124 Handle<Map> map = map_set.at(i).handle(); | |
2125 __ CompareMap(map_reg, map); | |
2126 __ B(eq, &success); | |
2127 } | |
2128 | |
2129 // We didn't match a map. | |
2130 if (instr->hydrogen()->has_migration_target()) { | |
2131 __ B(deferred->entry()); | |
2132 } else { | |
2133 Deoptimize(instr->environment()); | |
2134 } | |
2135 | |
2136 __ Bind(&success); | |
2137 } | |
2138 | |
2139 | |
2140 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { | |
2141 if (!instr->hydrogen()->value()->IsHeapObject()) { | |
2142 DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment()); | |
2143 } | |
2144 } | |
2145 | |
2146 | |
2147 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | |
2148 Register value = ToRegister(instr->value()); | |
2149 ASSERT(!instr->result() || ToRegister(instr->result()).Is(value)); | |
2150 DeoptimizeIfNotSmi(value, instr->environment()); | |
2151 } | |
2152 | |
2153 | |
2154 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { | |
2155 Register input = ToRegister(instr->value()); | |
2156 Register scratch = ToRegister(instr->temp()); | |
2157 | |
2158 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | |
2159 __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | |
2160 | |
2161 if (instr->hydrogen()->is_interval_check()) { | |
2162 InstanceType first, last; | |
2163 instr->hydrogen()->GetCheckInterval(&first, &last); | |
2164 | |
2165 __ Cmp(scratch, first); | |
2166 if (first == last) { | |
2167 // If there is only one type in the interval, check for equality. | |
2168 DeoptimizeIf(ne, instr->environment()); | |
2169 } else if (last == LAST_TYPE) { | |
2170 // We don't need to compare with the higher bound of the interval. | |
2171 DeoptimizeIf(lo, instr->environment()); | |
2172 } else { | |
2173 // If we are below the lower bound, set the C flag and clear the Z flag | |
2174 // to force a deopt. | |
2175 __ Ccmp(scratch, last, CFlag, hs); | |
2176 DeoptimizeIf(hi, instr->environment()); | |
2177 } | |
2178 } else { | |
2179 uint8_t mask; | |
2180 uint8_t tag; | |
2181 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); | |
2182 | |
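// Both paths below implement, in C++ terms (a sketch):
//
//   if ((instance_type & mask) != tag) Deoptimize();
//
// When mask is a power of two, tag must be 0 or mask, so a single Tst
// suffices: deopt on 'ne' when the bit must be clear (tag == 0), and on
// 'eq' when the bit must be set (tag == mask).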
2183 if (IsPowerOf2(mask)) { | |
2184 ASSERT((tag == 0) || (tag == mask)); | |
2185 // TODO(all): We might be able to use tbz/tbnz if we can guarantee that | |
2186 // the deopt handler is reachable by a tbz instruction. | |
2187 __ Tst(scratch, mask); | |
2188 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment()); | |
2189 } else { | |
2190 if (tag == 0) { | |
2191 __ Tst(scratch, mask); | |
2192 } else { | |
2193 __ And(scratch, scratch, mask); | |
2194 __ Cmp(scratch, tag); | |
2195 } | |
2196 DeoptimizeIf(ne, instr->environment()); | |
2197 } | |
2198 } | |
2199 } | |
2200 | |
2201 | |
2202 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | |
2203 DoubleRegister input = ToDoubleRegister(instr->unclamped()); | |
2204 Register result = ToRegister32(instr->result()); | |
2205 __ ClampDoubleToUint8(result, input, double_scratch()); | |
2206 } | |
2207 | |
2208 | |
2209 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { | |
2210 Register input = ToRegister32(instr->unclamped()); | |
2211 Register result = ToRegister32(instr->result()); | |
2212 __ ClampInt32ToUint8(result, input); | |
2213 } | |
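// A minimal sketch of the saturating clamp performed by the helper used
// above, assuming it implements Uint8ClampedArray-style semantics:
//
//   int32_t ClampInt32ToUint8(int32_t value) {
//     if (value < 0) return 0;
//     if (value > 255) return 255;
//     return value;
//   }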
2214 | |
2215 | |
2216 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { | |
2217 Register input = ToRegister(instr->unclamped()); | |
2218 Register result = ToRegister32(instr->result()); | |
2219 Register scratch = ToRegister(instr->temp1()); | |
2220 Label done; | |
2221 | |
2222 // Both smi and heap number cases are handled. | |
2223 Label is_not_smi; | |
2224 __ JumpIfNotSmi(input, &is_not_smi); | |
2225 __ SmiUntag(result.X(), input); | |
2226 __ ClampInt32ToUint8(result); | |
2227 __ B(&done); | |
2228 | |
2229 __ Bind(&is_not_smi); | |
2230 | |
2231 // Check for heap number. | |
2232 Label is_heap_number; | |
2233 __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | |
2234 __ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number); | |
2235 | |
2236 // Check for undefined. Undefined is converted to zero for clamping conversion. | |
2237 DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, | |
2238 instr->environment()); | |
2239 __ Mov(result, 0); | |
2240 __ B(&done); | |
2241 | |
2242 // Heap number case. | |
2243 __ Bind(&is_heap_number); | |
2244 DoubleRegister dbl_scratch = double_scratch(); | |
2245 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2()); | |
2246 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset)); | |
2247 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2); | |
2248 | |
2249 __ Bind(&done); | |
2250 } | |
2251 | |
2252 | |
2253 void LCodeGen::DoDoubleBits(LDoubleBits* instr) { | |
2254 DoubleRegister value_reg = ToDoubleRegister(instr->value()); | |
2255 Register result_reg = ToRegister(instr->result()); | |
2256 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) { | |
2257 __ Fmov(result_reg, value_reg); | |
2258 __ Mov(result_reg, Operand(result_reg, LSR, 32)); | |
2259 } else { | |
2260 __ Fmov(result_reg.W(), value_reg.S()); | |
2261 } | |
2262 } | |
2263 | |
2264 | |
2265 void LCodeGen::DoConstructDouble(LConstructDouble* instr) { | |
2266 Register hi_reg = ToRegister(instr->hi()); | |
2267 Register lo_reg = ToRegister(instr->lo()); | |
2268 Register temp = ToRegister(instr->temp()); | |
2269 DoubleRegister result_reg = ToDoubleRegister(instr->result()); | |
2270 | |
2271 __ And(temp, lo_reg, Operand(0xffffffff)); | |
2272 __ Orr(temp, temp, Operand(hi_reg, LSL, 32)); | |
2273 __ Fmov(result_reg, temp); | |
2274 } | |
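// In C++ terms, DoDoubleBits extracts half of the IEEE-754 bit pattern
// and DoConstructDouble rebuilds it; a sketch using a hypothetical
// bit_cast helper:
//
//   uint64_t bits = bit_cast<uint64_t>(value);
//   uint32_t hi = static_cast<uint32_t>(bits >> 32);
//   uint32_t lo = static_cast<uint32_t>(bits & 0xffffffff);
//   double rebuilt =
//       bit_cast<double>((static_cast<uint64_t>(hi) << 32) | lo);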
2275 | |
2276 | |
2277 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { | |
2278 Handle<String> class_name = instr->hydrogen()->class_name(); | |
2279 Label* true_label = instr->TrueLabel(chunk_); | |
2280 Label* false_label = instr->FalseLabel(chunk_); | |
2281 Register input = ToRegister(instr->value()); | |
2282 Register scratch1 = ToRegister(instr->temp1()); | |
2283 Register scratch2 = ToRegister(instr->temp2()); | |
2284 | |
2285 __ JumpIfSmi(input, false_label); | |
2286 | |
2287 Register map = scratch2; | |
2288 if (class_name->IsUtf8EqualTo(CStrVector("Function"))) { | |
2289 // Assuming the following assertions hold, we can use the same compares to | |
2290 // test for both being a function type and being in the object type range. | |
2291 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); | |
2292 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == | |
2293 FIRST_SPEC_OBJECT_TYPE + 1); | |
2294 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == | |
2295 LAST_SPEC_OBJECT_TYPE - 1); | |
2296 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); | |
2297 | |
2298 // We expect CompareObjectType to load the object instance type in scratch1. | |
2299 __ CompareObjectType(input, map, scratch1, FIRST_SPEC_OBJECT_TYPE); | |
2300 __ B(lt, false_label); | |
2301 __ B(eq, true_label); | |
2302 __ Cmp(scratch1, LAST_SPEC_OBJECT_TYPE); | |
2303 __ B(eq, true_label); | |
2304 } else { | |
2305 __ IsObjectJSObjectType(input, map, scratch1, false_label); | |
2306 } | |
2307 | |
2308 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. | |
2309 // Check if the constructor in the map is a function. | |
2310 __ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset)); | |
2311 | |
2312 // Objects with a non-function constructor have class 'Object'. | |
2313 if (class_name->IsUtf8EqualTo(CStrVector("Object"))) { | |
2314 __ JumpIfNotObjectType( | |
2315 scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label); | |
2316 } else { | |
2317 __ JumpIfNotObjectType( | |
2318 scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, false_label); | |
2319 } | |
2320 | |
2321 // The constructor function is in scratch1. Get its instance class name. | |
2322 __ Ldr(scratch1, | |
2323 FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset)); | |
2324 __ Ldr(scratch1, | |
2325 FieldMemOperand(scratch1, | |
2326 SharedFunctionInfo::kInstanceClassNameOffset)); | |
2327 | |
2328 // The class name we are testing against is internalized since it's a literal. | |
2329 // The name in the constructor is internalized because of the way the context | |
2330 // is booted. This routine isn't expected to work for random API-created | |
2331 // classes and it doesn't have to because you can't access it with natives | |
2332 // syntax. Since both sides are internalized it is sufficient to use an | |
2333 // identity comparison. | |
2334 EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name)); | |
2335 } | |
2336 | |
2337 | |
2338 void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) { | |
2339 ASSERT(instr->hydrogen()->representation().IsDouble()); | |
2340 FPRegister object = ToDoubleRegister(instr->object()); | |
2341 Register temp = ToRegister(instr->temp()); | |
2342 | |
2343 // If we don't have a NaN, we don't have the hole, so branch now to avoid the | |
2344 // (relatively expensive) hole-NaN check. | |
2345 __ Fcmp(object, object); | |
2346 __ B(vc, instr->FalseLabel(chunk_)); | |
2347 | |
2348 // We have a NaN, but is it the hole? | |
2349 __ Fmov(temp, object); | |
2350 EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64); | |
2351 } | |
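// Equivalent logic, as a sketch (kHoleNanInt64 is the specific NaN bit
// pattern reserved for the hole value):
//
//   bool is_hole = std::isnan(object_value) &&
//                  (bit_cast<uint64_t>(object_value) == kHoleNanInt64);
//
// The Fcmp/B(vc) pair filters out non-NaN values before the (relatively
// expensive) exact bit comparison.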
2352 | |
2353 | |
2354 void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) { | |
2355 ASSERT(instr->hydrogen()->representation().IsTagged()); | |
2356 Register object = ToRegister(instr->object()); | |
2357 | |
2358 EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex); | |
2359 } | |
2360 | |
2361 | |
2362 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { | |
2363 Register value = ToRegister(instr->value()); | |
2364 Register map = ToRegister(instr->temp()); | |
2365 | |
2366 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); | |
2367 EmitCompareAndBranch(instr, eq, map, Operand(instr->map())); | |
2368 } | |
2369 | |
2370 | |
2371 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { | |
2372 Representation rep = instr->hydrogen()->value()->representation(); | |
2373 ASSERT(!rep.IsInteger32()); | |
2374 Register scratch = ToRegister(instr->temp()); | |
2375 | |
2376 if (rep.IsDouble()) { | |
2377 __ JumpIfMinusZero(ToDoubleRegister(instr->value()), | |
2378 instr->TrueLabel(chunk())); | |
2379 } else { | |
2380 Register value = ToRegister(instr->value()); | |
2381 __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex, | |
2382 instr->FalseLabel(chunk()), DO_SMI_CHECK); | |
2383 __ Ldr(double_scratch(), FieldMemOperand(value, HeapNumber::kValueOffset)); | |
2384 __ JumpIfMinusZero(double_scratch(), instr->TrueLabel(chunk())); | |
2385 } | |
2386 EmitGoto(instr->FalseDestination(chunk())); | |
2387 } | |
2388 | |
2389 | |
2390 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { | |
2391 LOperand* left = instr->left(); | |
2392 LOperand* right = instr->right(); | |
2393 Condition cond = TokenToCondition(instr->op(), false); | |
2394 | |
2395 if (left->IsConstantOperand() && right->IsConstantOperand()) { | |
2396 // We can statically evaluate the comparison. | |
2397 double left_val = ToDouble(LConstantOperand::cast(left)); | |
2398 double right_val = ToDouble(LConstantOperand::cast(right)); | |
2399 int next_block = EvalComparison(instr->op(), left_val, right_val) ? | |
2400 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_); | |
2401 EmitGoto(next_block); | |
2402 } else { | |
2403 if (instr->is_double()) { | |
2404 if (right->IsConstantOperand()) { | |
2405 __ Fcmp(ToDoubleRegister(left), | |
2406 ToDouble(LConstantOperand::cast(right))); | |
2407 } else if (left->IsConstantOperand()) { | |
2408 // Transpose the operands and reverse the condition. | |
2409 __ Fcmp(ToDoubleRegister(right), | |
2410 ToDouble(LConstantOperand::cast(left))); | |
2411 cond = ReverseConditionForCmp(cond); | |
2412 } else { | |
2413 __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right)); | |
2414 } | |
2415 | |
2416 // If a NaN is involved, i.e. the result is unordered (V set), | |
2417 // jump to the false block label. | |
2418 __ B(vs, instr->FalseLabel(chunk_)); | |
2419 EmitBranch(instr, cond); | |
2420 } else { | |
2421 if (instr->hydrogen_value()->representation().IsInteger32()) { | |
2422 if (right->IsConstantOperand()) { | |
2423 EmitCompareAndBranch(instr, | |
2424 cond, | |
2425 ToRegister32(left), | |
2426 ToOperand32I(right)); | |
2427 } else { | |
2428 // Transpose the operands and reverse the condition. | |
2429 EmitCompareAndBranch(instr, | |
2430 ReverseConditionForCmp(cond), | |
2431 ToRegister32(right), | |
2432 ToOperand32I(left)); | |
2433 } | |
2434 } else { | |
2435 ASSERT(instr->hydrogen_value()->representation().IsSmi()); | |
2436 if (right->IsConstantOperand()) { | |
2437 int32_t value = ToInteger32(LConstantOperand::cast(right)); | |
2438 EmitCompareAndBranch(instr, | |
2439 cond, | |
2440 ToRegister(left), | |
2441 Operand(Smi::FromInt(value))); | |
2442 } else if (left->IsConstantOperand()) { | |
2443 // Transpose the operands and reverse the condition. | |
2444 int32_t value = ToInteger32(LConstantOperand::cast(left)); | |
2445 EmitCompareAndBranch(instr, | |
2446 ReverseConditionForCmp(cond), | |
2447 ToRegister(right), | |
2448 Operand(Smi::FromInt(value))); | |
2449 } else { | |
2450 EmitCompareAndBranch(instr, | |
2451 cond, | |
2452 ToRegister(left), | |
2453 ToRegister(right)); | |
2454 } | |
2455 } | |
2456 } | |
2457 } | |
2458 } | |
2459 | |
2460 | |
2461 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { | |
2462 Register left = ToRegister(instr->left()); | |
2463 Register right = ToRegister(instr->right()); | |
2464 EmitCompareAndBranch(instr, eq, left, right); | |
2465 } | |
2466 | |
2467 | |
2468 void LCodeGen::DoCmpT(LCmpT* instr) { | |
2469 ASSERT(ToRegister(instr->context()).is(cp)); | |
2470 Token::Value op = instr->op(); | |
2471 Condition cond = TokenToCondition(op, false); | |
2472 | |
2473 ASSERT(ToRegister(instr->left()).Is(x1)); | |
2474 ASSERT(ToRegister(instr->right()).Is(x0)); | |
2475 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); | |
2476 CallCode(ic, RelocInfo::CODE_TARGET, instr); | |
2477 // Signal that we don't inline smi code before this stub. | |
2478 InlineSmiCheckInfo::EmitNotInlined(masm()); | |
2479 | |
2480 // Return true or false depending on CompareIC result. | |
2481 // This instruction is marked as call. We can clobber any register. | |
2482 ASSERT(instr->IsMarkedAsCall()); | |
2483 __ LoadTrueFalseRoots(x1, x2); | |
2484 __ Cmp(x0, 0); | |
2485 __ Csel(ToRegister(instr->result()), x1, x2, cond); | |
2486 } | |
2487 | |
2488 | |
2489 void LCodeGen::DoConstantD(LConstantD* instr) { | |
2490 ASSERT(instr->result()->IsDoubleRegister()); | |
2491 DoubleRegister result = ToDoubleRegister(instr->result()); | |
2492 __ Fmov(result, instr->value()); | |
2493 } | |
2494 | |
2495 | |
2496 void LCodeGen::DoConstantE(LConstantE* instr) { | |
2497 __ Mov(ToRegister(instr->result()), Operand(instr->value())); | |
2498 } | |
2499 | |
2500 | |
2501 void LCodeGen::DoConstantI(LConstantI* instr) { | |
2502 ASSERT(is_int32(instr->value())); | |
2503 // Cast the value here to ensure that the value isn't sign extended by the | |
2504 // implicit Operand constructor. | |
2505 __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value())); | |
2506 } | |
2507 | |
2508 | |
2509 void LCodeGen::DoConstantS(LConstantS* instr) { | |
2510 __ Mov(ToRegister(instr->result()), Operand(instr->value())); | |
2511 } | |
2512 | |
2513 | |
2514 void LCodeGen::DoConstantT(LConstantT* instr) { | |
2515 Handle<Object> value = instr->value(isolate()); | |
2516 AllowDeferredHandleDereference smi_check; | |
2517 __ LoadObject(ToRegister(instr->result()), value); | |
2518 } | |
2519 | |
2520 | |
2521 void LCodeGen::DoContext(LContext* instr) { | |
2522 // If there is a non-return use, the context must be moved to a register. | |
2523 Register result = ToRegister(instr->result()); | |
2524 if (info()->IsOptimizing()) { | |
2525 __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); | |
2526 } else { | |
2527 // If there is no frame, the context must be in cp. | |
2528 ASSERT(result.is(cp)); | |
2529 } | |
2530 } | |
2531 | |
2532 | |
2533 void LCodeGen::DoCheckValue(LCheckValue* instr) { | |
2534 Register reg = ToRegister(instr->value()); | |
2535 Handle<HeapObject> object = instr->hydrogen()->object().handle(); | |
2536 AllowDeferredHandleDereference smi_check; | |
2537 if (isolate()->heap()->InNewSpace(*object)) { | |
2538 UseScratchRegisterScope temps(masm()); | |
2539 Register temp = temps.AcquireX(); | |
2540 Handle<Cell> cell = isolate()->factory()->NewCell(object); | |
2541 __ Mov(temp, Operand(Handle<Object>(cell))); | |
2542 __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset)); | |
2543 __ Cmp(reg, temp); | |
2544 } else { | |
2545 __ Cmp(reg, Operand(object)); | |
2546 } | |
2547 DeoptimizeIf(ne, instr->environment()); | |
2548 } | |
2549 | |
2550 | |
2551 void LCodeGen::DoLazyBailout(LLazyBailout* instr) { | |
2552 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); | |
2553 ASSERT(instr->HasEnvironment()); | |
2554 LEnvironment* env = instr->environment(); | |
2555 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); | |
2556 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); | |
2557 } | |
2558 | |
2559 | |
2560 void LCodeGen::DoDateField(LDateField* instr) { | |
2561 Register object = ToRegister(instr->date()); | |
2562 Register result = ToRegister(instr->result()); | |
2563 Register temp1 = x10; | |
2564 Register temp2 = x11; | |
2565 Smi* index = instr->index(); | |
2566 Label runtime, done, deopt, obj_ok; | |
2567 | |
2568 ASSERT(object.is(result) && object.Is(x0)); | |
2569 ASSERT(instr->IsMarkedAsCall()); | |
2570 | |
2571 __ JumpIfSmi(object, &deopt); | |
2572 __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE); | |
2573 __ B(eq, &obj_ok); | |
2574 | |
2575 __ Bind(&deopt); | |
2576 Deoptimize(instr->environment()); | |
2577 | |
2578 __ Bind(&obj_ok); | |
2579 if (index->value() == 0) { | |
2580 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); | |
2581 } else { | |
2582 if (index->value() < JSDate::kFirstUncachedField) { | |
2583 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); | |
2584 __ Mov(temp1, Operand(stamp)); | |
2585 __ Ldr(temp1, MemOperand(temp1)); | |
2586 __ Ldr(temp2, FieldMemOperand(object, JSDate::kCacheStampOffset)); | |
2587 __ Cmp(temp1, temp2); | |
2588 __ B(ne, &runtime); | |
2589 __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset + | |
2590 kPointerSize * index->value())); | |
2591 __ B(&done); | |
2592 } | |
2593 | |
2594 __ Bind(&runtime); | |
2595 __ Mov(x1, Operand(index)); | |
2596 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); | |
2597 } | |
2598 | |
2599 __ Bind(&done); | |
2600 } | |
2601 | |
2602 | |
2603 void LCodeGen::DoDeoptimize(LDeoptimize* instr) { | |
2604 Deoptimizer::BailoutType type = instr->hydrogen()->type(); | |
2605 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the | |
2606 // needed return address), even though the implementation of LAZY and EAGER is | |
2607 // now identical. When LAZY is eventually completely folded into EAGER, remove | |
2608 // the special case below. | |
2609 if (info()->IsStub() && (type == Deoptimizer::EAGER)) { | |
2610 type = Deoptimizer::LAZY; | |
2611 } | |
2612 | |
2613 Comment(";;; deoptimize: %s", instr->hydrogen()->reason()); | |
2614 Deoptimize(instr->environment(), &type); | |
2615 } | |
2616 | |
2617 | |
2618 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { | |
2619 Register dividend = ToRegister32(instr->dividend()); | |
2620 int32_t divisor = instr->divisor(); | |
2621 Register result = ToRegister32(instr->result()); | |
2622 ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor)))); | |
2623 ASSERT(!result.is(dividend)); | |
2624 | |
2625 // Check for (0 / -x) that will produce negative zero. | |
2626 HDiv* hdiv = instr->hydrogen(); | |
2627 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | |
2628 __ Cmp(dividend, 0); | |
2629 DeoptimizeIf(eq, instr->environment()); | |
2630 } | |
2631 // Check for (kMinInt / -1). | |
2632 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { | |
2633 __ Cmp(dividend, kMinInt); | |
2634 DeoptimizeIf(eq, instr->environment()); | |
2635 } | |
2636 // Deoptimize if remainder will not be 0. | |
2637 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && | |
2638 divisor != 1 && divisor != -1) { | |
2639 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | |
2640 __ Tst(dividend, mask); | |
2641 DeoptimizeIf(ne, instr->environment()); | |
2642 } | |
2643 | |
2644 if (divisor == -1) { // Nice shortcut, not needed for correctness. | |
2645 __ Neg(result, dividend); | |
2646 return; | |
2647 } | |
2648 int32_t shift = WhichPowerOf2Abs(divisor); | |
2649 if (shift == 0) { | |
2650 __ Mov(result, dividend); | |
2651 } else if (shift == 1) { | |
2652 __ Add(result, dividend, Operand(dividend, LSR, 31)); | |
2653 } else { | |
2654 __ Mov(result, Operand(dividend, ASR, 31)); | |
2655 __ Add(result, dividend, Operand(result, LSR, 32 - shift)); | |
2656 } | |
2657 if (shift > 0) __ Mov(result, Operand(result, ASR, shift)); | |
2658 if (divisor < 0) __ Neg(result, result); | |
2659 } | |
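// A minimal C++ sketch of the rounding adjustment above, assuming a
// positive power-of-two divisor (1 << shift) with shift > 1:
//
//   int32_t sign_mask = dividend >> 31;                    // ASR: 0 or -1.
//   int32_t bias = static_cast<int32_t>(
//       static_cast<uint32_t>(sign_mask) >> (32 - shift)); // LSR.
//   int32_t quotient = (dividend + bias) >> shift;
//
// Adding (2^shift - 1) to negative dividends makes the arithmetic shift
// round towards zero instead of towards negative infinity.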
2660 | |
2661 | |
2662 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { | |
2663 Register dividend = ToRegister32(instr->dividend()); | |
2664 int32_t divisor = instr->divisor(); | |
2665 Register result = ToRegister32(instr->result()); | |
2666 ASSERT(!AreAliased(dividend, result)); | |
2667 | |
2668 if (divisor == 0) { | |
2669 Deoptimize(instr->environment()); | |
2670 return; | |
2671 } | |
2672 | |
2673 // Check for (0 / -x) that will produce negative zero. | |
2674 HDiv* hdiv = instr->hydrogen(); | |
2675 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | |
2676 DeoptimizeIfZero(dividend, instr->environment()); | |
2677 } | |
2678 | |
2679 __ TruncatingDiv(result, dividend, Abs(divisor)); | |
2680 if (divisor < 0) __ Neg(result, result); | |
2681 | |
2682 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { | |
2683 Register temp = ToRegister32(instr->temp()); | |
2684 ASSERT(!AreAliased(dividend, result, temp)); | |
2685 __ Sxtw(dividend.X(), dividend); | |
2686 __ Mov(temp, divisor); | |
2687 __ Smsubl(temp.X(), result, temp, dividend.X()); | |
2688 DeoptimizeIfNotZero(temp, instr->environment()); | |
2689 } | |
2690 } | |
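// The final deopt check verifies that the constant division was exact;
// in C++ terms (a sketch):
//
//   if (dividend - result * divisor != 0) Deoptimize();
//
// Smsubl computes dividend - (result * divisor) with a single
// 32 x 32 -> 64-bit multiply-subtract.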
2691 | |
2692 | |
2693 void LCodeGen::DoDivI(LDivI* instr) { | |
2694 HBinaryOperation* hdiv = instr->hydrogen(); | |
2695 Register dividend = ToRegister32(instr->left()); | |
2696 Register divisor = ToRegister32(instr->right()); | |
2697 Register result = ToRegister32(instr->result()); | |
2698 | |
2699 // Issue the division first, and then check for any deopt cases whilst the | |
2700 // result is computed. | |
2701 __ Sdiv(result, dividend, divisor); | |
2702 | |
2703 if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | |
2704 ASSERT_EQ(NULL, instr->temp()); | |
2705 return; | |
2706 } | |
2707 | |
2708 Label deopt; | |
2709 // Check for x / 0. | |
2710 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | |
2711 __ Cbz(divisor, &deopt); | |
2712 } | |
2713 | |
2714 // Check for (0 / -x) as that will produce negative zero. | |
2715 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
2716 __ Cmp(divisor, 0); | |
2717 | |
2718 // If the divisor < 0 (mi), compare the dividend, and deopt if it is | |
2719 // zero, i.e. a zero dividend with a negative divisor deopts. | |
2720 // If the divisor >= 0 (pl, the opposite of mi), set the flags to | |
2721 // condition ne, so we don't deopt, i.e. a positive divisor doesn't deopt. | |
2722 __ Ccmp(dividend, 0, NoFlag, mi); | |
2723 __ B(eq, &deopt); | |
2724 } | |
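// In C++ terms, the Cmp/Ccmp pair above implements (a sketch):
//
//   if ((divisor < 0) && (dividend == 0)) Deoptimize();
//
// Ccmp performs the second comparison only when 'mi' held; otherwise it
// writes NoFlag, leaving 'eq' false so the branch is not taken.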
2725 | |
2726 // Check for (kMinInt / -1). | |
2727 if (hdiv->CheckFlag(HValue::kCanOverflow)) { | |
2728 // Test dividend for kMinInt by subtracting one (cmp) and checking for | |
2729 // overflow. | |
2730 __ Cmp(dividend, 1); | |
2731 // If overflow is set, i.e. dividend = kMinInt, compare the divisor with | |
2732 // -1. If overflow is clear, set the flags for condition ne, as the | |
2733 // dividend isn't kMinInt, and thus we shouldn't deopt. | |
2734 __ Ccmp(divisor, -1, NoFlag, vs); | |
2735 __ B(eq, &deopt); | |
2736 } | |
2737 | |
2738 // Compute remainder and deopt if it's not zero. | |
2739 Register remainder = ToRegister32(instr->temp()); | |
2740 __ Msub(remainder, result, divisor, dividend); | |
2741 __ Cbnz(remainder, &deopt); | |
2742 | |
2743 Label div_ok; | |
2744 __ B(&div_ok); | |
2745 __ Bind(&deopt); | |
2746 Deoptimize(instr->environment()); | |
2747 __ Bind(&div_ok); | |
2748 } | |
2749 | |
2750 | |
2751 void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) { | |
2752 DoubleRegister input = ToDoubleRegister(instr->value()); | |
2753 Register result = ToRegister32(instr->result()); | |
2754 | |
2755 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
2756 DeoptimizeIfMinusZero(input, instr->environment()); | |
2757 } | |
2758 | |
2759 __ TryConvertDoubleToInt32(result, input, double_scratch()); | |
2760 DeoptimizeIf(ne, instr->environment()); | |
2761 | |
2762 if (instr->tag_result()) { | |
2763 __ SmiTag(result.X()); | |
2764 } | |
2765 } | |
2766 | |
2767 | |
2768 void LCodeGen::DoDrop(LDrop* instr) { | |
2769 __ Drop(instr->count()); | |
2770 } | |
2771 | |
2772 | |
2773 void LCodeGen::DoDummy(LDummy* instr) { | |
2774 // Nothing to see here, move on! | |
2775 } | |
2776 | |
2777 | |
2778 void LCodeGen::DoDummyUse(LDummyUse* instr) { | |
2779 // Nothing to see here, move on! | |
2780 } | |
2781 | |
2782 | |
2783 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { | |
2784 ASSERT(ToRegister(instr->context()).is(cp)); | |
2785 // The FunctionLiteral instruction is marked as a call, so we can trash any register. | |
2786 ASSERT(instr->IsMarkedAsCall()); | |
2787 | |
2788 // Use the fast case closure allocation code that allocates in new | |
2789 // space for nested functions that don't need literals cloning. | |
2790 bool pretenure = instr->hydrogen()->pretenure(); | |
2791 if (!pretenure && instr->hydrogen()->has_no_literals()) { | |
2792 FastNewClosureStub stub(instr->hydrogen()->strict_mode(), | |
2793 instr->hydrogen()->is_generator()); | |
2794 __ Mov(x2, Operand(instr->hydrogen()->shared_info())); | |
2795 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); | |
2796 } else { | |
2797 __ Mov(x2, Operand(instr->hydrogen()->shared_info())); | |
2798 __ Mov(x1, Operand(pretenure ? factory()->true_value() | |
2799 : factory()->false_value())); | |
2800 __ Push(cp, x2, x1); | |
2801 CallRuntime(Runtime::kNewClosure, 3, instr); | |
2802 } | |
2803 } | |
2804 | |
2805 | |
2806 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { | |
2807 Register map = ToRegister(instr->map()); | |
2808 Register result = ToRegister(instr->result()); | |
2809 Label load_cache, done; | |
2810 | |
2811 __ EnumLengthUntagged(result, map); | |
2812 __ Cbnz(result, &load_cache); | |
2813 | |
2814 __ Mov(result, Operand(isolate()->factory()->empty_fixed_array())); | |
2815 __ B(&done); | |
2816 | |
2817 __ Bind(&load_cache); | |
2818 __ LoadInstanceDescriptors(map, result); | |
2819 __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); | |
2820 __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); | |
2821 DeoptimizeIfZero(result, instr->environment()); | |
2822 | |
2823 __ Bind(&done); | |
2824 } | |
2825 | |
2826 | |
2827 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { | |
2828 Register object = ToRegister(instr->object()); | |
2829 Register null_value = x5; | |
2830 | |
2831 ASSERT(instr->IsMarkedAsCall()); | |
2832 ASSERT(object.Is(x0)); | |
2833 | |
2834 Label deopt; | |
2835 | |
2836 __ JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &deopt); | |
2837 | |
2838 __ LoadRoot(null_value, Heap::kNullValueRootIndex); | |
2839 __ Cmp(object, null_value); | |
2840 __ B(eq, &deopt); | |
2841 | |
2842 __ JumpIfSmi(object, &deopt); | |
2843 | |
2844 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); | |
2845 __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE); | |
2846 __ B(le, &deopt); | |
2847 | |
2848 Label use_cache, call_runtime; | |
2849 __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime); | |
2850 | |
2851 __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); | |
2852 __ B(&use_cache); | |
2853 | |
2854 __ Bind(&deopt); | |
2855 Deoptimize(instr->environment()); | |
2856 | |
2857 // Get the set of properties to enumerate. | |
2858 __ Bind(&call_runtime); | |
2859 __ Push(object); | |
2860 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); | |
2861 | |
2862 __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset)); | |
2863 __ JumpIfNotRoot(x1, Heap::kMetaMapRootIndex, &deopt); | |
2864 | |
2865 __ Bind(&use_cache); | |
2866 } | |
2867 | |
2868 | |
2869 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { | |
2870 Register input = ToRegister(instr->value()); | |
2871 Register result = ToRegister(instr->result()); | |
2872 | |
2873 __ AssertString(input); | |
2874 | |
2875 // Assert that we can use a W register load to get the hash. | |
2876 ASSERT((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits); | |
2877 __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset)); | |
2878 __ IndexFromHash(result, result); | |
2879 } | |
2880 | |
2881 | |
2882 void LCodeGen::EmitGoto(int block) { | |
2883 // Do not emit a jump if we are emitting a goto to the next block. | |
2884 if (!IsNextEmittedBlock(block)) { | |
2885 __ B(chunk_->GetAssemblyLabel(LookupDestination(block))); | |
2886 } | |
2887 } | |
2888 | |
2889 | |
2890 void LCodeGen::DoGoto(LGoto* instr) { | |
2891 EmitGoto(instr->block_id()); | |
2892 } | |
2893 | |
2894 | |
2895 void LCodeGen::DoHasCachedArrayIndexAndBranch( | |
2896 LHasCachedArrayIndexAndBranch* instr) { | |
2897 Register input = ToRegister(instr->value()); | |
2898 Register temp = ToRegister32(instr->temp()); | |
2899 | |
2900 // Assert that the cache status bits fit in a W register. | |
2901 ASSERT(is_uint32(String::kContainsCachedArrayIndexMask)); | |
2902 __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset)); | |
2903 __ Tst(temp, String::kContainsCachedArrayIndexMask); | |
2904 EmitBranch(instr, eq); | |
2905 } | |
2906 | |
2907 | |
2908 // The HHasInstanceTypeAndBranch instruction is built with an interval of types | |
2909 // to test, but is only used in very restricted ways. The only possible kinds | |
2910 // of intervals are: | |
2911 // - [ FIRST_TYPE, instr->to() ] | |
2912 // - [ instr->from(), LAST_TYPE ] | |
2913 // - instr->from() == instr->to() | |
2914 // | |
2915 // These kinds of intervals can be checked with only one compare instruction, | |
2916 // provided the correct value and test condition are used. | |
2917 // | |
2918 // TestType() will return the value to use in the compare instruction and | |
2919 // BranchCondition() will return the condition to use depending on the kind | |
2920 // of interval actually specified in the instruction. | |
2921 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) { | |
2922 InstanceType from = instr->from(); | |
2923 InstanceType to = instr->to(); | |
2924 if (from == FIRST_TYPE) return to; | |
2925 ASSERT((from == to) || (to == LAST_TYPE)); | |
2926 return from; | |
2927 } | |
2928 | |
2929 | |
2930 // See comment above TestType function for what this function does. | |
2931 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { | |
2932 InstanceType from = instr->from(); | |
2933 InstanceType to = instr->to(); | |
2934 if (from == to) return eq; | |
2935 if (to == LAST_TYPE) return hs; | |
2936 if (from == FIRST_TYPE) return ls; | |
2937 UNREACHABLE(); | |
2938 return eq; | |
2939 } | |
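// For example (illustrative values, not from the original): an interval
// [FIRST_TYPE, JS_OBJECT_TYPE] is tested with a single
// Cmp(type, JS_OBJECT_TYPE) and a branch on 'ls', while an exact check
// for SYMBOL_TYPE uses Cmp(type, SYMBOL_TYPE) and a branch on 'eq'.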
2940 | |
2941 | |
2942 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) { | |
2943 Register input = ToRegister(instr->value()); | |
2944 Register scratch = ToRegister(instr->temp()); | |
2945 | |
2946 if (!instr->hydrogen()->value()->IsHeapObject()) { | |
2947 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); | |
2948 } | |
2949 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen())); | |
2950 EmitBranch(instr, BranchCondition(instr->hydrogen())); | |
2951 } | |
2952 | |
2953 | |
2954 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { | |
2955 Register result = ToRegister(instr->result()); | |
2956 Register base = ToRegister(instr->base_object()); | |
2957 if (instr->offset()->IsConstantOperand()) { | |
2958 __ Add(result, base, ToOperand32I(instr->offset())); | |
2959 } else { | |
2960 __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW)); | |
2961 } | |
2962 } | |
2963 | |
2964 | |
2965 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { | |
2966 ASSERT(ToRegister(instr->context()).is(cp)); | |
2967 // Assert that the arguments are in the registers expected by InstanceofStub. | |
2968 ASSERT(ToRegister(instr->left()).Is(InstanceofStub::left())); | |
2969 ASSERT(ToRegister(instr->right()).Is(InstanceofStub::right())); | |
2970 | |
2971 InstanceofStub stub(InstanceofStub::kArgsInRegisters); | |
2972 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); | |
2973 | |
2974 // InstanceofStub returns a result in x0: | |
2975 // 0 => instance | |
2976 // smi 1 => not an instance, | |
2977 // so the Csel below selects the true root exactly when x0 was 0. | |
2977 __ Cmp(x0, 0); | |
2978 __ LoadTrueFalseRoots(x0, x1); | |
2979 __ Csel(x0, x0, x1, eq); | |
2980 } | |
2981 | |
2982 | |
2983 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { | |
2984 class DeferredInstanceOfKnownGlobal: public LDeferredCode { | |
2985 public: | |
2986 DeferredInstanceOfKnownGlobal(LCodeGen* codegen, | |
2987 LInstanceOfKnownGlobal* instr) | |
2988 : LDeferredCode(codegen), instr_(instr) { } | |
2989 virtual void Generate() { | |
2990 codegen()->DoDeferredInstanceOfKnownGlobal(instr_); | |
2991 } | |
2992 virtual LInstruction* instr() { return instr_; } | |
2993 private: | |
2994 LInstanceOfKnownGlobal* instr_; | |
2995 }; | |
2996 | |
2997 DeferredInstanceOfKnownGlobal* deferred = | |
2998 new(zone()) DeferredInstanceOfKnownGlobal(this, instr); | |
2999 | |
3000 Label map_check, return_false, cache_miss, done; | |
3001 Register object = ToRegister(instr->value()); | |
3002 Register result = ToRegister(instr->result()); | |
3003 // x4 is expected in the associated deferred code and stub. | |
3004 Register map_check_site = x4; | |
3005 Register map = x5; | |
3006 | |
3007 // This instruction is marked as call. We can clobber any register. | |
3008 ASSERT(instr->IsMarkedAsCall()); | |
3009 | |
3010 // We must take into account that object is in x11. | |
3011 ASSERT(object.Is(x11)); | |
3012 Register scratch = x10; | |
3013 | |
3014 // A smi is not an instance of anything. | |
3015 __ JumpIfSmi(object, &return_false); | |
3016 | |
3017 // This is the inlined call site instanceof cache. The two occurrences of the | |
3018 // hole value will be patched to the last map/result pair generated by the | |
3019 // instanceof stub. | |
3020 __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); | |
3021 { | |
3022 // We deliberately use Factory::the_hole_value() here instead of loading | |
3023 // from the root array, to force relocation so that the value can later be | |
3024 // patched with a custom one. | |
3025 InstructionAccurateScope scope(masm(), 5); | |
3026 __ bind(&map_check); | |
3027 // Will be patched with the cached map. | |
3028 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value()); | |
3029 __ LoadRelocated(scratch, Operand(Handle<Object>(cell))); | |
3030 __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset)); | |
3031 __ cmp(map, scratch); | |
3032 __ b(&cache_miss, ne); | |
3033 // The address of this instruction is computed relative to the map check | |
3034 // above, so check the size of the code generated. | |
3035 ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4); | |
3036 // Will be patched with the cached result. | |
3037 __ LoadRelocated(result, Operand(factory()->the_hole_value())); | |
3038 } | |
3039 __ B(&done); | |
3040 | |
3041 // The inlined call site cache did not match. | |
3042 // Check null and string before calling the deferred code. | |
3043 __ Bind(&cache_miss); | |
3044 // Compute the address of the map check. It must not be clobbered until the | |
3045 // InstanceofStub has used it. | |
3046 __ Adr(map_check_site, &map_check); | |
3047 // Null is not an instance of anything. | |
3048 __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false); | |
3049 | |
3050 // String values are not instances of anything. | |
3051 // Return false if the object is a string. Otherwise, jump to the deferred | |
3052 // code. | |
3053 // Note that we can't jump directly to deferred code from | |
3054 // IsObjectJSStringType, because it uses tbz for the jump and the deferred | |
3055 // code can be out of range. | |
3056 __ IsObjectJSStringType(object, scratch, NULL, &return_false); | |
3057 __ B(deferred->entry()); | |
3058 | |
3059 __ Bind(&return_false); | |
3060 __ LoadRoot(result, Heap::kFalseValueRootIndex); | |
3061 | |
3062 // Here result is either true or false. | |
3063 __ Bind(deferred->exit()); | |
3064 __ Bind(&done); | |
3065 } | |
3066 | |
3067 | |
3068 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { | |
3069 Register result = ToRegister(instr->result()); | |
3070 ASSERT(result.Is(x0)); // InstanceofStub returns its result in x0. | |
3071 InstanceofStub::Flags flags = InstanceofStub::kNoFlags; | |
3072 flags = static_cast<InstanceofStub::Flags>( | |
3073 flags | InstanceofStub::kArgsInRegisters); | |
3074 flags = static_cast<InstanceofStub::Flags>( | |
3075 flags | InstanceofStub::kReturnTrueFalseObject); | |
3076 flags = static_cast<InstanceofStub::Flags>( | |
3077 flags | InstanceofStub::kCallSiteInlineCheck); | |
3078 | |
3079 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | |
3080 LoadContextFromDeferred(instr->context()); | |
3081 | |
3082 // Prepare InstanceofStub arguments. | |
3083 ASSERT(ToRegister(instr->value()).Is(InstanceofStub::left())); | |
3084 __ LoadObject(InstanceofStub::right(), instr->function()); | |
3085 | |
3086 InstanceofStub stub(flags); | |
3087 CallCodeGeneric(stub.GetCode(isolate()), | |
3088 RelocInfo::CODE_TARGET, | |
3089 instr, | |
3090 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); | |
3091 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); | |
3092 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); | |
3093 | |
3094 // Put the result value into the result register slot. | |
3095 __ StoreToSafepointRegisterSlot(result, result); | |
3096 } | |
3097 | |
3098 | |
3099 void LCodeGen::DoInstructionGap(LInstructionGap* instr) { | |
3100 DoGap(instr); | |
3101 } | |
3102 | |
3103 | |
3104 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { | |
3105 Register value = ToRegister32(instr->value()); | |
3106 DoubleRegister result = ToDoubleRegister(instr->result()); | |
3107 __ Scvtf(result, value); | |
3108 } | |
3109 | |
3110 | |
3111 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { | |
3112 ASSERT(ToRegister(instr->context()).is(cp)); | |
3113 // The function is required to be in x1. | |
3114 ASSERT(ToRegister(instr->function()).is(x1)); | |
3115 ASSERT(instr->HasPointerMap()); | |
3116 | |
3117 Handle<JSFunction> known_function = instr->hydrogen()->known_function(); | |
3118 if (known_function.is_null()) { | |
3119 LPointerMap* pointers = instr->pointer_map(); | |
3120 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); | |
3121 ParameterCount count(instr->arity()); | |
3122 __ InvokeFunction(x1, count, CALL_FUNCTION, generator); | |
3123 } else { | |
3124 CallKnownFunction(known_function, | |
3125 instr->hydrogen()->formal_parameter_count(), | |
3126 instr->arity(), | |
3127 instr, | |
3128 x1); | |
3129 } | |
3130 } | |
3131 | |
3132 | |
3133 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) { | |
3134 Register temp1 = ToRegister(instr->temp1()); | |
3135 Register temp2 = ToRegister(instr->temp2()); | |
3136 | |
3137 // Get the frame pointer for the calling frame. | |
3138 __ Ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
3139 | |
3140 // Skip the arguments adaptor frame if it exists. | |
3141 Label check_frame_marker; | |
3142 __ Ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset)); | |
3143 __ Cmp(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); | |
3144 __ B(ne, &check_frame_marker); | |
3145 __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset)); | |
3146 | |
3147 // Check the marker in the calling frame. | |
3148 __ Bind(&check_frame_marker); | |
3149 __ Ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); | |
3150 | |
3151 EmitCompareAndBranch( | |
3152 instr, eq, temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT))); | |
3153 } | |
3154 | |
3155 | |
3156 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) { | |
3157 Label* is_object = instr->TrueLabel(chunk_); | |
3158 Label* is_not_object = instr->FalseLabel(chunk_); | |
3159 Register value = ToRegister(instr->value()); | |
3160 Register map = ToRegister(instr->temp1()); | |
3161 Register scratch = ToRegister(instr->temp2()); | |
3162 | |
3163 __ JumpIfSmi(value, is_not_object); | |
3164 __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object); | |
3165 | |
3166 __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); | |
3167 | |
3168 // Check for undetectable objects. | |
3169 __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); | |
3170 __ TestAndBranchIfAnySet(scratch, 1 << Map::kIsUndetectable, is_not_object); | |
3171 | |
3172 // Check that the instance type is in the JS object type range. | |
3173 __ IsInstanceJSObjectType(map, scratch, NULL); | |
3174 // IsInstanceJSObjectType has updated the flags. Testing them with the "le" | |
3175 // condition checks whether the object's instance type is a valid JS object | |
3176 // type. | |
3177 EmitBranch(instr, le); | |
3178 } | |
3179 | |
3180 | |
3181 Condition LCodeGen::EmitIsString(Register input, | |
3182 Register temp1, | |
3183 Label* is_not_string, | |
3184 SmiCheck check_needed = INLINE_SMI_CHECK) { | |
3185 if (check_needed == INLINE_SMI_CHECK) { | |
3186 __ JumpIfSmi(input, is_not_string); | |
3187 } | |
3188 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE); | |
3189 | |
3190 return lt; | |
3191 } | |
3192 | |
3193 | |
3194 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { | |
3195 Register val = ToRegister(instr->value()); | |
3196 Register scratch = ToRegister(instr->temp()); | |
3197 | |
3198 SmiCheck check_needed = | |
3199 instr->hydrogen()->value()->IsHeapObject() | |
3200 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | |
3201 Condition true_cond = | |
3202 EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed); | |
3203 | |
3204 EmitBranch(instr, true_cond); | |
3205 } | |
3206 | |
3207 | |
3208 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { | |
3209 Register value = ToRegister(instr->value()); | |
3210 STATIC_ASSERT(kSmiTag == 0); | |
3211 EmitTestAndBranch(instr, eq, value, kSmiTagMask); | |
3212 } | |
3213 | |
3214 | |
3215 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { | |
3216 Register input = ToRegister(instr->value()); | |
3217 Register temp = ToRegister(instr->temp()); | |
3218 | |
3219 if (!instr->hydrogen()->value()->IsHeapObject()) { | |
3220 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); | |
3221 } | |
3222 __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset)); | |
3223 __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); | |
3224 | |
3225 EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable); | |
3226 } | |
3227 | |
3228 | |
3229 static const char* LabelType(LLabel* label) { | |
3230 if (label->is_loop_header()) return " (loop header)"; | |
3231 if (label->is_osr_entry()) return " (OSR entry)"; | |
3232 return ""; | |
3233 } | |
3234 | |
3235 | |
3236 void LCodeGen::DoLabel(LLabel* label) { | |
3237 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", | |
3238 current_instruction_, | |
3239 label->hydrogen_value()->id(), | |
3240 label->block_id(), | |
3241 LabelType(label)); | |
3242 | |
3243 __ Bind(label->label()); | |
3244 current_block_ = label->block_id(); | |
3245 DoGap(label); | |
3246 } | |
3247 | |
3248 | |
3249 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { | |
3250 Register context = ToRegister(instr->context()); | |
3251 Register result = ToRegister(instr->result()); | |
3252 __ Ldr(result, ContextMemOperand(context, instr->slot_index())); | |
3253 if (instr->hydrogen()->RequiresHoleCheck()) { | |
3254 if (instr->hydrogen()->DeoptimizesOnHole()) { | |
3255 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, | |
3256 instr->environment()); | |
3257 } else { | |
3258 Label not_the_hole; | |
3259 __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, ¬_the_hole); | |
3260 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); | |
3261 __ Bind(¬_the_hole); | |
3262 } | |
3263 } | |
3264 } | |
3265 | |
3266 | |
3267 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { | |
3268 Register function = ToRegister(instr->function()); | |
3269 Register result = ToRegister(instr->result()); | |
3270 Register temp = ToRegister(instr->temp()); | |
3271 Label deopt; | |
3272 | |
3273 // Check that the function really is a function. Leaves map in the result | |
3274 // register. | |
3275 __ JumpIfNotObjectType(function, result, temp, JS_FUNCTION_TYPE, &deopt); | |
3276 | |
3277 // Make sure that the function has an instance prototype. | |
3278 Label non_instance; | |
3279 __ Ldrb(temp, FieldMemOperand(result, Map::kBitFieldOffset)); | |
3280 __ Tbnz(temp, Map::kHasNonInstancePrototype, &non_instance); | |
3281 | |
3282 // Get the prototype or initial map from the function. | |
3283 __ Ldr(result, FieldMemOperand(function, | |
3284 JSFunction::kPrototypeOrInitialMapOffset)); | |
3285 | |
3286 // Check that the function has a prototype or an initial map. | |
3287 __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &deopt); | |
3288 | |
3289 // If the function does not have an initial map, we're done. | |
3290 Label done; | |
3291 __ CompareObjectType(result, temp, temp, MAP_TYPE); | |
3292 __ B(ne, &done); | |
3293 | |
3294 // Get the prototype from the initial map. | |
3295 __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); | |
3296 __ B(&done); | |
3297 | |
3298 // Non-instance prototype: fetch prototype from constructor field in initial | |
3299 // map. | |
3300 __ Bind(&non_instance); | |
3301 __ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); | |
3302 __ B(&done); | |
3303 | |
3304 // Deoptimize case. | |
3305 __ Bind(&deopt); | |
3306 Deoptimize(instr->environment()); | |
3307 | |
3308 // All done. | |
3309 __ Bind(&done); | |
3310 } | |
3311 | |
3312 | |
3313 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | |
3314 Register result = ToRegister(instr->result()); | |
3315 __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); | |
3316 __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset)); | |
3317 if (instr->hydrogen()->RequiresHoleCheck()) { | |
3318 DeoptimizeIfRoot( | |
3319 result, Heap::kTheHoleValueRootIndex, instr->environment()); | |
3320 } | |
3321 } | |
3322 | |
3323 | |
3324 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { | |
3325 ASSERT(ToRegister(instr->context()).is(cp)); | |
3326 ASSERT(ToRegister(instr->global_object()).Is(x0)); | |
3327 ASSERT(ToRegister(instr->result()).Is(x0)); | |
3328 __ Mov(x2, Operand(instr->name())); | |
3329 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL; | |
3330 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode); | |
3331 CallCode(ic, RelocInfo::CODE_TARGET, instr); | |
3332 } | |
3333 | |
3334 | |
3335 MemOperand LCodeGen::PrepareKeyedExternalArrayOperand( | |
3336 Register key, | |
3337 Register base, | |
3338 Register scratch, | |
3339 bool key_is_smi, | |
3340 bool key_is_constant, | |
3341 int constant_key, | |
3342 ElementsKind elements_kind, | |
3343 int additional_index) { | |
3344 int element_size_shift = ElementsKindToShiftSize(elements_kind); | |
3345 int additional_offset = IsFixedTypedArrayElementsKind(elements_kind) | |
3346 ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag | |
3347 : 0; | |
3348 | |
3349 if (key_is_constant) { | |
3350 int base_offset = ((constant_key + additional_index) << element_size_shift); | |
3351 return MemOperand(base, base_offset + additional_offset); | |
3352 } | |
3353 | |
3354 if (additional_index == 0) { | |
3355 if (key_is_smi) { | |
3356 // Key is smi: untag, and scale by element size. | |
3357 __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift)); | |
3358 return MemOperand(scratch, additional_offset); | |
3359 } else { | |
3360 // Key is not smi: sign-extend it and scale by the element size. | |
3361 if (additional_offset == 0) { | |
3362 return MemOperand(base, key, SXTW, element_size_shift); | |
3363 } else { | |
3364 __ Add(scratch, base, Operand(key, SXTW, element_size_shift)); | |
3365 return MemOperand(scratch, additional_offset); | |
3366 } | |
3367 } | |
3368 } else { | |
3369 // TODO(all): Try to combine these cases a bit more intelligently. | |
3370 if (additional_offset == 0) { | |
3371 if (key_is_smi) { | |
3372 __ SmiUntag(scratch, key); | |
3373 __ Add(scratch.W(), scratch.W(), additional_index); | |
3374 } else { | |
3375 __ Add(scratch.W(), key.W(), additional_index); | |
3376 } | |
3377 return MemOperand(base, scratch, LSL, element_size_shift); | |
3378 } else { | |
3379 if (key_is_smi) { | |
3380 __ Add(scratch, base, | |
3381 Operand::UntagSmiAndScale(key, element_size_shift)); | |
3382 } else { | |
3383 __ Add(scratch, base, Operand(key, SXTW, element_size_shift)); | |
3384 } | |
3385 return MemOperand( | |
3386 scratch, | |
3387 (additional_index << element_size_shift) + additional_offset); | |
3388 } | |
3389 } | |
3390 } | |
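// For example, with constant_key == 2, additional_index == 1 and a FLOAT64 | |
// array (element_size_shift == 3), the constant path above produces | |
// MemOperand(base, ((2 + 1) << 3)) == MemOperand(base, 24), plus the data | |
// offset when the elements are a fixed typed array. | |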
3391 | |
3392 | |
3393 void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) { | |
3394 Register ext_ptr = ToRegister(instr->elements()); | |
3395 Register scratch; | |
3396 ElementsKind elements_kind = instr->elements_kind(); | |
3397 | |
3398 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); | |
3399 bool key_is_constant = instr->key()->IsConstantOperand(); | |
3400 Register key = no_reg; | |
3401 int constant_key = 0; | |
3402 if (key_is_constant) { | |
3403 ASSERT(instr->temp() == NULL); | |
3404 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | |
3405 if (constant_key & 0xf0000000) { | |
3406 Abort(kArrayIndexConstantValueTooBig); | |
3407 } | |
3408 } else { | |
3409 scratch = ToRegister(instr->temp()); | |
3410 key = ToRegister(instr->key()); | |
3411 } | |
3412 | |
3413 MemOperand mem_op = | |
3414 PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi, | |
3415 key_is_constant, constant_key, | |
3416 elements_kind, | |
3417 instr->additional_index()); | |
3418 | |
3419 if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) || | |
3420 (elements_kind == FLOAT32_ELEMENTS)) { | |
3421 DoubleRegister result = ToDoubleRegister(instr->result()); | |
3422 __ Ldr(result.S(), mem_op); | |
3423 __ Fcvt(result, result.S()); | |
3424 } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) || | |
3425 (elements_kind == FLOAT64_ELEMENTS)) { | |
3426 DoubleRegister result = ToDoubleRegister(instr->result()); | |
3427 __ Ldr(result, mem_op); | |
3428 } else { | |
3429 Register result = ToRegister(instr->result()); | |
3430 | |
3431 switch (elements_kind) { | |
3432 case EXTERNAL_INT8_ELEMENTS: | |
3433 case INT8_ELEMENTS: | |
3434 __ Ldrsb(result, mem_op); | |
3435 break; | |
3436 case EXTERNAL_UINT8_CLAMPED_ELEMENTS: | |
3437 case EXTERNAL_UINT8_ELEMENTS: | |
3438 case UINT8_ELEMENTS: | |
3439 case UINT8_CLAMPED_ELEMENTS: | |
3440 __ Ldrb(result, mem_op); | |
3441 break; | |
3442 case EXTERNAL_INT16_ELEMENTS: | |
3443 case INT16_ELEMENTS: | |
3444 __ Ldrsh(result, mem_op); | |
3445 break; | |
3446 case EXTERNAL_UINT16_ELEMENTS: | |
3447 case UINT16_ELEMENTS: | |
3448 __ Ldrh(result, mem_op); | |
3449 break; | |
3450 case EXTERNAL_INT32_ELEMENTS: | |
3451 case INT32_ELEMENTS: | |
3452 __ Ldrsw(result, mem_op); | |
3453 break; | |
3454 case EXTERNAL_UINT32_ELEMENTS: | |
3455 case UINT32_ELEMENTS: | |
3456 __ Ldr(result.W(), mem_op); | |
3457 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { | |
3458 // Deopt if value >= 0x80000000. | |
3459 __ Tst(result, 0xFFFFFFFF80000000); | |
3460 DeoptimizeIf(ne, instr->environment()); | |
3461 } | |
3462 break; | |
3463 case FLOAT32_ELEMENTS: | |
3464 case FLOAT64_ELEMENTS: | |
3465 case EXTERNAL_FLOAT32_ELEMENTS: | |
3466 case EXTERNAL_FLOAT64_ELEMENTS: | |
3467 case FAST_HOLEY_DOUBLE_ELEMENTS: | |
3468 case FAST_HOLEY_ELEMENTS: | |
3469 case FAST_HOLEY_SMI_ELEMENTS: | |
3470 case FAST_DOUBLE_ELEMENTS: | |
3471 case FAST_ELEMENTS: | |
3472 case FAST_SMI_ELEMENTS: | |
3473 case DICTIONARY_ELEMENTS: | |
3474 case SLOPPY_ARGUMENTS_ELEMENTS: | |
3475 UNREACHABLE(); | |
3476 break; | |
3477 } | |
3478 } | |
3479 } | |
3480 | |
3481 | |
3482 void LCodeGen::CalcKeyedArrayBaseRegister(Register base, | |
3483 Register elements, | |
3484 Register key, | |
3485 bool key_is_tagged, | |
3486 ElementsKind elements_kind) { | |
3487 int element_size_shift = ElementsKindToShiftSize(elements_kind); | |
3488 | |
3489 // Even though the HLoad/StoreKeyed instructions force the input | |
3490 // representation for the key to be an integer, the input gets replaced during | |
3491 // bounds check elimination with the index argument to the bounds check, which | |
3492 // can be tagged, so that case must be handled here, too. | |
3493 if (key_is_tagged) { | |
3494 __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift)); | |
3495 } else { | |
3496 // Sign extend key because it could be a 32-bit negative value or contain | |
3497 // garbage in the top 32-bits. The address computation happens in 64-bit. | |
3498 ASSERT((element_size_shift >= 0) && (element_size_shift <= 4)); | |
3499 __ Add(base, elements, Operand(key, SXTW, element_size_shift)); | |
3500 } | |
3501 } | |
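// For example, a tagged key holding smi 2 in a FAST_ELEMENTS array | |
// (element_size_shift == 3) sets base to elements + (2 << 3), i.e. 16 bytes | |
// past the start of the elements array. | |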
3502 | |
3503 | |
3504 void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) { | |
3505 Register elements = ToRegister(instr->elements()); | |
3506 DoubleRegister result = ToDoubleRegister(instr->result()); | |
3507 Register load_base; | |
3508 int offset = 0; | |
3509 | |
3510 if (instr->key()->IsConstantOperand()) { | |
3511 ASSERT(instr->hydrogen()->RequiresHoleCheck() || | |
3512 (instr->temp() == NULL)); | |
3513 | |
3514 int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | |
3515 if (constant_key & 0xf0000000) { | |
3516 Abort(kArrayIndexConstantValueTooBig); | |
3517 } | |
3518 offset = FixedDoubleArray::OffsetOfElementAt(constant_key + | |
3519 instr->additional_index()); | |
3520 load_base = elements; | |
3521 } else { | |
3522 load_base = ToRegister(instr->temp()); | |
3523 Register key = ToRegister(instr->key()); | |
3524 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); | |
3525 CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged, | |
3526 instr->hydrogen()->elements_kind()); | |
3527 offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index()); | |
3528 } | |
3529 __ Ldr(result, FieldMemOperand(load_base, offset)); | |
3530 | |
3531 if (instr->hydrogen()->RequiresHoleCheck()) { | |
3532 Register scratch = ToRegister(instr->temp()); | |
3533 | |
3534 // TODO(all): Is it faster to reload this value to an integer register, or | |
3535 // move from fp to integer? | |
3536 __ Fmov(scratch, result); | |
3537 __ Cmp(scratch, kHoleNanInt64); | |
3538 DeoptimizeIf(eq, instr->environment()); | |
3539 } | |
3540 } | |
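// The hole is a particular NaN bit pattern (kHoleNanInt64), so it must be | |
// compared as raw bits in an integer register; an ordinary floating-point | |
// compare could not tell the hole apart from any other NaN. | |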
3541 | |
3542 | |
3543 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) { | |
3544 Register elements = ToRegister(instr->elements()); | |
3545 Register result = ToRegister(instr->result()); | |
3546 Register load_base; | |
3547 int offset = 0; | |
3548 | |
3549 if (instr->key()->IsConstantOperand()) { | |
3550 ASSERT(instr->temp() == NULL); | |
3551 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); | |
3552 offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + | |
3553 instr->additional_index()); | |
3554 load_base = elements; | |
3555 } else { | |
3556 load_base = ToRegister(instr->temp()); | |
3557 Register key = ToRegister(instr->key()); | |
3558 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); | |
3559 CalcKeyedArrayBaseRegister(load_base, elements, key, key_is_tagged, | |
3560 instr->hydrogen()->elements_kind()); | |
3561 offset = FixedArray::OffsetOfElementAt(instr->additional_index()); | |
3562 } | |
3563 Representation representation = instr->hydrogen()->representation(); | |
3564 | |
3565 if (representation.IsInteger32() && | |
3566 instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS) { | |
3567 STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0); | |
3568 __ Load(result, UntagSmiFieldMemOperand(load_base, offset), | |
3569 Representation::Integer32()); | |
3570 } else { | |
3571 __ Load(result, FieldMemOperand(load_base, offset), | |
3572 representation); | |
3573 } | |
3574 | |
3575 if (instr->hydrogen()->RequiresHoleCheck()) { | |
3576 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { | |
3577 DeoptimizeIfNotSmi(result, instr->environment()); | |
3578 } else { | |
3579 DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, | |
3580 instr->environment()); | |
3581 } | |
3582 } | |
3583 } | |
3584 | |
3585 | |
3586 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { | |
3587 ASSERT(ToRegister(instr->context()).is(cp)); | |
3588 ASSERT(ToRegister(instr->object()).Is(x1)); | |
3589 ASSERT(ToRegister(instr->key()).Is(x0)); | |
3590 | |
3591 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); | |
3592 CallCode(ic, RelocInfo::CODE_TARGET, instr); | |
3593 | |
3594 ASSERT(ToRegister(instr->result()).Is(x0)); | |
3595 } | |
3596 | |
3597 | |
3598 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { | |
3599 HObjectAccess access = instr->hydrogen()->access(); | |
3600 int offset = access.offset(); | |
3601 Register object = ToRegister(instr->object()); | |
3602 | |
3603 if (access.IsExternalMemory()) { | |
3604 Register result = ToRegister(instr->result()); | |
3605 __ Load(result, MemOperand(object, offset), access.representation()); | |
3606 return; | |
3607 } | |
3608 | |
3609 if (instr->hydrogen()->representation().IsDouble()) { | |
3610 FPRegister result = ToDoubleRegister(instr->result()); | |
3611 __ Ldr(result, FieldMemOperand(object, offset)); | |
3612 return; | |
3613 } | |
3614 | |
3615 Register result = ToRegister(instr->result()); | |
3616 Register source; | |
3617 if (access.IsInobject()) { | |
3618 source = object; | |
3619 } else { | |
3620 // Load the properties array, using result as a scratch register. | |
3621 __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); | |
3622 source = result; | |
3623 } | |
3624 | |
3625 if (access.representation().IsSmi() && | |
3626 instr->hydrogen()->representation().IsInteger32()) { | |
3627 // Read int value directly from upper half of the smi. | |
3628 STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0); | |
3629 __ Load(result, UntagSmiFieldMemOperand(source, offset), | |
3630 Representation::Integer32()); | |
3631 } else { | |
3632 __ Load(result, FieldMemOperand(source, offset), access.representation()); | |
3633 } | |
3634 } | |
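// A note on the smi fast path above: with 32-bit smi values stored in the | |
// upper word of a 64-bit field, UntagSmiFieldMemOperand lets the integer be | |
// read with a single 32-bit load of the field's upper half (this target is | |
// little-endian), so no separate untagging instruction is needed. | |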
3635 | |
3636 | |
3637 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { | |
3638 ASSERT(ToRegister(instr->context()).is(cp)); | |
3639 // LoadIC expects x2 to hold the name, and x0 to hold the receiver. | |
3640 ASSERT(ToRegister(instr->object()).is(x0)); | |
3641 __ Mov(x2, Operand(instr->name())); | |
3642 | |
3643 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL); | |
3644 CallCode(ic, RelocInfo::CODE_TARGET, instr); | |
3645 | |
3646 ASSERT(ToRegister(instr->result()).is(x0)); | |
3647 } | |
3648 | |
3649 | |
3650 void LCodeGen::DoLoadRoot(LLoadRoot* instr) { | |
3651 Register result = ToRegister(instr->result()); | |
3652 __ LoadRoot(result, instr->index()); | |
3653 } | |
3654 | |
3655 | |
3656 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) { | |
3657 Register result = ToRegister(instr->result()); | |
3658 Register map = ToRegister(instr->value()); | |
3659 __ EnumLengthSmi(result, map); | |
3660 } | |
3661 | |
3662 | |
3663 void LCodeGen::DoMathAbs(LMathAbs* instr) { | |
3664 Representation r = instr->hydrogen()->value()->representation(); | |
3665 if (r.IsDouble()) { | |
3666 DoubleRegister input = ToDoubleRegister(instr->value()); | |
3667 DoubleRegister result = ToDoubleRegister(instr->result()); | |
3668 __ Fabs(result, input); | |
3669 } else if (r.IsSmi() || r.IsInteger32()) { | |
3670 Register input = r.IsSmi() ? ToRegister(instr->value()) | |
3671 : ToRegister32(instr->value()); | |
3672 Register result = r.IsSmi() ? ToRegister(instr->result()) | |
3673 : ToRegister32(instr->result()); | |
3674 Label done; | |
3675 __ Abs(result, input, NULL, &done); | |
3676 Deoptimize(instr->environment()); | |
3677 __ Bind(&done); | |
3678 } | |
3679 } | |
3680 | |
3681 | |
3682 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr, | |
3683 Label* exit, | |
3684 Label* allocation_entry) { | |
3685 // Handle the tricky cases of MathAbsTagged: | |
3686 // - HeapNumber inputs. | |
3687 // - Negative inputs produce a positive result, so a new HeapNumber is | |
3688 // allocated to hold it. | |
3689 // - Positive inputs are returned as-is, since there is no need to allocate | |
3690 // a new HeapNumber for the result. | |
3691 // - The (smi) input -0x80000000 produces +0x80000000, which does not fit in | |
3692 // a smi. In this case, the inline code sets the result and jumps directly | |
3693 // to the allocation_entry label. | |
3694 ASSERT(instr->context() != NULL); | |
3695 ASSERT(ToRegister(instr->context()).is(cp)); | |
3696 Register input = ToRegister(instr->value()); | |
3697 Register temp1 = ToRegister(instr->temp1()); | |
3698 Register temp2 = ToRegister(instr->temp2()); | |
3699 Register result_bits = ToRegister(instr->temp3()); | |
3700 Register result = ToRegister(instr->result()); | |
3701 | |
3702 Label runtime_allocation; | |
3703 | |
3704 // Deoptimize if the input is not a HeapNumber. | |
3705 __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset)); | |
3706 DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex, | |
3707 instr->environment()); | |
3708 | |
3709 // If the argument is positive, we can return it as-is, without any need to | |
3710 // allocate a new HeapNumber for the result. We have to do this in integer | |
3711 // registers (rather than with fabs) because we need to be able to distinguish | |
3712 // the two zeroes. | |
3713 __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset)); | |
3714 __ Mov(result, input); | |
3715 __ Tbz(result_bits, kXSignBit, exit); | |
3716 | |
3717 // Calculate abs(input) by clearing the sign bit. | |
3718 __ Bic(result_bits, result_bits, kXSignMask); | |
3719 | |
3720 // Allocate a new HeapNumber to hold the result. | |
3721 // result_bits: the bit representation of the (double) result. | |
3722 __ Bind(allocation_entry); | |
3723 __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2); | |
3724 // The inline (non-deferred) code will store result_bits into result. | |
3725 __ B(exit); | |
3726 | |
3727 __ Bind(&runtime_allocation); | |
3728 if (FLAG_debug_code) { | |
3729 // Because result is in the pointer map, we need to make sure it has a valid | |
3730 // tagged value before we call the runtime. We speculatively set it to the | |
3731 // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already | |
3732 // be valid. | |
3733 Label result_ok; | |
3734 Register input = ToRegister(instr->value()); | |
3735 __ JumpIfSmi(result, &result_ok); | |
3736 __ Cmp(input, result); | |
3737 __ Assert(eq, kUnexpectedValue); | |
3738 __ Bind(&result_ok); | |
3739 } | |
3740 | |
3741 { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | |
3742 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, | |
3743 instr->context()); | |
3744 __ StoreToSafepointRegisterSlot(x0, result); | |
3745 } | |
3746 // The inline (non-deferred) code will store result_bits into result. | |
3747 } | |
3748 | |
3749 | |
3750 void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) { | |
3751 // Class for deferred case. | |
3752 class DeferredMathAbsTagged: public LDeferredCode { | |
3753 public: | |
3754 DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr) | |
3755 : LDeferredCode(codegen), instr_(instr) { } | |
3756 virtual void Generate() { | |
3757 codegen()->DoDeferredMathAbsTagged(instr_, exit(), | |
3758 allocation_entry()); | |
3759 } | |
3760 virtual LInstruction* instr() { return instr_; } | |
3761 Label* allocation_entry() { return &allocation; } | |
3762 private: | |
3763 LMathAbsTagged* instr_; | |
3764 Label allocation; | |
3765 }; | |
3766 | |
3767 // TODO(jbramley): The early-exit mechanism would skip the new frame handling | |
3768 // in GenerateDeferredCode. Tidy this up. | |
3769 ASSERT(!NeedsDeferredFrame()); | |
3770 | |
3771 DeferredMathAbsTagged* deferred = | |
3772 new(zone()) DeferredMathAbsTagged(this, instr); | |
3773 | |
3774 ASSERT(instr->hydrogen()->value()->representation().IsTagged() || | |
3775 instr->hydrogen()->value()->representation().IsSmi()); | |
3776 Register input = ToRegister(instr->value()); | |
3777 Register result_bits = ToRegister(instr->temp3()); | |
3778 Register result = ToRegister(instr->result()); | |
3779 Label done; | |
3780 | |
3781 // Handle smis inline. | |
3782 // We can treat smis as 64-bit integers, since the (low-order) tag bits will | |
3783 // never get set by the negation. This is therefore the same as the Integer32 | |
3784 // case in DoMathAbs, except that it operates on 64-bit values. | |
3785 STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0)); | |
3786 | |
3787 __ JumpIfNotSmi(input, deferred->entry()); | |
3788 | |
3789 __ Abs(result, input, NULL, &done); | |
3790 | |
3791 // The result is the magnitude (abs) of the smallest value a smi can | |
3792 // represent, encoded as a double. | |
3793 __ Mov(result_bits, double_to_rawbits(0x80000000)); | |
3794 __ B(deferred->allocation_entry()); | |
3795 | |
3796 __ Bind(deferred->exit()); | |
3797 __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset)); | |
3798 | |
3799 __ Bind(&done); | |
3800 } | |
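// For example, the smallest smi value is -0x80000000 (-2^31). Its magnitude, | |
// +2^31, is not representable as a smi, so the code above preloads | |
// result_bits with the bit pattern of the double 2147483648.0 and lets the | |
// deferred code allocate a HeapNumber to hold it. | |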
3801 | |
3802 | |
3803 void LCodeGen::DoMathExp(LMathExp* instr) { | |
3804 DoubleRegister input = ToDoubleRegister(instr->value()); | |
3805 DoubleRegister result = ToDoubleRegister(instr->result()); | |
3806 DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1()); | |
3807 DoubleRegister double_temp2 = double_scratch(); | |
3808 Register temp1 = ToRegister(instr->temp1()); | |
3809 Register temp2 = ToRegister(instr->temp2()); | |
3810 Register temp3 = ToRegister(instr->temp3()); | |
3811 | |
3812 MathExpGenerator::EmitMathExp(masm(), input, result, | |
3813 double_temp1, double_temp2, | |
3814 temp1, temp2, temp3); | |
3815 } | |
3816 | |
3817 | |
3818 void LCodeGen::DoMathFloor(LMathFloor* instr) { | |
3819 // TODO(jbramley): If we could provide a double result, we could use frintm | |
3820 // and produce a valid double result in a single instruction. | |
3821 DoubleRegister input = ToDoubleRegister(instr->value()); | |
3822 Register result = ToRegister(instr->result()); | |
3823 | |
3824 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
3825 DeoptimizeIfMinusZero(input, instr->environment()); | |
3826 } | |
3827 | |
3828 __ Fcvtms(result, input); | |
3829 | |
3830 // Check that the result fits into a 32-bit integer. | |
3831 // - The result did not overflow. | |
3832 __ Cmp(result, Operand(result, SXTW)); | |
3833 // - The input was not NaN. | |
3834 __ Fccmp(input, input, NoFlag, eq); | |
3835 DeoptimizeIf(ne, instr->environment()); | |
3836 } | |
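// Fcvtms rounds towards minus infinity: for example, it maps 1.5 to 1 and | |
// -1.5 to -2. Comparing the X result with its own sign-extended W half | |
// catches results that do not fit in 32 bits, and the Fccmp folds the NaN | |
// check into the same deoptimization. | |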
3837 | |
3838 | |
3839 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { | |
3840 Register dividend = ToRegister32(instr->dividend()); | |
3841 Register result = ToRegister32(instr->result()); | |
3842 int32_t divisor = instr->divisor(); | |
3843 | |
3844 // If the divisor is positive, things are easy: There can be no deopts and we | |
3845 // can simply do an arithmetic right shift. | |
3846 if (divisor == 1) return; | |
3847 int32_t shift = WhichPowerOf2Abs(divisor); | |
3848 if (divisor > 1) { | |
3849 __ Mov(result, Operand(dividend, ASR, shift)); | |
3850 return; | |
3851 } | |
3852 | |
3853 // If the divisor is negative, we have to negate and handle edge cases. | |
3854 Label not_kmin_int, done; | |
3855 __ Negs(result, dividend); | |
3856 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
3857 DeoptimizeIf(eq, instr->environment()); | |
3858 } | |
3859 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | |
3860 // Note that we could emit branch-free code, but that would need one more | |
3861 // register. | |
3862 if (divisor == -1) { | |
3863 DeoptimizeIf(vs, instr->environment()); | |
3864 } else { | |
3865 __ B(vc, ¬_kmin_int); | |
3866 __ Mov(result, kMinInt / divisor); | |
3867 __ B(&done); | |
3868 } | |
3869 } | |
3870 __ bind(¬_kmin_int); | |
3871 __ Mov(result, Operand(result, ASR, shift)); | |
3872 __ bind(&done); | |
3873 } | |
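// An arithmetic right shift already implements flooring division by a | |
// positive power of two: for example, -9 ASR 2 == -3 == floor(-9 / 4). For a | |
// negative divisor the dividend is negated first, so that, for example, | |
// floor(-9 / -4) == 2 == (9 ASR 2). | |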
3874 | |
3875 | |
3876 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { | |
3877 Register dividend = ToRegister32(instr->dividend()); | |
3878 int32_t divisor = instr->divisor(); | |
3879 Register result = ToRegister32(instr->result()); | |
3880 ASSERT(!AreAliased(dividend, result)); | |
3881 | |
3882 if (divisor == 0) { | |
3883 Deoptimize(instr->environment()); | |
3884 return; | |
3885 } | |
3886 | |
3887 // Check for (0 / -x) that will produce negative zero. | |
3888 HMathFloorOfDiv* hdiv = instr->hydrogen(); | |
3889 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | |
3890 __ Cmp(dividend, 0); | |
3891 DeoptimizeIf(eq, instr->environment()); | |
3892 } | |
3893 | |
3894 // Easy case: We need no dynamic check for the dividend and the flooring | |
3895 // division is the same as the truncating division. | |
3896 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || | |
3897 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { | |
3898 __ TruncatingDiv(result, dividend, Abs(divisor)); | |
3899 if (divisor < 0) __ Neg(result, result); | |
3900 return; | |
3901 } | |
3902 | |
3903 // In the general case we may need to adjust before and after the truncating | |
3904 // division to get a flooring division. | |
3905 Register temp = ToRegister32(instr->temp()); | |
3906 ASSERT(!AreAliased(temp, dividend, result)); | |
3907 Label needs_adjustment, done; | |
3908 __ Cmp(dividend, 0); | |
3909 __ B(divisor > 0 ? lt : gt, &needs_adjustment); | |
3910 __ TruncatingDiv(result, dividend, Abs(divisor)); | |
3911 if (divisor < 0) __ Neg(result, result); | |
3912 __ B(&done); | |
3913 __ bind(&needs_adjustment); | |
3914 __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1)); | |
3915 __ TruncatingDiv(result, temp, Abs(divisor)); | |
3916 if (divisor < 0) __ Neg(result, result); | |
3917 __ Sub(result, result, Operand(1)); | |
3918 __ bind(&done); | |
3919 } | |
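// Worked example of the adjustment: -7 / 2 truncates to -3 but floors to -4. | |
// The operand signs differ, so the code above computes (-7 + 1) / 2 == -3 | |
// with a truncating division and then subtracts 1, giving the expected -4. | |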
3920 | |
3921 | |
3922 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { | |
3923 Register dividend = ToRegister32(instr->dividend()); | |
3924 Register divisor = ToRegister32(instr->divisor()); | |
3925 Register remainder = ToRegister32(instr->temp()); | |
3926 Register result = ToRegister32(instr->result()); | |
3927 | |
3928 // Sdiv does not trap on A64 (division by zero yields zero), so we can | |
3929 // execute it speculatively before the checks below. | |
3930 __ Sdiv(result, dividend, divisor); | |
3931 | |
3932 // Check for x / 0. | |
3933 DeoptimizeIfZero(divisor, instr->environment()); | |
3934 | |
3935 // Check for (kMinInt / -1). | |
3936 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { | |
3937 // The V flag will be set iff dividend == kMinInt. | |
3938 __ Cmp(dividend, 1); | |
3939 __ Ccmp(divisor, -1, NoFlag, vs); | |
3940 DeoptimizeIf(eq, instr->environment()); | |
3941 } | |
3942 | |
3943 // Check for (0 / -x) that will produce negative zero. | |
3944 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
3945 __ Cmp(divisor, 0); | |
3946 __ Ccmp(dividend, 0, ZFlag, mi); | |
3947 // "divisor" can't be null because the code would have already been | |
3948 // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0). | |
3949 // In this case we need to deoptimize to produce a -0. | |
3950 DeoptimizeIf(eq, instr->environment()); | |
3951 } | |
3952 | |
3953 Label done; | |
3954 // If both operands have the same sign then we are done. | |
3955 __ Eor(remainder, dividend, divisor); | |
3956 __ Tbz(remainder, kWSignBit, &done); | |
3957 | |
3958 // Check if the result needs to be corrected. | |
3959 __ Msub(remainder, result, divisor, dividend); | |
3960 __ Cbz(remainder, &done); | |
3961 __ Sub(result, result, 1); | |
3962 | |
3963 __ Bind(&done); | |
3964 } | |
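// For example, -7 sdiv 2 truncates to -3 with remainder -1. The operands have | |
// different signs and the remainder is non-zero, so the quotient is corrected | |
// to -4 == floor(-7 / 2). | |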
3965 | |
3966 | |
3967 void LCodeGen::DoMathLog(LMathLog* instr) { | |
3968 ASSERT(instr->IsMarkedAsCall()); | |
3969 ASSERT(ToDoubleRegister(instr->value()).is(d0)); | |
3970 __ CallCFunction(ExternalReference::math_log_double_function(isolate()), | |
3971 0, 1); | |
3972 ASSERT(ToDoubleRegister(instr->result()).Is(d0)); | |
3973 } | |
3974 | |
3975 | |
3976 void LCodeGen::DoMathClz32(LMathClz32* instr) { | |
3977 Register input = ToRegister32(instr->value()); | |
3978 Register result = ToRegister32(instr->result()); | |
3979 __ Clz(result, input); | |
3980 } | |
3981 | |
3982 | |
3983 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { | |
3984 DoubleRegister input = ToDoubleRegister(instr->value()); | |
3985 DoubleRegister result = ToDoubleRegister(instr->result()); | |
3986 Label done; | |
3987 | |
3988 // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases: | |
3989 // Math.pow(-Infinity, 0.5) == +Infinity | |
3990 // Math.pow(-0.0, 0.5) == +0.0 | |
3991 | |
3992 // Catch -infinity inputs first. | |
3993 // TODO(jbramley): A constant infinity register would be helpful here. | |
3994 __ Fmov(double_scratch(), kFP64NegativeInfinity); | |
3995 __ Fcmp(double_scratch(), input); | |
3996 __ Fabs(result, input); | |
3997 __ B(&done, eq); | |
3998 | |
3999 // Add +0.0 to convert -0.0 to +0.0. | |
4000 __ Fadd(double_scratch(), input, fp_zero); | |
4001 __ Fsqrt(result, double_scratch()); | |
4002 | |
4003 __ Bind(&done); | |
4004 } | |
4005 | |
4006 | |
4007 void LCodeGen::DoPower(LPower* instr) { | |
4008 Representation exponent_type = instr->hydrogen()->right()->representation(); | |
4009 // Having marked this as a call, we can use any registers. | |
4010 // Just make sure that the input/output registers are the expected ones. | |
4011 ASSERT(!instr->right()->IsDoubleRegister() || | |
4012 ToDoubleRegister(instr->right()).is(d1)); | |
4013 ASSERT(exponent_type.IsInteger32() || !instr->right()->IsRegister() || | |
4014 ToRegister(instr->right()).is(x11)); | |
4015 ASSERT(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12)); | |
4016 ASSERT(ToDoubleRegister(instr->left()).is(d0)); | |
4017 ASSERT(ToDoubleRegister(instr->result()).is(d0)); | |
4018 | |
4019 if (exponent_type.IsSmi()) { | |
4020 MathPowStub stub(MathPowStub::TAGGED); | |
4021 __ CallStub(&stub); | |
4022 } else if (exponent_type.IsTagged()) { | |
4023 Label no_deopt; | |
4024 __ JumpIfSmi(x11, &no_deopt); | |
4025 __ Ldr(x0, FieldMemOperand(x11, HeapObject::kMapOffset)); | |
4026 DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex, | |
4027 instr->environment()); | |
4028 __ Bind(&no_deopt); | |
4029 MathPowStub stub(MathPowStub::TAGGED); | |
4030 __ CallStub(&stub); | |
4031 } else if (exponent_type.IsInteger32()) { | |
4032 // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub | |
4033 // supports large integer exponents. | |
4034 Register exponent = ToRegister(instr->right()); | |
4035 __ Sxtw(exponent, exponent); | |
4036 MathPowStub stub(MathPowStub::INTEGER); | |
4037 __ CallStub(&stub); | |
4038 } else { | |
4039 ASSERT(exponent_type.IsDouble()); | |
4040 MathPowStub stub(MathPowStub::DOUBLE); | |
4041 __ CallStub(&stub); | |
4042 } | |
4043 } | |
4044 | |
4045 | |
4046 void LCodeGen::DoMathRound(LMathRound* instr) { | |
4047 // TODO(jbramley): We could provide a double result here using frint. | |
4048 DoubleRegister input = ToDoubleRegister(instr->value()); | |
4049 DoubleRegister temp1 = ToDoubleRegister(instr->temp1()); | |
4050 Register result = ToRegister(instr->result()); | |
4051 Label try_rounding; | |
4052 Label done; | |
4053 | |
4054 // Math.round() rounds to the nearest integer, with ties going towards | |
4055 // +infinity. This does not match any IEEE-754 rounding mode. | |
4056 // - Infinities and NaNs are propagated unchanged, but cause deopts because | |
4057 // they can't be represented as integers. | |
4058 // - The sign of the result is the same as the sign of the input. This means | |
4059 // that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a | |
4060 // result of -0.0. | |
4061 | |
4062 DoubleRegister dot_five = double_scratch(); | |
4063 __ Fmov(dot_five, 0.5); | |
4064 __ Fabs(temp1, input); | |
4065 __ Fcmp(temp1, dot_five); | |
4066 // If input is in [-0.5, -0], the result is -0. | |
4067 // If input is in [+0, +0.5[, the result is +0. | |
4068 // If the input is +0.5, the result is 1. | |
4069 __ B(hi, &try_rounding); // hi so NaN will also branch. | |
4070 | |
4071 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
4072 __ Fmov(result, input); | |
4073 DeoptimizeIfNegative(result, instr->environment()); // [-0.5, -0.0]. | |
4074 } | |
4075 __ Fcmp(input, dot_five); | |
4076 __ Mov(result, 1); // +0.5. | |
4077 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on | |
4078 // flag kBailoutOnMinusZero, will return 0 (xzr). | |
4079 __ Csel(result, result, xzr, eq); | |
4080 __ B(&done); | |
4081 | |
4082 __ Bind(&try_rounding); | |
4083 // Since we're providing a 32-bit result, we can implement ties-to-infinity by | |
4084 // adding 0.5 to the input, then taking the floor of the result. This does not | |
4085 // work for very large positive doubles because adding 0.5 would cause an | |
4086 // intermediate rounding stage, so a different approach will be necessary if a | |
4087 // double result is needed. | |
4088 __ Fadd(temp1, input, dot_five); | |
4089 __ Fcvtms(result, temp1); | |
4090 | |
4091 // Deopt if | |
4092 // * the input was NaN | |
4093 // * the result is not representable using a 32-bit integer. | |
4094 __ Fcmp(input, 0.0); | |
4095 __ Ccmp(result, Operand(result.W(), SXTW), NoFlag, vc); | |
4096 DeoptimizeIf(ne, instr->environment()); | |
4097 | |
4098 __ Bind(&done); | |
4099 } | |
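// Examples of the ties-towards-+infinity behaviour implemented above: | |
// round(2.5) == 3 and round(-2.5) == -2 (computed as floor(-2.5 + 0.5)). | |
// Math.round(-0.3) is -0 in JS, which an integer result cannot represent; | |
// hence the deoptimization when kBailoutOnMinusZero is set. | |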
4100 | |
4101 | |
4102 void LCodeGen::DoMathSqrt(LMathSqrt* instr) { | |
4103 DoubleRegister input = ToDoubleRegister(instr->value()); | |
4104 DoubleRegister result = ToDoubleRegister(instr->result()); | |
4105 __ Fsqrt(result, input); | |
4106 } | |
4107 | |
4108 | |
4109 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { | |
4110 HMathMinMax::Operation op = instr->hydrogen()->operation(); | |
4111 if (instr->hydrogen()->representation().IsInteger32()) { | |
4112 Register result = ToRegister32(instr->result()); | |
4113 Register left = ToRegister32(instr->left()); | |
4114 Operand right = ToOperand32I(instr->right()); | |
4115 | |
4116 __ Cmp(left, right); | |
4117 __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le); | |
4118 } else if (instr->hydrogen()->representation().IsSmi()) { | |
4119 Register result = ToRegister(instr->result()); | |
4120 Register left = ToRegister(instr->left()); | |
4121 Operand right = ToOperand(instr->right()); | |
4122 | |
4123 __ Cmp(left, right); | |
4124 __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le); | |
4125 } else { | |
4126 ASSERT(instr->hydrogen()->representation().IsDouble()); | |
4127 DoubleRegister result = ToDoubleRegister(instr->result()); | |
4128 DoubleRegister left = ToDoubleRegister(instr->left()); | |
4129 DoubleRegister right = ToDoubleRegister(instr->right()); | |
4130 | |
4131 if (op == HMathMinMax::kMathMax) { | |
4132 __ Fmax(result, left, right); | |
4133 } else { | |
4134 ASSERT(op == HMathMinMax::kMathMin); | |
4135 __ Fmin(result, left, right); | |
4136 } | |
4137 } | |
4138 } | |
4139 | |
4140 | |
4141 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { | |
4142 Register dividend = ToRegister32(instr->dividend()); | |
4143 int32_t divisor = instr->divisor(); | |
4144 ASSERT(dividend.is(ToRegister32(instr->result()))); | |
4145 | |
4146 // Theoretically, a variation of the branch-free code for integer division by | |
4147 // a power of 2 (calculating the remainder via an additional multiplication | |
4148 // (which gets simplified to an 'and') and subtraction) should be faster, and | |
4149 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to | |
4150 // indicate that positive dividends are heavily favored, so the branching | |
4151 // version performs better. | |
4152 HMod* hmod = instr->hydrogen(); | |
4153 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | |
4154 Label dividend_is_not_negative, done; | |
4155 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { | |
4156 __ Cmp(dividend, 0); | |
4157 __ B(pl, ÷nd_is_not_negative); | |
4158 // Note that this is correct even for kMinInt operands. | |
4159 __ Neg(dividend, dividend); | |
4160 __ And(dividend, dividend, mask); | |
4161 __ Negs(dividend, dividend); | |
4162 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
4163 DeoptimizeIf(eq, instr->environment()); | |
4164 } | |
4165 __ B(&done); | |
4166 } | |
4167 | |
4168 __ bind(÷nd_is_not_negative); | |
4169 __ And(dividend, dividend, mask); | |
4170 __ bind(&done); | |
4171 } | |
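// For example, -7 mod 4 is computed as -(7 & 3) == -3: the dividend is | |
// negated, masked with (divisor - 1), and negated back, because the result | |
// of % in JS takes the sign of the dividend. | |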
4172 | |
4173 | |
4174 void LCodeGen::DoModByConstI(LModByConstI* instr) { | |
4175 Register dividend = ToRegister32(instr->dividend()); | |
4176 int32_t divisor = instr->divisor(); | |
4177 Register result = ToRegister32(instr->result()); | |
4178 Register temp = ToRegister32(instr->temp()); | |
4179 ASSERT(!AreAliased(dividend, result, temp)); | |
4180 | |
4181 if (divisor == 0) { | |
4182 Deoptimize(instr->environment()); | |
4183 return; | |
4184 } | |
4185 | |
4186 __ TruncatingDiv(result, dividend, Abs(divisor)); | |
4187 __ Sxtw(dividend.X(), dividend); | |
4188 __ Mov(temp, Abs(divisor)); | |
4189 __ Smsubl(result.X(), result, temp, dividend.X()); | |
4190 | |
4191 // Check for negative zero. | |
4192 HMod* hmod = instr->hydrogen(); | |
4193 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
4194 Label remainder_not_zero; | |
4195 __ Cbnz(result, &remainder_not_zero); | |
4196 DeoptimizeIfNegative(dividend, instr->environment()); | |
4197 __ bind(&remainder_not_zero); | |
4198 } | |
4199 } | |
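// For example, -7 mod 3: TruncatingDiv yields the truncated quotient -2, and | |
// Smsubl then computes -7 - (-2 * 3) == -1, matching the JS semantics where | |
// the result takes the sign of the dividend. | |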
4200 | |
4201 | |
4202 void LCodeGen::DoModI(LModI* instr) { | |
4203 Register dividend = ToRegister32(instr->left()); | |
4204 Register divisor = ToRegister32(instr->right()); | |
4205 Register result = ToRegister32(instr->result()); | |
4206 | |
4207 Label deopt, done; | |
4208 // modulo = dividend - quotient * divisor | |
4209 __ Sdiv(result, dividend, divisor); | |
4210 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { | |
4211 // Combine the deoptimization sites. | |
4212 Label ok; | |
4213 __ Cbnz(divisor, &ok); | |
4214 __ Bind(&deopt); | |
4215 Deoptimize(instr->environment()); | |
4216 __ Bind(&ok); | |
4217 } | |
4218 __ Msub(result, result, divisor, dividend); | |
4219 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
4220 __ Cbnz(result, &done); | |
4221 if (deopt.is_bound()) { // TODO(all) This is a hack, remove this... | |
4222 __ Tbnz(dividend, kWSignBit, &deopt); | |
4223 } else { | |
4224 DeoptimizeIfNegative(dividend, instr->environment()); | |
4225 } | |
4226 } | |
4227 __ Bind(&done); | |
4228 } | |
4229 | |
4230 | |
4231 void LCodeGen::DoMulConstIS(LMulConstIS* instr) { | |
4232 ASSERT(instr->hydrogen()->representation().IsSmiOrInteger32()); | |
4233 bool is_smi = instr->hydrogen()->representation().IsSmi(); | |
4234 Register result = | |
4235 is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result()); | |
4236 Register left = | |
4237 is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()); | |
4238 int32_t right = ToInteger32(instr->right()); | |
4239 | |
4240 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | |
4241 bool bailout_on_minus_zero = | |
4242 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | |
4243 | |
4244 if (bailout_on_minus_zero) { | |
4245 if (right < 0) { | |
4246 // The result is -0 if right is negative and left is zero. | |
4247 DeoptimizeIfZero(left, instr->environment()); | |
4248 } else if (right == 0) { | |
4249 // The result is -0 if right is zero and left is negative. | |
4250 DeoptimizeIfNegative(left, instr->environment()); | |
4251 } | |
4252 } | |
4253 | |
4254 switch (right) { | |
4255 // Cases which can detect overflow. | |
4256 case -1: | |
4257 if (can_overflow) { | |
4258 // Only kMinInt (0x80000000) can overflow when negated. | |
4259 __ Negs(result, left); | |
4260 DeoptimizeIf(vs, instr->environment()); | |
4261 } else { | |
4262 __ Neg(result, left); | |
4263 } | |
4264 break; | |
4265 case 0: | |
4266 // This case can never overflow. | |
4267 __ Mov(result, 0); | |
4268 break; | |
4269 case 1: | |
4270 // This case can never overflow. | |
4271 __ Mov(result, left, kDiscardForSameWReg); | |
4272 break; | |
4273 case 2: | |
4274 if (can_overflow) { | |
4275 __ Adds(result, left, left); | |
4276 DeoptimizeIf(vs, instr->environment()); | |
4277 } else { | |
4278 __ Add(result, left, left); | |
4279 } | |
4280 break; | |
4281 | |
4282 // All other cases cannot detect overflow, because it would probably be no | |
4283 // faster than using the smull method in LMulI. | |
4284 // TODO(jbramley): Investigate this, and add overflow support if it would | |
4285 // be useful. | |
4286 default: | |
4287 ASSERT(!can_overflow); | |
4288 | |
4289 // Multiplication by constant powers of two (and some related values) | |
4290 // can be done efficiently with shifted operands. | |
4291 if (right >= 0) { | |
4292 if (IsPowerOf2(right)) { | |
4293 // result = left << log2(right) | |
4294 __ Lsl(result, left, WhichPowerOf2(right)); | |
4295 } else if (IsPowerOf2(right - 1)) { | |
4296 // result = left + left << log2(right - 1) | |
4297 __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1))); | |
4298 } else if (IsPowerOf2(right + 1)) { | |
4299 // result = -left + left << log2(right + 1) | |
4300 __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1))); | |
4301 __ Neg(result, result); | |
4302 } else { | |
4303 UNREACHABLE(); | |
4304 } | |
4305 } else { | |
4306 if (IsPowerOf2(-right)) { | |
4307 // result = -left << log2(-right) | |
4308 __ Neg(result, Operand(left, LSL, WhichPowerOf2(-right))); | |
4309 } else if (IsPowerOf2(-right + 1)) { | |
4310 // result = left - left << log2(-right + 1) | |
4311 __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1))); | |
4312 } else if (IsPowerOf2(-right - 1)) { | |
4313 // result = -left - left << log2(-right - 1) | |
4314 __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1))); | |
4315 __ Neg(result, result); | |
4316 } else { | |
4317 UNREACHABLE(); | |
4318 } | |
4319 } | |
4320 break; | |
4321 } | |
4322 } | |
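// Examples of the shifted-operand forms above: x * 8 is a single LSL by 3, | |
// x * 5 == x + (x << 2), and x * 7 == -(x - (x << 3)). | |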
4323 | |
4324 | |
4325 void LCodeGen::DoMulI(LMulI* instr) { | |
4326 Register result = ToRegister32(instr->result()); | |
4327 Register left = ToRegister32(instr->left()); | |
4328 Register right = ToRegister32(instr->right()); | |
4329 | |
4330 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | |
4331 bool bailout_on_minus_zero = | |
4332 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | |
4333 | |
4334 if (bailout_on_minus_zero && !left.Is(right)) { | |
4335 // If one operand is zero and the other is negative, the result is -0. | |
4336 // - Set Z (eq) if either left or right, or both, are 0. | |
4337 __ Cmp(left, 0); | |
4338 __ Ccmp(right, 0, ZFlag, ne); | |
4339 // - If so (eq), set N (mi) if left + right is negative. | |
4340 // - Otherwise, clear N. | |
4341 __ Ccmn(left, right, NoFlag, eq); | |
4342 DeoptimizeIf(mi, instr->environment()); | |
4343 } | |
4344 | |
4345 if (can_overflow) { | |
4346 __ Smull(result.X(), left, right); | |
4347 __ Cmp(result.X(), Operand(result, SXTW)); | |
4348 DeoptimizeIf(ne, instr->environment()); | |
4349 } else { | |
4350 __ Mul(result, left, right); | |
4351 } | |
4352 } | |
4353 | |
4354 | |
4355 void LCodeGen::DoMulS(LMulS* instr) { | |
4356 Register result = ToRegister(instr->result()); | |
4357 Register left = ToRegister(instr->left()); | |
4358 Register right = ToRegister(instr->right()); | |
4359 | |
4360 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | |
4361 bool bailout_on_minus_zero = | |
4362 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | |
4363 | |
4364 if (bailout_on_minus_zero && !left.Is(right)) { | |
4365 // If one operand is zero and the other is negative, the result is -0. | |
4366 // - Set Z (eq) if either left or right, or both, are 0. | |
4367 __ Cmp(left, 0); | |
4368 __ Ccmp(right, 0, ZFlag, ne); | |
4369 // - If so (eq), set N (mi) if left + right is negative. | |
4370 // - Otherwise, clear N. | |
4371 __ Ccmn(left, right, NoFlag, eq); | |
4372 DeoptimizeIf(mi, instr->environment()); | |
4373 } | |
4374 | |
4375 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0)); | |
4376 if (can_overflow) { | |
    __ Smulh(result, left, right);
    __ Cmp(result, Operand(result.W(), SXTW));
    __ SmiTag(result);
    DeoptimizeIf(ne, instr->environment());
  } else {
    if (AreAliased(result, left, right)) {
      // All three registers are the same: half untag the input and then
      // multiply, giving a tagged result.
      STATIC_ASSERT((kSmiShift % 2) == 0);
      __ Asr(result, left, kSmiShift / 2);
      __ Mul(result, result, result);
    } else if (result.Is(left) && !left.Is(right)) {
      // Registers result and left alias, right is distinct: untag left into
      // result, and then multiply by right, giving a tagged result.
      __ SmiUntag(result, left);
      __ Mul(result, result, right);
    } else {
      ASSERT(!left.Is(result));
      // Registers result and right alias, left is distinct, or all registers
      // are distinct: untag right into result, and then multiply by left,
      // giving a tagged result.
      __ SmiUntag(result, right);
      __ Mul(result, left, result);
    }
  }
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = ToRegister(instr->result());
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  // NumberTagU and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD: public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
  } else {
    __ B(deferred->entry());
  }

  __ Bind(deferred->exit());
  __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
}


void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
                                    LOperand* value,
                                    LOperand* temp1,
                                    LOperand* temp2) {
  Label slow, convert_and_store;
  Register src = ToRegister32(value);
  Register dst = ToRegister(instr->result());
  Register scratch1 = ToRegister(temp1);

  if (FLAG_inline_new) {
    Register scratch2 = ToRegister(temp2);
    __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
    __ B(&convert_and_store);
  }

  // Slow case: call the runtime system to do the number allocation.
  __ Bind(&slow);
  // TODO(3095996): Put a valid pointer value in the stack slot where the
  // result register is stored, as this register is in the pointer map, but
  // contains an integer value.
  __ Mov(dst, 0);
  {
    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

    // NumberTagU and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(x0, dst);
  }

  // Convert number to floating point and store in the newly allocated heap
  // number.
  __ Bind(&convert_and_store);
  DoubleRegister dbl_scratch = double_scratch();
  __ Ucvtf(dbl_scratch, src);
  __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU: public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredNumberTagU(instr_,
                                      instr_->value(),
                                      instr_->temp1(),
                                      instr_->temp2());
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  Register value = ToRegister32(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
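  // An unsigned value above Smi::kMaxValue cannot be represented as a smi,
  // so it is boxed as a heap number by the deferred code.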
  __ Cmp(value, Smi::kMaxValue);
  __ B(hi, deferred->entry());
  __ SmiTag(result, value.X());
  __ Bind(deferred->exit());
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());
  DoubleRegister result = ToDoubleRegister(instr->result());
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();

  Label done, load_smi;

  // Work out what untag mode we're working with.
  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    __ JumpIfSmi(input, &load_smi);

    Label convert_undefined;

    // Heap number map check.
    __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    if (can_convert_undefined_to_nan) {
      __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
                       &convert_undefined);
    } else {
      DeoptimizeIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
                          instr->environment());
    }

    // Load heap number.
    __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
    if (instr->hydrogen()->deoptimize_on_minus_zero()) {
      DeoptimizeIfMinusZero(result, instr->environment());
    }
    __ B(&done);

    if (can_convert_undefined_to_nan) {
      __ Bind(&convert_undefined);
      DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
                          instr->environment());

      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ B(&done);
    }

  } else {
    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
    // Fall through to load_smi.
  }

  // Smi to double register conversion.
  __ Bind(&load_smi);
  __ SmiUntagToDouble(result, input);

  __ Bind(&done);
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort(kDoPushArgumentNotImplementedForDoubleType);
  } else {
    __ Push(ToRegister(argument));
  }
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in x0. Since we're leaving
    // the code managed by the register allocator and tearing down the frame,
    // it's safe to write to the context register.
    __ Push(x0);
    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit, 1);
  }

  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  }

  int no_frame_start = -1;
  if (NeedsEagerFrame()) {
    Register stack_pointer = masm()->StackPointer();
    __ Mov(stack_pointer, fp);
    no_frame_start = masm_->pc_offset();
    __ Pop(fp, lr);
  }

  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    __ Drop(parameter_count + 1);
  } else {
    Register parameter_count = ToRegister(instr->parameter_count());
    __ DropBySMI(parameter_count);
  }
  __ Ret();

  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}


MemOperand LCodeGen::BuildSeqStringOperand(Register string,
                                           Register temp,
                                           LOperand* index,
                                           String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
  }

  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Add(temp, string, Operand(ToRegister32(index), SXTW));
  } else {
    STATIC_ASSERT(kUC16Size == 2);
    __ Add(temp, string, Operand(ToRegister32(index), SXTW, 1));
  }
  return FieldMemOperand(temp, SeqString::kHeaderSize);
}


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  if (FLAG_debug_code) {
    // Even though this lithium instruction comes with a temp register, we
    // can't use it here because we want to use "AtStart" constraints on the
    // inputs and the debug code here needs a scratch register.
    UseScratchRegisterScope temps(masm());
    Register dbg_temp = temps.AcquireX();

    __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset));
    __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset));

    __ And(dbg_temp, dbg_temp,
           Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING
                             ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType);
  }

  MemOperand operand =
      BuildSeqStringOperand(string, temp, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Ldrb(result, operand);
  } else {
    __ Ldrh(result, operand);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register value = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (FLAG_debug_code) {
    ASSERT(ToRegister(instr->context()).is(cp));
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
                                 encoding_mask);
  }
  MemOperand operand =
      BuildSeqStringOperand(string, temp, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Strb(value, operand);
  } else {
    __ Strh(value, operand);
  }
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    DeoptimizeIfNegative(input.W(), instr->environment());
  }
  __ SmiTag(output, input);
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Label done, untag;

  if (instr->needs_check()) {
    DeoptimizeIfNotSmi(input, instr->environment());
  }

  __ Bind(&untag);
  __ SmiUntag(result, input);
  __ Bind(&done);
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* right_op = instr->right();
  Register left = ToRegister32(instr->left());
  Register result = ToRegister32(instr->result());

  if (right_op->IsRegister()) {
    Register right = ToRegister32(instr->right());
    switch (instr->op()) {
      case Token::ROR: __ Ror(result, left, right); break;
      case Token::SAR: __ Asr(result, left, right); break;
      case Token::SHL: __ Lsl(result, left, right); break;
      case Token::SHR:
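        // SHR is an unsigned shift, so a shift amount of zero with a
        // negative input would yield an unsigned result outside the int32
        // range; deoptimize in that case.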
        if (instr->can_deopt()) {
          Label right_not_zero;
          __ Cbnz(right, &right_not_zero);
          DeoptimizeIfNegative(left, instr->environment());
          __ Bind(&right_not_zero);
        }
        __ Lsr(result, left, right);
        break;
      default: UNREACHABLE();
    }
  } else {
    ASSERT(right_op->IsConstantOperand());
    int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
    if (shift_count == 0) {
      if ((instr->op() == Token::SHR) && instr->can_deopt()) {
        DeoptimizeIfNegative(left, instr->environment());
      }
      __ Mov(result, left, kDiscardForSameWReg);
    } else {
      switch (instr->op()) {
        case Token::ROR: __ Ror(result, left, shift_count); break;
        case Token::SAR: __ Asr(result, left, shift_count); break;
        case Token::SHL: __ Lsl(result, left, shift_count); break;
        case Token::SHR: __ Lsr(result, left, shift_count); break;
        default: UNREACHABLE();
      }
    }
  }
}


void LCodeGen::DoShiftS(LShiftS* instr) {
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());

  // Only ROR by register needs a temp.
  ASSERT(((instr->op() == Token::ROR) && right_op->IsRegister()) ||
         (instr->temp() == NULL));

  if (right_op->IsRegister()) {
    Register right = ToRegister(instr->right());
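    // The shift amount is a tagged smi: Ubfx extracts its low five payload
    // bits (starting at bit kSmiShift) without untagging. For the right
    // shifts, Bic then clears any value bits shifted into the tag field so
    // that the result is a valid smi.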
    switch (instr->op()) {
      case Token::ROR: {
        Register temp = ToRegister(instr->temp());
        __ Ubfx(temp, right, kSmiShift, 5);
        __ SmiUntag(result, left);
        __ Ror(result.W(), result.W(), temp.W());
        __ SmiTag(result);
        break;
      }
      case Token::SAR:
        __ Ubfx(result, right, kSmiShift, 5);
        __ Asr(result, left, result);
        __ Bic(result, result, kSmiShiftMask);
        break;
      case Token::SHL:
        __ Ubfx(result, right, kSmiShift, 5);
        __ Lsl(result, left, result);
        break;
      case Token::SHR:
        if (instr->can_deopt()) {
          Label right_not_zero;
          __ Cbnz(right, &right_not_zero);
          DeoptimizeIfNegative(left, instr->environment());
          __ Bind(&right_not_zero);
        }
        __ Ubfx(result, right, kSmiShift, 5);
        __ Lsr(result, left, result);
        __ Bic(result, result, kSmiShiftMask);
        break;
      default: UNREACHABLE();
    }
  } else {
    ASSERT(right_op->IsConstantOperand());
    int shift_count = ToInteger32(LConstantOperand::cast(right_op)) & 0x1f;
    if (shift_count == 0) {
      if ((instr->op() == Token::SHR) && instr->can_deopt()) {
        DeoptimizeIfNegative(left, instr->environment());
      }
      __ Mov(result, left);
    } else {
      switch (instr->op()) {
        case Token::ROR:
          __ SmiUntag(result, left);
          __ Ror(result.W(), result.W(), shift_count);
          __ SmiTag(result);
          break;
        case Token::SAR:
          __ Asr(result, left, shift_count);
          __ Bic(result, result, kSmiShiftMask);
          break;
        case Token::SHL:
          __ Lsl(result, left, shift_count);
          break;
        case Token::SHR:
          __ Lsr(result, left, shift_count);
          __ Bic(result, result, kSmiShiftMask);
          break;
        default: UNREACHABLE();
      }
    }
  }
}


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ Debug("LDebugBreak", 0, BREAK);
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Register scratch1 = x5;
  Register scratch2 = x6;
  ASSERT(instr->IsMarkedAsCall());

  ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
  // TODO(all): if Mov could handle object in new space then it could be used
  // here.
  __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
  __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
  __ Push(cp, scratch1, scratch2);  // The context is the first argument.
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
    __ B(hs, &done);

    PredictableCodeSizeScope predictable(masm_,
                                         Assembler::kCallSizeWithRelocation);
    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());

    __ Bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
    __ B(lo, deferred_stack_check->entry());

    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ Bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  Register temp = ToRegister(instr->temp());
  __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
  __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = ToRegister(instr->temp());
  MemOperand target = ContextMemOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ Ldr(scratch, target);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex,
                       instr->environment());
    } else {
      __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
    }
  }

  __ Str(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
        ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch,
                              GetLinkRegisterState(),
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }
  __ Bind(&skip_assignment);
}


void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Register cell = ToRegister(instr->temp1());

  // Load the cell.
  __ Mov(cell, Operand(instr->hydrogen()->cell().handle()));

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted. We deoptimize in that case.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    Register payload = ToRegister(instr->temp2());
    __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
    DeoptimizeIfRoot(
        payload, Heap::kTheHoleValueRootIndex, instr->environment());
  }

  // Store the value.
  __ Str(value, FieldMemOperand(cell, Cell::kValueOffset));
  // Cells are always rescanned, so no write barrier here.
}


void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
  Register ext_ptr = ToRegister(instr->elements());
  Register key = no_reg;
  Register scratch;
  ElementsKind elements_kind = instr->elements_kind();

  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    ASSERT(instr->temp() == NULL);
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
    scratch = ToRegister(instr->temp());
  }

  MemOperand dst =
      PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
                                       key_is_constant, constant_key,
                                       elements_kind,
                                       instr->additional_index());

  if ((elements_kind == EXTERNAL_FLOAT32_ELEMENTS) ||
      (elements_kind == FLOAT32_ELEMENTS)) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    DoubleRegister dbl_scratch = double_scratch();
    __ Fcvt(dbl_scratch.S(), value);
    __ Str(dbl_scratch.S(), dst);
  } else if ((elements_kind == EXTERNAL_FLOAT64_ELEMENTS) ||
             (elements_kind == FLOAT64_ELEMENTS)) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ Str(value, dst);
  } else {
    Register value = ToRegister(instr->value());

    switch (elements_kind) {
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_INT8_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
      case INT8_ELEMENTS:
        __ Strb(value, dst);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case EXTERNAL_UINT16_ELEMENTS:
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ Strh(value, dst);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case EXTERNAL_UINT32_ELEMENTS:
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ Str(value.W(), dst);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
  Register elements = ToRegister(instr->elements());
  DoubleRegister value = ToDoubleRegister(instr->value());
  Register store_base = no_reg;
  int offset = 0;

  if (instr->key()->IsConstantOperand()) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xf0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    offset = FixedDoubleArray::OffsetOfElementAt(constant_key +
                                                 instr->additional_index());
    store_base = elements;
  } else {
    store_base = ToRegister(instr->temp());
    Register key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
    CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
                               instr->hydrogen()->elements_kind());
    offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index());
  }

  if (instr->NeedsCanonicalization()) {
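    // Fmaxnm propagates the numeric operand when exactly one input is NaN,
    // so ordinary values are stored unchanged, while a NaN input is replaced
    // by the canonical NaN and can never alias the hole's bit pattern.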
    DoubleRegister dbl_scratch = double_scratch();
    __ Fmov(dbl_scratch,
            FixedDoubleArray::canonical_not_the_hole_nan_as_double());
    __ Fmaxnm(dbl_scratch, dbl_scratch, value);
    __ Str(dbl_scratch, FieldMemOperand(store_base, offset));
  } else {
    __ Str(value, FieldMemOperand(store_base, offset));
  }
}


void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register scratch = no_reg;
  Register store_base = no_reg;
  Register key = no_reg;
  int offset = 0;

  if (!instr->key()->IsConstantOperand() ||
      instr->hydrogen()->NeedsWriteBarrier()) {
    scratch = ToRegister(instr->temp());
  }

  if (instr->key()->IsConstantOperand()) {
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
                                           instr->additional_index());
    store_base = elements;
  } else {
    store_base = scratch;
    key = ToRegister(instr->key());
    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
    CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged,
                               instr->hydrogen()->elements_kind());
    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
  }
  Representation representation = instr->hydrogen()->value()->representation();
  if (representation.IsInteger32()) {
    ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
    STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
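    // With the smi payload in the upper 32 bits, a 32-bit store of the
    // untagged integer to the payload half of the slot
    // (UntagSmiFieldMemOperand) yields a correctly tagged smi: the entry is
    // already an initialized smi, so its tag half is known to be zero.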
    __ Store(value, UntagSmiFieldMemOperand(store_base, offset),
             Representation::Integer32());
  } else {
    __ Store(value, FieldMemOperand(store_base, offset), representation);
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    ASSERT(representation.IsTagged());
    // This assignment may cause element_addr to alias store_base.
    Register element_addr = scratch;
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
        ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ Add(element_addr, store_base, offset - kHeapObjectTag);
    __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
                   kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed);
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).Is(x2));
  ASSERT(ToRegister(instr->key()).Is(x1));
  ASSERT(ToRegister(instr->value()).Is(x0));

  Handle<Code> ic = instr->strict_mode() == STRICT
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  HObjectAccess access = instr->hydrogen()->access();
  Handle<Map> transition = instr->transition();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    ASSERT(transition.is_null());
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    Register value = ToRegister(instr->value());
    __ Store(value, MemOperand(object, offset), representation);
    return;
  } else if (representation.IsDouble()) {
    ASSERT(transition.is_null());
    ASSERT(access.IsInobject());
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    FPRegister value = ToDoubleRegister(instr->value());
    __ Str(value, FieldMemOperand(object, offset));
    return;
  }

  Register value = ToRegister(instr->value());

  SmiCheck check_needed = instr->hydrogen()->value()->IsHeapObject()
      ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;

  if (representation.IsHeapObject() &&
      !instr->hydrogen()->value()->type().IsHeapObject()) {
    DeoptimizeIfSmi(value, instr->environment());

    // We know now that value is not a smi, so we can omit the check below.
    check_needed = OMIT_SMI_CHECK;
  }

  if (!transition.is_null()) {
    // Store the new map value.
    Register new_map_value = ToRegister(instr->temp0());
    __ Mov(new_map_value, Operand(transition));
    __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      // Update the write barrier for the map field.
      __ RecordWriteField(object,
                          HeapObject::kMapOffset,
                          new_map_value,
                          ToRegister(instr->temp1()),
                          GetLinkRegisterState(),
                          kSaveFPRegs,
                          OMIT_REMEMBERED_SET,
                          OMIT_SMI_CHECK);
    }
  }

  // Do the store.
  Register destination;
  if (access.IsInobject()) {
    destination = object;
  } else {
    Register temp0 = ToRegister(instr->temp0());
    __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
    destination = temp0;
  }

  if (representation.IsSmi() &&
      instr->hydrogen()->value()->representation().IsInteger32()) {
    ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
#ifdef DEBUG
    Register temp0 = ToRegister(instr->temp0());
    __ Ldr(temp0, FieldMemOperand(destination, offset));
    __ AssertSmi(temp0);
    // If destination aliased temp0, restore it to the address calculated
    // earlier.
    if (destination.Is(temp0)) {
      ASSERT(!access.IsInobject());
      __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
    }
#endif
    STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
    __ Store(value, UntagSmiFieldMemOperand(destination, offset),
             Representation::Integer32());
  } else {
    __ Store(value, FieldMemOperand(destination, offset), representation);
  }
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    __ RecordWriteField(destination,
                        offset,
                        value,                        // Clobbered.
                        ToRegister(instr->temp1()),   // Clobbered.
                        GetLinkRegisterState(),
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        check_needed);
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->value()).is(x0));
  ASSERT(ToRegister(instr->object()).is(x1));

  // Name must be in x2.
  __ Mov(x2, Operand(instr->name()));
  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->left()).Is(x1));
  ASSERT(ToRegister(instr->right()).Is(x0));
  StringAddStub stub(instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt: public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister32(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ Bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ Push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  Register index = ToRegister(instr->index());
  __ SmiTag(index);
  __ Push(index);

  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
                          instr->context());
  __ AssertSmi(x0);
  __ SmiUntag(x0);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode: public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister32(instr->char_code());
  Register result = ToRegister(instr->result());

  __ Cmp(char_code, String::kMaxOneByteCharCode);
  __ B(hi, deferred->entry());
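  // Fast path: look the character up in the single-character string cache.
  // An undefined entry means the string hasn't been cached yet and must be
  // created by the deferred code.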
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ Add(result, result, Operand(char_code, SXTW, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
  __ B(eq, deferred->entry());
  __ Bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(result, 0);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ SmiTag(char_code);
  __ Push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  InlineSmiCheckInfo::EmitNotInlined(masm());

  Condition condition = TokenToCondition(op, false);

  EmitCompareAndBranch(instr, condition, x0, 0);
}


void LCodeGen::DoSubI(LSubI* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToOperand32I(instr->right());
  if (can_overflow) {
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr->environment());
  } else {
    __ Sub(result, left, right);
  }
}


void LCodeGen::DoSubS(LSubS* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());
  if (can_overflow) {
    __ Subs(result, left, right);
    DeoptimizeIf(vs, instr->environment());
  } else {
    __ Sub(result, left, right);
  }
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
                                   LOperand* value,
                                   LOperand* temp1,
                                   LOperand* temp2) {
  Register input = ToRegister(value);
  Register scratch1 = ToRegister(temp1);
  DoubleRegister dbl_scratch1 = double_scratch();

  Label done;

  // Load heap object map.
  __ Ldr(scratch1, FieldMemOperand(input, HeapObject::kMapOffset));

  if (instr->truncating()) {
    Register output = ToRegister(instr->result());
    Register scratch2 = ToRegister(temp2);
    Label check_bools;

    // If it's not a heap number, jump to undefined check.
    __ JumpIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, &check_bools);

    // A heap number: load value and convert to int32 using truncating
    // function.
    __ TruncateHeapNumberToI(output, input);
    __ B(&done);

    __ Bind(&check_bools);

    Register true_root = output;
    Register false_root = scratch2;
    __ LoadTrueFalseRoots(true_root, false_root);
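    // Convert true to 1 and false to 0 without branching: Cset materialises
    // the result of the comparison with true, and the conditional compare
    // keeps eq set for either boolean, so anything else falls through to
    // the undefined check below.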
    __ Cmp(input, true_root);
    __ Cset(output, eq);
    __ Ccmp(input, false_root, ZFlag, ne);
    __ B(eq, &done);

    // Output contains zero, undefined is converted to zero for truncating
    // conversions.
    DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
                        instr->environment());
  } else {
    Register output = ToRegister32(instr->result());

    DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);

    // Deoptimize if it's not a heap number.
    DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex,
                        instr->environment());

    // A heap number: load value and convert to int32 using non-truncating
    // function. If the result is out of range, branch to deoptimize.
    __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
    __ TryConvertDoubleToInt32(output, dbl_scratch1, dbl_scratch2);
    DeoptimizeIf(ne, instr->environment());

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Cmp(output, 0);
      __ B(ne, &done);
      __ Fmov(scratch1, dbl_scratch1);
      DeoptimizeIfNegative(scratch1, instr->environment());
    }
  }
  __ Bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI: public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
                                     instr_->temp2());
    }

    virtual LInstruction* instr() { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(output, input);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    __ JumpIfNotSmi(input, deferred->entry());
    __ SmiUntag(output, input);
    __ Bind(deferred->exit());
  }
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).Is(x0));
  ASSERT(ToRegister(instr->result()).Is(x0));
  __ Push(x0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // x7 = literals array.
  // x1 = regexp literal.
  // x0 = regexp literal clone.
  // x10-x12 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadObject(x7, instr->hydrogen()->literals());
  __ Ldr(x1, FieldMemOperand(x7, literal_offset));
  __ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);

  // Create regexp literal using runtime function
  // Result will be in x0.
  __ Mov(x12, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ Mov(x11, Operand(instr->hydrogen()->pattern()));
  __ Mov(x10, Operand(instr->hydrogen()->flags()));
  __ Push(x7, x12, x11, x10);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ Mov(x1, x0);

  __ Bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, x0, x10, x11, &runtime_allocate, TAG_OBJECT);
  __ B(&allocated);

  __ Bind(&runtime_allocate);
  __ Mov(x0, Smi::FromInt(size));
  __ Push(x1, x0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ Pop(x1);

  __ Bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(x0, x1, CPURegList(x10, x11, x12), size / kPointerSize);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object = ToRegister(instr->object());
  Register temp1 = ToRegister(instr->temp1());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map = ToRegister(instr->temp2());
    __ Mov(new_map, Operand(to_map));
    __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteField(object, HeapObject::kMapOffset, new_map, temp1,
                        GetLinkRegisterState(), kDontSaveFPRegs);
  } else {
    ASSERT(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(
        this, Safepoint::kWithRegistersAndDoubles);
    __ Mov(x0, object);
    __ Mov(x1, Operand(to_map));
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegistersAndDoubles(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  }
  __ Bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  Label no_memento_found;
  __ JumpIfJSArrayHasAllocationMemento(object, temp1, temp2, &no_memento_found);
  Deoptimize(instr->environment());
  __ Bind(&no_memento_found);
}


void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ TruncateDoubleToI(result, input);
  if (instr->tag_result()) {
    __ SmiTag(result, result);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  Register input = ToRegister(instr->value());
  __ Push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Handle<String> type_name = instr->type_literal();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Register value = ToRegister(instr->value());

  if (type_name->Equals(heap()->number_string())) {
    ASSERT(instr->temp1() != NULL);
    Register map = ToRegister(instr->temp1());

    __ JumpIfSmi(value, true_label);
    __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
    __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
    EmitBranch(instr, eq);

  } else if (type_name->Equals(heap()->string_string())) {
    ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ JumpIfObjectType(
        value, map, scratch, FIRST_NONSTRING_TYPE, false_label, ge);
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);

  } else if (type_name->Equals(heap()->symbol_string())) {
    ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
    EmitBranch(instr, eq);

  } else if (type_name->Equals(heap()->boolean_string())) {
    __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
    __ CompareRoot(value, Heap::kFalseValueRootIndex);
    EmitBranch(instr, eq);

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
    __ CompareRoot(value, Heap::kNullValueRootIndex);
    EmitBranch(instr, eq);

  } else if (type_name->Equals(heap()->undefined_string())) {
    ASSERT(instr->temp1() != NULL);
    Register scratch = ToRegister(instr->temp1());

    __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
    __ JumpIfSmi(value, false_label);
    // Check for undetectable objects and jump to the true branch in this case.
    __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
    __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);

  } else if (type_name->Equals(heap()->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    ASSERT(instr->temp1() != NULL);
    Register type = ToRegister(instr->temp1());

    __ JumpIfSmi(value, false_label);
    __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
    // HeapObject's type has been loaded into type register by JumpIfObjectType.
    EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);

  } else if (type_name->Equals(heap()->object_string())) {
    ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    if (!FLAG_harmony_typeof) {
      __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
    }
    __ JumpIfObjectType(value, map, scratch,
                        FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
    __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ B(gt, false_label);
    // Check for undetectable objects => false.
    // The undetectable bit lives in the map's bit field; map was loaded by
    // JumpIfObjectType above.
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);

  } else {
    __ B(false_label);
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  Register temp = ToRegister(instr->temp());
  __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Cmp(map, temp);
  DeoptimizeIf(ne, instr->environment());
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // If the receiver is null or undefined, we have to pass the global object as
  // a receiver to normal functions. Values have to be passed unchanged to
  // builtins and strict-mode functions.
  Label global_object, done, deopt;

  if (!instr->hydrogen()->known_function()) {
    __ Ldr(result, FieldMemOperand(function,
                                   JSFunction::kSharedFunctionInfoOffset));

    // CompilerHints is an int32 field. See objects.h.
    __ Ldr(result.W(),
           FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));

    // Do not transform the receiver to object for strict mode functions.
    __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &done);

    // Do not transform the receiver to object for builtins.
    __ Tbnz(result, SharedFunctionInfo::kNative, &done);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
  __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);

  // Deoptimize if the receiver is not a JS object.
  __ JumpIfSmi(receiver, &deopt);
  __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
  __ Mov(result, receiver);
  __ B(ge, &done);
  // Otherwise, fall through to deopt.

  __ Bind(&deopt);
  Deoptimize(instr->environment());

  __ Bind(&global_object);
  __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
  __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));

  __ Bind(&done);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());

  __ AssertSmi(index);

  Label out_of_object, done;
  __ Cmp(index, Smi::FromInt(0));
  __ B(lt, &out_of_object);

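  // A non-negative index designates an in-object property: scale the
  // untagged index by the pointer size and load relative to the start of
  // the object.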
  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));

  __ B(&done);

  __ Bind(&out_of_object);
  __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out of object property index plus 1.
  __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ Bind(&done);
}

} }  // namespace v8::internal