Chromium Code Reviews

Side by Side Diff: src/interpreter/interpreter.cc

Issue 2765433003: [interpreter] Split bytecode generation out of interpreter.cc (Closed)
Patch Set: addressed nits Created 3 years, 9 months ago
1 // Copyright 2015 the V8 project authors. All rights reserved. 1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/interpreter/interpreter.h" 5 #include "src/interpreter/interpreter.h"
6 6
7 #include <array>
8 #include <fstream> 7 #include <fstream>
9 #include <memory> 8 #include <memory>
10 9
11 #include "src/ast/prettyprinter.h" 10 #include "src/codegen.h"
12 #include "src/builtins/builtins-arguments.h"
13 #include "src/builtins/builtins-constructor.h"
14 #include "src/builtins/builtins-forin.h"
15 #include "src/code-factory.h"
16 #include "src/compilation-info.h" 11 #include "src/compilation-info.h"
17 #include "src/compiler.h" 12 #include "src/compiler.h"
18 #include "src/counters.h" 13 #include "src/counters.h"
19 #include "src/debug/debug.h"
20 #include "src/factory.h"
21 #include "src/ic/accessor-assembler.h"
22 #include "src/interpreter/bytecode-flags.h"
23 #include "src/interpreter/bytecode-generator.h" 14 #include "src/interpreter/bytecode-generator.h"
24 #include "src/interpreter/bytecodes.h" 15 #include "src/interpreter/bytecodes.h"
25 #include "src/interpreter/interpreter-assembler.h" 16 #include "src/interpreter/interpreter-generator.h"
26 #include "src/interpreter/interpreter-intrinsics.h"
27 #include "src/log.h" 17 #include "src/log.h"
28 #include "src/objects-inl.h" 18 #include "src/objects.h"
29 #include "src/zone/zone.h"
30 19
31 namespace v8 { 20 namespace v8 {
32 namespace internal { 21 namespace internal {
33 namespace interpreter { 22 namespace interpreter {
34 23
35 using compiler::Node;
36 typedef CodeStubAssembler::Label Label;
37 typedef CodeStubAssembler::Variable Variable;
38
39 #define __ assembler->
40
41 class InterpreterCompilationJob final : public CompilationJob { 24 class InterpreterCompilationJob final : public CompilationJob {
42 public: 25 public:
43 explicit InterpreterCompilationJob(CompilationInfo* info); 26 explicit InterpreterCompilationJob(CompilationInfo* info);
44 27
45 protected: 28 protected:
46 Status PrepareJobImpl() final; 29 Status PrepareJobImpl() final;
47 Status ExecuteJobImpl() final; 30 Status ExecuteJobImpl() final;
48 Status FinalizeJobImpl() final; 31 Status FinalizeJobImpl() final;
49 32
50 private: 33 private:
(...skipping 36 matching lines...)
87 70
88 DISALLOW_COPY_AND_ASSIGN(InterpreterCompilationJob); 71 DISALLOW_COPY_AND_ASSIGN(InterpreterCompilationJob);
89 }; 72 };
90 73
91 Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) { 74 Interpreter::Interpreter(Isolate* isolate) : isolate_(isolate) {
92 memset(dispatch_table_, 0, sizeof(dispatch_table_)); 75 memset(dispatch_table_, 0, sizeof(dispatch_table_));
93 } 76 }
94 77
95 void Interpreter::Initialize() { 78 void Interpreter::Initialize() {
96 if (!ShouldInitializeDispatchTable()) return; 79 if (!ShouldInitializeDispatchTable()) return;
97 Zone zone(isolate_->allocator(), ZONE_NAME);
98 HandleScope scope(isolate_); 80 HandleScope scope(isolate_);
99 81
100 if (FLAG_trace_ignition_dispatches) { 82 if (FLAG_trace_ignition_dispatches) {
101 static const int kBytecodeCount = static_cast<int>(Bytecode::kLast) + 1; 83 static const int kBytecodeCount = static_cast<int>(Bytecode::kLast) + 1;
102 bytecode_dispatch_counters_table_.reset( 84 bytecode_dispatch_counters_table_.reset(
103 new uintptr_t[kBytecodeCount * kBytecodeCount]); 85 new uintptr_t[kBytecodeCount * kBytecodeCount]);
104 memset(bytecode_dispatch_counters_table_.get(), 0, 86 memset(bytecode_dispatch_counters_table_.get(), 0,
105 sizeof(uintptr_t) * kBytecodeCount * kBytecodeCount); 87 sizeof(uintptr_t) * kBytecodeCount * kBytecodeCount);
106 } 88 }
107 89
108 // Generate bytecode handlers for all bytecodes and scales. 90 // Generate bytecode handlers for all bytecodes and scales.
109 const OperandScale kOperandScales[] = { 91 const OperandScale kOperandScales[] = {
110 #define VALUE(Name, _) OperandScale::k##Name, 92 #define VALUE(Name, _) OperandScale::k##Name,
111 OPERAND_SCALE_LIST(VALUE) 93 OPERAND_SCALE_LIST(VALUE)
112 #undef VALUE 94 #undef VALUE
113 }; 95 };
114 96
115 for (OperandScale operand_scale : kOperandScales) { 97 for (OperandScale operand_scale : kOperandScales) {
116 #define GENERATE_CODE(Name, ...) \ 98 #define GENERATE_CODE(Name, ...) \
117 InstallBytecodeHandler(&zone, Bytecode::k##Name, operand_scale, \ 99 InstallBytecodeHandler(isolate_, Bytecode::k##Name, operand_scale);
118 &Interpreter::Do##Name);
119 BYTECODE_LIST(GENERATE_CODE) 100 BYTECODE_LIST(GENERATE_CODE)
120 #undef GENERATE_CODE 101 #undef GENERATE_CODE
121 } 102 }
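
For reference, the dispatch table this loop fills is laid out as one 256-entry block per operand scale; a minimal sketch of the index computation, assuming kEntriesPerOperandScale == 256 (the helper name below is illustrative; the real helper is GetDispatchTableIndex elsewhere in this file):

  // Sketch only: one 256-entry dispatch-table block per operand scale
  // (kSingle, kDouble, kQuadruple), indexed by the bytecode's byte value.
  static size_t DispatchTableIndexSketch(Bytecode bytecode,
                                         OperandScale operand_scale) {
    const size_t kEntriesPerOperandScale = 256;
    size_t scale_block = 0;
    switch (operand_scale) {
      case OperandScale::kSingle:    scale_block = 0; break;
      case OperandScale::kDouble:    scale_block = 1; break;
      case OperandScale::kQuadruple: scale_block = 2; break;
    }
    return scale_block * kEntriesPerOperandScale +
           static_cast<size_t>(Bytecodes::ToByte(bytecode));
  }
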
122 103
123 // Fill unused entries with the illegal bytecode handler. 104 // Fill unused entries with the illegal bytecode handler.
124 size_t illegal_index = 105 size_t illegal_index =
125 GetDispatchTableIndex(Bytecode::kIllegal, OperandScale::kSingle); 106 GetDispatchTableIndex(Bytecode::kIllegal, OperandScale::kSingle);
126 for (size_t index = 0; index < arraysize(dispatch_table_); ++index) { 107 for (size_t index = 0; index < arraysize(dispatch_table_); ++index) {
127 if (dispatch_table_[index] == nullptr) { 108 if (dispatch_table_[index] == nullptr) {
128 dispatch_table_[index] = dispatch_table_[illegal_index]; 109 dispatch_table_[index] = dispatch_table_[illegal_index];
(...skipping 26 matching lines...)
155 CHECK_LT(offset, index); 136 CHECK_LT(offset, index);
156 dispatch_table_[index] = dispatch_table_[index - offset]; 137 dispatch_table_[index] = dispatch_table_[index - offset];
157 return true; 138 return true;
158 break; 139 break;
159 } 140 }
160 default: 141 default:
161 return false; 142 return false;
162 } 143 }
163 } 144 }
164 145
165 void Interpreter::InstallBytecodeHandler(Zone* zone, Bytecode bytecode, 146 void Interpreter::InstallBytecodeHandler(Isolate* isolate, Bytecode bytecode,
166 OperandScale operand_scale, 147 OperandScale operand_scale) {
167 BytecodeGeneratorFunc generator) {
168 if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return; 148 if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
169 if (ReuseExistingHandler(bytecode, operand_scale)) return; 149 if (ReuseExistingHandler(bytecode, operand_scale)) return;
170 150
171 size_t index = GetDispatchTableIndex(bytecode, operand_scale); 151 size_t index = GetDispatchTableIndex(bytecode, operand_scale);
172 InterpreterDispatchDescriptor descriptor(isolate_); 152 Handle<Code> code = GenerateBytecodeHandler(isolate, bytecode, operand_scale);
173 compiler::CodeAssemblerState state(
174 isolate_, zone, descriptor, Code::ComputeFlags(Code::BYTECODE_HANDLER),
175 Bytecodes::ToString(bytecode), Bytecodes::ReturnCount(bytecode));
176 InterpreterAssembler assembler(&state, bytecode, operand_scale);
177 if (Bytecodes::MakesCallAlongCriticalPath(bytecode)) {
178 assembler.SaveBytecodeOffset();
179 }
180 (this->*generator)(&assembler);
181 Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
182 dispatch_table_[index] = code->entry(); 153 dispatch_table_[index] = code->entry();
183 TraceCodegen(code);
184 PROFILE(isolate_, CodeCreateEvent(
185 CodeEventListener::BYTECODE_HANDLER_TAG,
186 AbstractCode::cast(*code),
187 Bytecodes::ToString(bytecode, operand_scale).c_str()));
188 } 154 }
189 155
190 Code* Interpreter::GetBytecodeHandler(Bytecode bytecode, 156 Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
191 OperandScale operand_scale) { 157 OperandScale operand_scale) {
192 DCHECK(IsDispatchTableInitialized()); 158 DCHECK(IsDispatchTableInitialized());
193 DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale)); 159 DCHECK(Bytecodes::BytecodeHasHandler(bytecode, operand_scale));
194 size_t index = GetDispatchTableIndex(bytecode, operand_scale); 160 size_t index = GetDispatchTableIndex(bytecode, operand_scale);
195 Address code_entry = dispatch_table_[index]; 161 Address code_entry = dispatch_table_[index];
196 return Code::GetCodeFromTargetAddress(code_entry); 162 return Code::GetCodeFromTargetAddress(code_entry);
197 } 163 }
(...skipping 124 matching lines...)
322 bool Interpreter::ShouldInitializeDispatchTable() { 288 bool Interpreter::ShouldInitializeDispatchTable() {
323 if (FLAG_trace_ignition || FLAG_trace_ignition_codegen || 289 if (FLAG_trace_ignition || FLAG_trace_ignition_codegen ||
324 FLAG_trace_ignition_dispatches) { 290 FLAG_trace_ignition_dispatches) {
325 // Regenerate table to add bytecode tracing operations, print the assembly 291 // Regenerate table to add bytecode tracing operations, print the assembly
326 // code generated by TurboFan or instrument handlers with dispatch counters. 292 // code generated by TurboFan or instrument handlers with dispatch counters.
327 return true; 293 return true;
328 } 294 }
329 return !IsDispatchTableInitialized(); 295 return !IsDispatchTableInitialized();
330 } 296 }
331 297
332 void Interpreter::TraceCodegen(Handle<Code> code) {
333 #ifdef ENABLE_DISASSEMBLER
334 if (FLAG_trace_ignition_codegen) {
335 OFStream os(stdout);
336 code->Disassemble(nullptr, os);
337 os << std::flush;
338 }
339 #endif // ENABLE_DISASSEMBLER
340 }
341
342 const char* Interpreter::LookupNameOfBytecodeHandler(Code* code) { 298 const char* Interpreter::LookupNameOfBytecodeHandler(Code* code) {
343 #ifdef ENABLE_DISASSEMBLER 299 #ifdef ENABLE_DISASSEMBLER
344 #define RETURN_NAME(Name, ...) \ 300 #define RETURN_NAME(Name, ...) \
345 if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == \ 301 if (dispatch_table_[Bytecodes::ToByte(Bytecode::k##Name)] == \
346 code->entry()) { \ 302 code->entry()) { \
347 return #Name; \ 303 return #Name; \
348 } 304 }
349 BYTECODE_LIST(RETURN_NAME) 305 BYTECODE_LIST(RETURN_NAME)
350 #undef RETURN_NAME 306 #undef RETURN_NAME
351 #endif // ENABLE_DISASSEMBLER 307 #endif // ENABLE_DISASSEMBLER
(...skipping 52 matching lines...)
404 .ToLocalChecked(); 360 .ToLocalChecked();
405 361
406 CHECK( 362 CHECK(
407 counters_map->DefineOwnProperty(context, from_name_object, counters_row) 363 counters_map->DefineOwnProperty(context, from_name_object, counters_row)
408 .IsJust()); 364 .IsJust());
409 } 365 }
410 366
411 return counters_map; 367 return counters_map;
412 } 368 }
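
The counters read here come from the flat kBytecodeCount x kBytecodeCount table allocated in Initialize(); assuming the usual row-major [from][to] layout, the lookup amounts to (sketch, helper name illustrative):

  // Sketch: dispatch counters count transitions from one bytecode handler
  // to the next, stored row-major in a flat array.
  uintptr_t DispatchCounterSketch(const uintptr_t* table, int bytecode_count,
                                  int from, int to) {
    return table[from * bytecode_count + to];
  }
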
413 369
414 // LdaZero
415 //
416 // Load literal '0' into the accumulator.
417 void Interpreter::DoLdaZero(InterpreterAssembler* assembler) {
418 Node* zero_value = __ NumberConstant(0.0);
419 __ SetAccumulator(zero_value);
420 __ Dispatch();
421 }
422
423 // LdaSmi <imm>
424 //
425 // Load an integer literal into the accumulator as a Smi.
426 void Interpreter::DoLdaSmi(InterpreterAssembler* assembler) {
427 Node* smi_int = __ BytecodeOperandImmSmi(0);
428 __ SetAccumulator(smi_int);
429 __ Dispatch();
430 }
431
432 // LdaConstant <idx>
433 //
434 // Load constant literal at |idx| in the constant pool into the accumulator.
435 void Interpreter::DoLdaConstant(InterpreterAssembler* assembler) {
436 Node* index = __ BytecodeOperandIdx(0);
437 Node* constant = __ LoadConstantPoolEntry(index);
438 __ SetAccumulator(constant);
439 __ Dispatch();
440 }
441
442 // LdaUndefined
443 //
444 // Load Undefined into the accumulator.
445 void Interpreter::DoLdaUndefined(InterpreterAssembler* assembler) {
446 Node* undefined_value =
447 __ HeapConstant(isolate_->factory()->undefined_value());
448 __ SetAccumulator(undefined_value);
449 __ Dispatch();
450 }
451
452 // LdaNull
453 //
454 // Load Null into the accumulator.
455 void Interpreter::DoLdaNull(InterpreterAssembler* assembler) {
456 Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
457 __ SetAccumulator(null_value);
458 __ Dispatch();
459 }
460
461 // LdaTheHole
462 //
463 // Load TheHole into the accumulator.
464 void Interpreter::DoLdaTheHole(InterpreterAssembler* assembler) {
465 Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
466 __ SetAccumulator(the_hole_value);
467 __ Dispatch();
468 }
469
470 // LdaTrue
471 //
472 // Load True into the accumulator.
473 void Interpreter::DoLdaTrue(InterpreterAssembler* assembler) {
474 Node* true_value = __ HeapConstant(isolate_->factory()->true_value());
475 __ SetAccumulator(true_value);
476 __ Dispatch();
477 }
478
479 // LdaFalse
480 //
481 // Load False into the accumulator.
482 void Interpreter::DoLdaFalse(InterpreterAssembler* assembler) {
483 Node* false_value = __ HeapConstant(isolate_->factory()->false_value());
484 __ SetAccumulator(false_value);
485 __ Dispatch();
486 }
487
488 // Ldar <src>
489 //
490 // Load accumulator with value from register <src>.
491 void Interpreter::DoLdar(InterpreterAssembler* assembler) {
492 Node* reg_index = __ BytecodeOperandReg(0);
493 Node* value = __ LoadRegister(reg_index);
494 __ SetAccumulator(value);
495 __ Dispatch();
496 }
497
498 // Star <dst>
499 //
500 // Store accumulator to register <dst>.
501 void Interpreter::DoStar(InterpreterAssembler* assembler) {
502 Node* reg_index = __ BytecodeOperandReg(0);
503 Node* accumulator = __ GetAccumulator();
504 __ StoreRegister(accumulator, reg_index);
505 __ Dispatch();
506 }
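
To place these accumulator/register bytecodes, here is a hand-written sketch of what Ignition plausibly emits for a trivial function (not captured from --print-bytecode, so register numbers and operand details are illustrative):

  // JavaScript               Bytecode sketch
  // function f() {
  //   var x = 1;             LdaSmi [1]    ;; accumulator = Smi 1
  //                          Star r0       ;; r0 = accumulator
  //   var y = x;             Ldar r0       ;; accumulator = r0
  //                          Star r1       ;; r1 = accumulator
  //   return y;              Ldar r1
  // }                        Return
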
507
508 // Mov <src> <dst>
509 //
510 // Stores the value of register <src> to register <dst>.
511 void Interpreter::DoMov(InterpreterAssembler* assembler) {
512 Node* src_index = __ BytecodeOperandReg(0);
513 Node* src_value = __ LoadRegister(src_index);
514 Node* dst_index = __ BytecodeOperandReg(1);
515 __ StoreRegister(src_value, dst_index);
516 __ Dispatch();
517 }
518
519 void Interpreter::BuildLoadGlobalIC(int slot_operand_index,
520 int name_operand_index,
521 TypeofMode typeof_mode,
522 InterpreterAssembler* assembler) {
523 // Must be kept in sync with AccessorAssembler::LoadGlobalIC.
524
525 // Load the global via the LoadGlobalIC.
526 Node* feedback_vector = __ LoadFeedbackVector();
527 Node* feedback_slot = __ BytecodeOperandIdx(slot_operand_index);
528
529 AccessorAssembler accessor_asm(assembler->state());
530
531 Label try_handler(assembler, Label::kDeferred),
532 miss(assembler, Label::kDeferred);
533
534 // Fast path without frame construction for the data case.
535 {
536 Label done(assembler);
537 Variable var_result(assembler, MachineRepresentation::kTagged);
538 ExitPoint exit_point(assembler, &done, &var_result);
539
540 accessor_asm.LoadGlobalIC_TryPropertyCellCase(
541 feedback_vector, feedback_slot, &exit_point, &try_handler, &miss,
542 CodeStubAssembler::INTPTR_PARAMETERS);
543
544 __ Bind(&done);
545 __ SetAccumulator(var_result.value());
546 __ Dispatch();
547 }
548
549 // Slow path with frame construction.
550 {
551 Label done(assembler);
552 Variable var_result(assembler, MachineRepresentation::kTagged);
553 ExitPoint exit_point(assembler, &done, &var_result);
554
555 __ Bind(&try_handler);
556 {
557 Node* context = __ GetContext();
558 Node* smi_slot = __ SmiTag(feedback_slot);
559 Node* name_index = __ BytecodeOperandIdx(name_operand_index);
560 Node* name = __ LoadConstantPoolEntry(name_index);
561
562 AccessorAssembler::LoadICParameters params(context, nullptr, name,
563 smi_slot, feedback_vector);
564 accessor_asm.LoadGlobalIC_TryHandlerCase(&params, typeof_mode,
565 &exit_point, &miss);
566 }
567
568 __ Bind(&miss);
569 {
570 Node* context = __ GetContext();
571 Node* smi_slot = __ SmiTag(feedback_slot);
572 Node* name_index = __ BytecodeOperandIdx(name_operand_index);
573 Node* name = __ LoadConstantPoolEntry(name_index);
574
575 AccessorAssembler::LoadICParameters params(context, nullptr, name,
576 smi_slot, feedback_vector);
577 accessor_asm.LoadGlobalIC_MissCase(&params, &exit_point);
578 }
579
580 __ Bind(&done);
581 {
582 __ SetAccumulator(var_result.value());
583 __ Dispatch();
584 }
585 }
586 }
587
588 // LdaGlobal <name_index> <slot>
589 //
590 // Load the global with name in constant pool entry <name_index> into the
591 // accumulator using FeedbackVector slot <slot> outside of a typeof.
592 void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
593 static const int kNameOperandIndex = 0;
594 static const int kSlotOperandIndex = 1;
595
596 BuildLoadGlobalIC(kSlotOperandIndex, kNameOperandIndex, NOT_INSIDE_TYPEOF,
597 assembler);
598 }
599
600 // LdaGlobalInsideTypeof <name_index> <slot>
601 //
602 // Load the global with name in constant pool entry <name_index> into the
603 // accumulator using FeedbackVector slot <slot> inside of a typeof.
604 void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
605 static const int kNameOperandIndex = 0;
606 static const int kSlotOperandIndex = 1;
607
608 BuildLoadGlobalIC(kSlotOperandIndex, kNameOperandIndex, INSIDE_TYPEOF,
609 assembler);
610 }
611
612 void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
613 // Get the global object.
614 Node* context = __ GetContext();
615 Node* native_context = __ LoadNativeContext(context);
616 Node* global =
617 __ LoadContextElement(native_context, Context::EXTENSION_INDEX);
618
619 // Store the global via the StoreIC.
620 Node* code_target = __ HeapConstant(ic.code());
621 Node* constant_index = __ BytecodeOperandIdx(0);
622 Node* name = __ LoadConstantPoolEntry(constant_index);
623 Node* value = __ GetAccumulator();
624 Node* raw_slot = __ BytecodeOperandIdx(1);
625 Node* smi_slot = __ SmiTag(raw_slot);
626 Node* feedback_vector = __ LoadFeedbackVector();
627 __ CallStub(ic.descriptor(), code_target, context, global, name, value,
628 smi_slot, feedback_vector);
629 __ Dispatch();
630 }
631
632 // StaGlobalSloppy <name_index> <slot>
633 //
634 // Store the value in the accumulator into the global with name in constant pool
635 // entry <name_index> using FeedbackVector slot <slot> in sloppy mode.
636 void Interpreter::DoStaGlobalSloppy(InterpreterAssembler* assembler) {
637 Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
638 DoStaGlobal(ic, assembler);
639 }
640
641 // StaGlobalStrict <name_index> <slot>
642 //
643 // Store the value in the accumulator into the global with name in constant pool
644 // entry <name_index> using FeedbackVector slot <slot> in strict mode.
645 void Interpreter::DoStaGlobalStrict(InterpreterAssembler* assembler) {
646 Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
647 DoStaGlobal(ic, assembler);
648 }
649
650 // LdaContextSlot <context> <slot_index> <depth>
651 //
652 // Load the object in |slot_index| of the context at |depth| in the context
653 // chain starting at |context| into the accumulator.
654 void Interpreter::DoLdaContextSlot(InterpreterAssembler* assembler) {
655 Node* reg_index = __ BytecodeOperandReg(0);
656 Node* context = __ LoadRegister(reg_index);
657 Node* slot_index = __ BytecodeOperandIdx(1);
658 Node* depth = __ BytecodeOperandUImm(2);
659 Node* slot_context = __ GetContextAtDepth(context, depth);
660 Node* result = __ LoadContextElement(slot_context, slot_index);
661 __ SetAccumulator(result);
662 __ Dispatch();
663 }
664
665 // LdaImmutableContextSlot <context> <slot_index> <depth>
666 //
667 // Load the object in |slot_index| of the context at |depth| in the context
668 // chain starting at |context| into the accumulator.
669 void Interpreter::DoLdaImmutableContextSlot(InterpreterAssembler* assembler) {
670 // TODO(danno) Share the actual code object rather than creating a duplicate one.
671 DoLdaContextSlot(assembler);
672 }
673
674 // LdaCurrentContextSlot <slot_index>
675 //
676 // Load the object in |slot_index| of the current context into the accumulator.
677 void Interpreter::DoLdaCurrentContextSlot(InterpreterAssembler* assembler) {
678 Node* slot_index = __ BytecodeOperandIdx(0);
679 Node* slot_context = __ GetContext();
680 Node* result = __ LoadContextElement(slot_context, slot_index);
681 __ SetAccumulator(result);
682 __ Dispatch();
683 }
684
685 // LdaImmutableCurrentContextSlot <slot_index>
686 //
687 // Load the object in |slot_index| of the current context into the accumulator.
688 void Interpreter::DoLdaImmutableCurrentContextSlot(
689 InterpreterAssembler* assembler) {
690 // TODO(danno) Share the actual code object rather than creating a duplicate one.
691 DoLdaCurrentContextSlot(assembler);
692 }
693
694 // StaContextSlot <context> <slot_index> <depth>
695 //
696 // Stores the object in the accumulator into |slot_index| of the context at
697 // |depth| in the context chain starting at |context|.
698 void Interpreter::DoStaContextSlot(InterpreterAssembler* assembler) {
699 Node* value = __ GetAccumulator();
700 Node* reg_index = __ BytecodeOperandReg(0);
701 Node* context = __ LoadRegister(reg_index);
702 Node* slot_index = __ BytecodeOperandIdx(1);
703 Node* depth = __ BytecodeOperandUImm(2);
704 Node* slot_context = __ GetContextAtDepth(context, depth);
705 __ StoreContextElement(slot_context, slot_index, value);
706 __ Dispatch();
707 }
708
709 // StaCurrentContextSlot <slot_index>
710 //
711 // Stores the object in the accumulator into |slot_index| of the current
712 // context.
713 void Interpreter::DoStaCurrentContextSlot(InterpreterAssembler* assembler) {
714 Node* value = __ GetAccumulator();
715 Node* slot_index = __ BytecodeOperandIdx(0);
716 Node* slot_context = __ GetContext();
717 __ StoreContextElement(slot_context, slot_index, value);
718 __ Dispatch();
719 }
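
The <depth> operand in the context-slot bytecodes above counts how many parent links to walk from the starting context; an illustrative JS pairing (slot names and depths are made up for the example):

  // function outer() {          // allocates context C_outer
  //   let a = 1;
  //   return function inner() { // runs with chain C_inner -> C_outer
  //     let b = a + 1;          // a: LdaContextSlot <ctx>, slot(a), depth 1
  //     return b;               // b in C_inner: depth 0, i.e. the
  //   };                        // *CurrentContextSlot variants above
  // }
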
720
721 void Interpreter::DoLdaLookupSlot(Runtime::FunctionId function_id,
722 InterpreterAssembler* assembler) {
723 Node* name_index = __ BytecodeOperandIdx(0);
724 Node* name = __ LoadConstantPoolEntry(name_index);
725 Node* context = __ GetContext();
726 Node* result = __ CallRuntime(function_id, context, name);
727 __ SetAccumulator(result);
728 __ Dispatch();
729 }
730
731 // LdaLookupSlot <name_index>
732 //
733 // Lookup the object with the name in constant pool entry |name_index|
734 // dynamically.
735 void Interpreter::DoLdaLookupSlot(InterpreterAssembler* assembler) {
736 DoLdaLookupSlot(Runtime::kLoadLookupSlot, assembler);
737 }
738
739 // LdaLookupSlotInsideTypeof <name_index>
740 //
741 // Lookup the object with the name in constant pool entry |name_index|
742 // dynamically without causing a NoReferenceError.
743 void Interpreter::DoLdaLookupSlotInsideTypeof(InterpreterAssembler* assembler) {
744 DoLdaLookupSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
745 }
746
747 void Interpreter::DoLdaLookupContextSlot(Runtime::FunctionId function_id,
748 InterpreterAssembler* assembler) {
749 Node* context = __ GetContext();
750 Node* name_index = __ BytecodeOperandIdx(0);
751 Node* slot_index = __ BytecodeOperandIdx(1);
752 Node* depth = __ BytecodeOperandUImm(2);
753
754 Label slowpath(assembler, Label::kDeferred);
755
756 // Check for context extensions to allow the fast path.
757 __ GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath);
758
759 // Fast path does a normal load context.
760 {
761 Node* slot_context = __ GetContextAtDepth(context, depth);
762 Node* result = __ LoadContextElement(slot_context, slot_index);
763 __ SetAccumulator(result);
764 __ Dispatch();
765 }
766
767 // Slow path when we have to call out to the runtime.
768 __ Bind(&slowpath);
769 {
770 Node* name = __ LoadConstantPoolEntry(name_index);
771 Node* result = __ CallRuntime(function_id, context, name);
772 __ SetAccumulator(result);
773 __ Dispatch();
774 }
775 }
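
The slow path exists because a context extension (introduced by `with`, or by a sloppy-mode direct eval somewhere in the chain) can add bindings the compiler could not resolve statically; an illustrative case that defeats the fast path:

  // Illustrative only: the `with` scope pushes a context whose extension
  // object may shadow `x`, so GotoIfHasContextExtensionUpToDepth bails out
  // to the Runtime::kLoadLookupSlot call instead of a static slot load.
  //
  //   function f(o) {
  //     with (o) { return x; }
  //   }
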
776
777 // LdaLookupContextSlot <name_index> <slot_index> <depth>
778 //
779 // Lookup the object with the name in constant pool entry |name_index|
780 // dynamically.
781 void Interpreter::DoLdaLookupContextSlot(InterpreterAssembler* assembler) {
782 DoLdaLookupContextSlot(Runtime::kLoadLookupSlot, assembler);
783 }
784
785 // LdaLookupContextSlotInsideTypeof <name_index> <slot_index> <depth>
786 //
787 // Lookup the object with the name in constant pool entry |name_index|
788 // dynamically without causing a NoReferenceError.
789 void Interpreter::DoLdaLookupContextSlotInsideTypeof(
790 InterpreterAssembler* assembler) {
791 DoLdaLookupContextSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
792 }
793
794 void Interpreter::DoLdaLookupGlobalSlot(Runtime::FunctionId function_id,
795 InterpreterAssembler* assembler) {
796 Node* context = __ GetContext();
797 Node* depth = __ BytecodeOperandUImm(2);
798
799 Label slowpath(assembler, Label::kDeferred);
800
801 // Check for context extensions to allow the fast path
802 __ GotoIfHasContextExtensionUpToDepth(context, depth, &slowpath);
803
804 // Fast path does a normal load global
805 {
806 static const int kNameOperandIndex = 0;
807 static const int kSlotOperandIndex = 1;
808
809 TypeofMode typeof_mode = function_id == Runtime::kLoadLookupSlotInsideTypeof
810 ? INSIDE_TYPEOF
811 : NOT_INSIDE_TYPEOF;
812
813 BuildLoadGlobalIC(kSlotOperandIndex, kNameOperandIndex, typeof_mode,
814 assembler);
815 }
816
817 // Slow path when we have to call out to the runtime
818 __ Bind(&slowpath);
819 {
820 Node* name_index = __ BytecodeOperandIdx(0);
821 Node* name = __ LoadConstantPoolEntry(name_index);
822 Node* result = __ CallRuntime(function_id, context, name);
823 __ SetAccumulator(result);
824 __ Dispatch();
825 }
826 }
827
828 // LdaLookupGlobalSlot <name_index> <feedback_slot> <depth>
829 //
830 // Lookup the object with the name in constant pool entry |name_index|
831 // dynamically.
832 void Interpreter::DoLdaLookupGlobalSlot(InterpreterAssembler* assembler) {
833 DoLdaLookupGlobalSlot(Runtime::kLoadLookupSlot, assembler);
834 }
835
836 // LdaLookupGlobalSlotInsideTypeof <name_index> <feedback_slot> <depth>
837 //
838 // Lookup the object with the name in constant pool entry |name_index|
839 // dynamically without causing a NoReferenceError.
840 void Interpreter::DoLdaLookupGlobalSlotInsideTypeof(
841 InterpreterAssembler* assembler) {
842 DoLdaLookupGlobalSlot(Runtime::kLoadLookupSlotInsideTypeof, assembler);
843 }
844
845 void Interpreter::DoStaLookupSlot(LanguageMode language_mode,
846 InterpreterAssembler* assembler) {
847 Node* value = __ GetAccumulator();
848 Node* index = __ BytecodeOperandIdx(0);
849 Node* name = __ LoadConstantPoolEntry(index);
850 Node* context = __ GetContext();
851 Node* result = __ CallRuntime(is_strict(language_mode)
852 ? Runtime::kStoreLookupSlot_Strict
853 : Runtime::kStoreLookupSlot_Sloppy,
854 context, name, value);
855 __ SetAccumulator(result);
856 __ Dispatch();
857 }
858
859 // StaLookupSlotSloppy <name_index>
860 //
861 // Store the object in accumulator to the object with the name in constant
862 // pool entry |name_index| in sloppy mode.
863 void Interpreter::DoStaLookupSlotSloppy(InterpreterAssembler* assembler) {
864 DoStaLookupSlot(LanguageMode::SLOPPY, assembler);
865 }
866
867 // StaLookupSlotStrict <name_index>
868 //
869 // Store the object in accumulator to the object with the name in constant
870 // pool entry |name_index| in strict mode.
871 void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) {
872 DoStaLookupSlot(LanguageMode::STRICT, assembler);
873 }
874
875 void Interpreter::BuildLoadIC(int recv_operand_index, int slot_operand_index,
876 int name_operand_index,
877 InterpreterAssembler* assembler) {
878 __ Comment("BuildLoadIC");
879
880 // Load vector and slot.
881 Node* feedback_vector = __ LoadFeedbackVector();
882 Node* feedback_slot = __ BytecodeOperandIdx(slot_operand_index);
883 Node* smi_slot = __ SmiTag(feedback_slot);
884
885 // Load receiver.
886 Node* register_index = __ BytecodeOperandReg(recv_operand_index);
887 Node* recv = __ LoadRegister(register_index);
888
889 // Load the name.
890 // TODO(jgruber): Not needed for monomorphic smi handler constant/field case.
891 Node* constant_index = __ BytecodeOperandIdx(name_operand_index);
892 Node* name = __ LoadConstantPoolEntry(constant_index);
893
894 Node* context = __ GetContext();
895
896 Label done(assembler);
897 Variable var_result(assembler, MachineRepresentation::kTagged);
898 ExitPoint exit_point(assembler, &done, &var_result);
899
900 AccessorAssembler::LoadICParameters params(context, recv, name, smi_slot,
901 feedback_vector);
902 AccessorAssembler accessor_asm(assembler->state());
903 accessor_asm.LoadIC_BytecodeHandler(&params, &exit_point);
904
905 __ Bind(&done);
906 {
907 __ SetAccumulator(var_result.value());
908 __ Dispatch();
909 }
910 }
911
912 // LdaNamedProperty <object> <name_index> <slot>
913 //
914 // Calls the LoadIC at FeedbackVector slot <slot> for <object> and the name at
915 // constant pool entry <name_index>.
916 void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
917 static const int kRecvOperandIndex = 0;
918 static const int kNameOperandIndex = 1;
919 static const int kSlotOperandIndex = 2;
920
921 BuildLoadIC(kRecvOperandIndex, kSlotOperandIndex, kNameOperandIndex,
922 assembler);
923 }
924
925 // LdaKeyedProperty <object> <slot>
926 //
927 // Calls the KeyedLoadIC at FeedbackVector slot <slot> for <object> and the key
928 // in the accumulator.
929 void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
930 Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
931 Node* code_target = __ HeapConstant(ic.code());
932 Node* reg_index = __ BytecodeOperandReg(0);
933 Node* object = __ LoadRegister(reg_index);
934 Node* name = __ GetAccumulator();
935 Node* raw_slot = __ BytecodeOperandIdx(1);
936 Node* smi_slot = __ SmiTag(raw_slot);
937 Node* feedback_vector = __ LoadFeedbackVector();
938 Node* context = __ GetContext();
939 Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
940 name, smi_slot, feedback_vector);
941 __ SetAccumulator(result);
942 __ Dispatch();
943 }
944
945 void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
946 Node* code_target = __ HeapConstant(ic.code());
947 Node* object_reg_index = __ BytecodeOperandReg(0);
948 Node* object = __ LoadRegister(object_reg_index);
949 Node* constant_index = __ BytecodeOperandIdx(1);
950 Node* name = __ LoadConstantPoolEntry(constant_index);
951 Node* value = __ GetAccumulator();
952 Node* raw_slot = __ BytecodeOperandIdx(2);
953 Node* smi_slot = __ SmiTag(raw_slot);
954 Node* feedback_vector = __ LoadFeedbackVector();
955 Node* context = __ GetContext();
956 __ CallStub(ic.descriptor(), code_target, context, object, name, value,
957 smi_slot, feedback_vector);
958 __ Dispatch();
959 }
960
961 // StaNamedPropertySloppy <object> <name_index> <slot>
962 //
963 // Calls the sloppy mode StoreIC at FeedbackVector slot <slot> for <object> and
964 // the name in constant pool entry <name_index> with the value in the
965 // accumulator.
966 void Interpreter::DoStaNamedPropertySloppy(InterpreterAssembler* assembler) {
967 Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY);
968 DoStoreIC(ic, assembler);
969 }
970
971 // StaNamedPropertyStrict <object> <name_index> <slot>
972 //
973 // Calls the strict mode StoreIC at FeedbackVector slot <slot> for <object> and
974 // the name in constant pool entry <name_index> with the value in the
975 // accumulator.
976 void Interpreter::DoStaNamedPropertyStrict(InterpreterAssembler* assembler) {
977 Callable ic = CodeFactory::StoreICInOptimizedCode(isolate_, STRICT);
978 DoStoreIC(ic, assembler);
979 }
980
981 // StaNamedOwnProperty <object> <name_index> <slot>
982 //
983 // Calls the StoreOwnIC at FeedbackVector slot <slot> for <object> and
984 // the name in constant pool entry <name_index> with the value in the
985 // accumulator.
986 void Interpreter::DoStaNamedOwnProperty(InterpreterAssembler* assembler) {
987 Callable ic = CodeFactory::StoreOwnICInOptimizedCode(isolate_);
988 DoStoreIC(ic, assembler);
989 }
990
991 void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
992 Node* code_target = __ HeapConstant(ic.code());
993 Node* object_reg_index = __ BytecodeOperandReg(0);
994 Node* object = __ LoadRegister(object_reg_index);
995 Node* name_reg_index = __ BytecodeOperandReg(1);
996 Node* name = __ LoadRegister(name_reg_index);
997 Node* value = __ GetAccumulator();
998 Node* raw_slot = __ BytecodeOperandIdx(2);
999 Node* smi_slot = __ SmiTag(raw_slot);
1000 Node* feedback_vector = __ LoadFeedbackVector();
1001 Node* context = __ GetContext();
1002 __ CallStub(ic.descriptor(), code_target, context, object, name, value,
1003 smi_slot, feedback_vector);
1004 __ Dispatch();
1005 }
1006
1007 // StaKeyedPropertySloppy <object> <key> <slot>
1008 //
1009 // Calls the sloppy mode KeyedStoreIC at FeedbackVector slot <slot> for <object>
1010 // and the key <key> with the value in the accumulator.
1011 void Interpreter::DoStaKeyedPropertySloppy(InterpreterAssembler* assembler) {
1012 Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY);
1013 DoKeyedStoreIC(ic, assembler);
1014 }
1015
1016 // StaKeyedPropertyStrict <object> <key> <slot>
1017 //
1018 // Calls the strict mode KeyedStoreIC at FeedbackVector slot <slot> for <object>
1019 // and the key <key> with the value in the accumulator.
1020 void Interpreter::DoStaKeyedPropertyStrict(InterpreterAssembler* assembler) {
1021 Callable ic = CodeFactory::KeyedStoreICInOptimizedCode(isolate_, STRICT);
1022 DoKeyedStoreIC(ic, assembler);
1023 }
1024
1025 // StaDataPropertyInLiteral <object> <name> <flags>
1026 //
1027 // Define a property <name> with value from the accumulator in <object>.
1028 // Property attributes and whether set_function_name are stored in
1029 // DataPropertyInLiteralFlags <flags>.
1030 //
1031 // This definition is not observable and is used only for definitions
1032 // in object or class literals.
1033 void Interpreter::DoStaDataPropertyInLiteral(InterpreterAssembler* assembler) {
1034 Node* object = __ LoadRegister(__ BytecodeOperandReg(0));
1035 Node* name = __ LoadRegister(__ BytecodeOperandReg(1));
1036 Node* value = __ GetAccumulator();
1037 Node* flags = __ SmiFromWord32(__ BytecodeOperandFlag(2));
1038 Node* vector_index = __ SmiTag(__ BytecodeOperandIdx(3));
1039
1040 Node* feedback_vector = __ LoadFeedbackVector();
1041 Node* context = __ GetContext();
1042
1043 __ CallRuntime(Runtime::kDefineDataPropertyInLiteral, context, object, name,
1044 value, flags, feedback_vector, vector_index);
1045 __ Dispatch();
1046 }
1047
1048 void Interpreter::DoCollectTypeProfile(InterpreterAssembler* assembler) {
1049 Node* name = __ LoadRegister(__ BytecodeOperandReg(0));
1050 Node* value = __ GetAccumulator();
1051 Node* vector_index = __ SmiTag(__ BytecodeOperandIdx(1));
1052
1053 Node* feedback_vector = __ LoadFeedbackVector();
1054 Node* context = __ GetContext();
1055
1056 __ CallRuntime(Runtime::kCollectTypeProfile, context, name, value,
1057 feedback_vector, vector_index);
1058 __ Dispatch();
1059 }
1060
1061 // LdaModuleVariable <cell_index> <depth>
1062 //
1063 // Load the contents of a module variable into the accumulator. The variable is
1064 // identified by <cell_index>. <depth> is the depth of the current context
1065 // relative to the module context.
1066 void Interpreter::DoLdaModuleVariable(InterpreterAssembler* assembler) {
1067 Node* cell_index = __ BytecodeOperandImmIntPtr(0);
1068 Node* depth = __ BytecodeOperandUImm(1);
1069
1070 Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
1071 Node* module =
1072 __ LoadContextElement(module_context, Context::EXTENSION_INDEX);
1073
1074 Label if_export(assembler), if_import(assembler), end(assembler);
1075 __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export,
1076 &if_import);
1077
1078 __ Bind(&if_export);
1079 {
1080 Node* regular_exports =
1081 __ LoadObjectField(module, Module::kRegularExportsOffset);
1082 // The actual array index is (cell_index - 1).
1083 Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1));
1084 Node* cell = __ LoadFixedArrayElement(regular_exports, export_index);
1085 __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset));
1086 __ Goto(&end);
1087 }
1088
1089 __ Bind(&if_import);
1090 {
1091 Node* regular_imports =
1092 __ LoadObjectField(module, Module::kRegularImportsOffset);
1093 // The actual array index is (-cell_index - 1).
1094 Node* import_index = __ IntPtrSub(__ IntPtrConstant(-1), cell_index);
1095 Node* cell = __ LoadFixedArrayElement(regular_imports, import_index);
1096 __ SetAccumulator(__ LoadObjectField(cell, Cell::kValueOffset));
1097 __ Goto(&end);
1098 }
1099
1100 __ Bind(&end);
1101 __ Dispatch();
1102 }
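
Both module-variable handlers rely on the same <cell_index> encoding: positive indices name regular exports, negative indices regular imports, and zero is unused. A sketch restating the arithmetic of the two branches above (helper name is illustrative):

  // cell_index > 0  ->  regular_exports[cell_index - 1]
  // cell_index < 0  ->  regular_imports[-cell_index - 1]
  static int ModuleArrayIndexSketch(int cell_index) {
    DCHECK_NE(cell_index, 0);
    return cell_index > 0 ? cell_index - 1 : -cell_index - 1;
  }
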
1103
1104 // StaModuleVariable <cell_index> <depth>
1105 //
1106 // Store accumulator to the module variable identified by <cell_index>.
1107 // <depth> is the depth of the current context relative to the module context.
1108 void Interpreter::DoStaModuleVariable(InterpreterAssembler* assembler) {
1109 Node* value = __ GetAccumulator();
1110 Node* cell_index = __ BytecodeOperandImmIntPtr(0);
1111 Node* depth = __ BytecodeOperandUImm(1);
1112
1113 Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
1114 Node* module =
1115 __ LoadContextElement(module_context, Context::EXTENSION_INDEX);
1116
1117 Label if_export(assembler), if_import(assembler), end(assembler);
1118 __ Branch(__ IntPtrGreaterThan(cell_index, __ IntPtrConstant(0)), &if_export,
1119 &if_import);
1120
1121 __ Bind(&if_export);
1122 {
1123 Node* regular_exports =
1124 __ LoadObjectField(module, Module::kRegularExportsOffset);
1125 // The actual array index is (cell_index - 1).
1126 Node* export_index = __ IntPtrSub(cell_index, __ IntPtrConstant(1));
1127 Node* cell = __ LoadFixedArrayElement(regular_exports, export_index);
1128 __ StoreObjectField(cell, Cell::kValueOffset, value);
1129 __ Goto(&end);
1130 }
1131
1132 __ Bind(&if_import);
1133 {
1134 // Not supported (probably never).
1135 __ Abort(kUnsupportedModuleOperation);
1136 __ Goto(&end);
1137 }
1138
1139 __ Bind(&end);
1140 __ Dispatch();
1141 }
1142
1143 // PushContext <context>
1144 //
1145 // Saves the current context in <context>, and pushes the accumulator as the
1146 // new current context.
1147 void Interpreter::DoPushContext(InterpreterAssembler* assembler) {
1148 Node* reg_index = __ BytecodeOperandReg(0);
1149 Node* new_context = __ GetAccumulator();
1150 Node* old_context = __ GetContext();
1151 __ StoreRegister(old_context, reg_index);
1152 __ SetContext(new_context);
1153 __ Dispatch();
1154 }
1155
1156 // PopContext <context>
1157 //
1158 // Pops the current context and sets <context> as the new context.
1159 void Interpreter::DoPopContext(InterpreterAssembler* assembler) {
1160 Node* reg_index = __ BytecodeOperandReg(0);
1161 Node* context = __ LoadRegister(reg_index);
1162 __ SetContext(context);
1163 __ Dispatch();
1164 }
1165
1166 // TODO(mythria): Remove this function once all CompareOps record type feedback.
1167 void Interpreter::DoCompareOp(Token::Value compare_op,
1168 InterpreterAssembler* assembler) {
1169 Node* reg_index = __ BytecodeOperandReg(0);
1170 Node* lhs = __ LoadRegister(reg_index);
1171 Node* rhs = __ GetAccumulator();
1172 Node* context = __ GetContext();
1173 Node* result;
1174 switch (compare_op) {
1175 case Token::IN:
1176 result = assembler->HasProperty(rhs, lhs, context);
1177 break;
1178 case Token::INSTANCEOF:
1179 result = assembler->InstanceOf(lhs, rhs, context);
1180 break;
1181 default:
1182 UNREACHABLE();
1183 }
1184 __ SetAccumulator(result);
1185 __ Dispatch();
1186 }
1187
1188 template <class Generator>
1189 void Interpreter::DoBinaryOpWithFeedback(InterpreterAssembler* assembler) {
1190 Node* reg_index = __ BytecodeOperandReg(0);
1191 Node* lhs = __ LoadRegister(reg_index);
1192 Node* rhs = __ GetAccumulator();
1193 Node* context = __ GetContext();
1194 Node* slot_index = __ BytecodeOperandIdx(1);
1195 Node* feedback_vector = __ LoadFeedbackVector();
1196 Node* result = Generator::Generate(assembler, lhs, rhs, slot_index,
1197 feedback_vector, context);
1198 __ SetAccumulator(result);
1199 __ Dispatch();
1200 }
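
The Generator template parameter is one of the *WithFeedbackStub classes instantiated below (Add, Subtract, Multiply, Divide, Modulus); a sketch of the static interface this template assumes, restating only the call shape used above:

  // Sketch of the contract DoBinaryOpWithFeedback expects from Generator;
  // the real stubs provide Generate() via the code-stub machinery.
  struct BinaryOpGeneratorConcept {
    static compiler::Node* Generate(InterpreterAssembler* assembler,
                                    compiler::Node* lhs, compiler::Node* rhs,
                                    compiler::Node* slot_index,
                                    compiler::Node* feedback_vector,
                                    compiler::Node* context);
  };
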
1201
1202 void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op,
1203 InterpreterAssembler* assembler) {
1204 Node* reg_index = __ BytecodeOperandReg(0);
1205 Node* lhs = __ LoadRegister(reg_index);
1206 Node* rhs = __ GetAccumulator();
1207 Node* context = __ GetContext();
1208 Node* slot_index = __ BytecodeOperandIdx(1);
1209 Node* feedback_vector = __ LoadFeedbackVector();
1210
1211 // TODO(interpreter): the only reason this check is here is because we
1212 // sometimes emit comparisons that shouldn't collect feedback (e.g.
1213 // try-finally blocks and generators), and we could get rid of this by
1214 // introducing Smi equality tests.
1215 Label gather_type_feedback(assembler), do_compare(assembler);
1216 __ Branch(__ WordEqual(slot_index, __ IntPtrConstant(0)), &do_compare,
1217 &gather_type_feedback);
1218
1219 __ Bind(&gather_type_feedback);
1220 {
1221 Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
1222 Label lhs_is_not_smi(assembler), lhs_is_not_number(assembler),
1223 lhs_is_not_string(assembler), gather_rhs_type(assembler),
1224 update_feedback(assembler);
1225
1226 __ GotoIfNot(__ TaggedIsSmi(lhs), &lhs_is_not_smi);
1227
1228 var_type_feedback.Bind(
1229 __ SmiConstant(CompareOperationFeedback::kSignedSmall));
1230 __ Goto(&gather_rhs_type);
1231
1232 __ Bind(&lhs_is_not_smi);
1233 {
1234 Node* lhs_map = __ LoadMap(lhs);
1235 __ GotoIfNot(__ IsHeapNumberMap(lhs_map), &lhs_is_not_number);
1236
1237 var_type_feedback.Bind(__ SmiConstant(CompareOperationFeedback::kNumber));
1238 __ Goto(&gather_rhs_type);
1239
1240 __ Bind(&lhs_is_not_number);
1241 {
1242 Node* lhs_instance_type = __ LoadInstanceType(lhs);
1243 if (Token::IsOrderedRelationalCompareOp(compare_op)) {
1244 Label lhs_is_not_oddball(assembler);
1245 __ GotoIfNot(
1246 __ Word32Equal(lhs_instance_type, __ Int32Constant(ODDBALL_TYPE)),
1247 &lhs_is_not_oddball);
1248
1249 var_type_feedback.Bind(
1250 __ SmiConstant(CompareOperationFeedback::kNumberOrOddball));
1251 __ Goto(&gather_rhs_type);
1252
1253 __ Bind(&lhs_is_not_oddball);
1254 }
1255
1256 Label lhs_is_not_string(assembler);
1257 __ GotoIfNot(__ IsStringInstanceType(lhs_instance_type),
1258 &lhs_is_not_string);
1259
1260 if (Token::IsOrderedRelationalCompareOp(compare_op)) {
1261 var_type_feedback.Bind(
1262 __ SmiConstant(CompareOperationFeedback::kString));
1263 } else {
1264 var_type_feedback.Bind(__ SelectSmiConstant(
1265 __ Word32Equal(
1266 __ Word32And(lhs_instance_type,
1267 __ Int32Constant(kIsNotInternalizedMask)),
1268 __ Int32Constant(kInternalizedTag)),
1269 CompareOperationFeedback::kInternalizedString,
1270 CompareOperationFeedback::kString));
1271 }
1272 __ Goto(&gather_rhs_type);
1273
1274 __ Bind(&lhs_is_not_string);
1275 if (Token::IsEqualityOp(compare_op)) {
1276 var_type_feedback.Bind(__ SelectSmiConstant(
1277 __ IsJSReceiverInstanceType(lhs_instance_type),
1278 CompareOperationFeedback::kReceiver,
1279 CompareOperationFeedback::kAny));
1280 } else {
1281 var_type_feedback.Bind(
1282 __ SmiConstant(CompareOperationFeedback::kAny));
1283 }
1284 __ Goto(&gather_rhs_type);
1285 }
1286 }
1287
1288 __ Bind(&gather_rhs_type);
1289 {
1290 Label rhs_is_not_smi(assembler), rhs_is_not_number(assembler);
1291
1292 __ GotoIfNot(__ TaggedIsSmi(rhs), &rhs_is_not_smi);
1293
1294 var_type_feedback.Bind(
1295 __ SmiOr(var_type_feedback.value(),
1296 __ SmiConstant(CompareOperationFeedback::kSignedSmall)));
1297 __ Goto(&update_feedback);
1298
1299 __ Bind(&rhs_is_not_smi);
1300 {
1301 Node* rhs_map = __ LoadMap(rhs);
1302 __ GotoIfNot(__ IsHeapNumberMap(rhs_map), &rhs_is_not_number);
1303
1304 var_type_feedback.Bind(
1305 __ SmiOr(var_type_feedback.value(),
1306 __ SmiConstant(CompareOperationFeedback::kNumber)));
1307 __ Goto(&update_feedback);
1308
1309 __ Bind(&rhs_is_not_number);
1310 {
1311 Node* rhs_instance_type = __ LoadInstanceType(rhs);
1312 if (Token::IsOrderedRelationalCompareOp(compare_op)) {
1313 Label rhs_is_not_oddball(assembler);
1314 __ GotoIfNot(__ Word32Equal(rhs_instance_type,
1315 __ Int32Constant(ODDBALL_TYPE)),
1316 &rhs_is_not_oddball);
1317
1318 var_type_feedback.Bind(__ SmiOr(
1319 var_type_feedback.value(),
1320 __ SmiConstant(CompareOperationFeedback::kNumberOrOddball)));
1321 __ Goto(&update_feedback);
1322
1323 __ Bind(&rhs_is_not_oddball);
1324 }
1325
1326 Label rhs_is_not_string(assembler);
1327 __ GotoIfNot(__ IsStringInstanceType(rhs_instance_type),
1328 &rhs_is_not_string);
1329
1330 if (Token::IsOrderedRelationalCompareOp(compare_op)) {
1331 var_type_feedback.Bind(
1332 __ SmiOr(var_type_feedback.value(),
1333 __ SmiConstant(CompareOperationFeedback::kString)));
1334 } else {
1335 var_type_feedback.Bind(__ SmiOr(
1336 var_type_feedback.value(),
1337 __ SelectSmiConstant(
1338 __ Word32Equal(
1339 __ Word32And(rhs_instance_type,
1340 __ Int32Constant(kIsNotInternalizedMask)),
1341 __ Int32Constant(kInternalizedTag)),
1342 CompareOperationFeedback::kInternalizedString,
1343 CompareOperationFeedback::kString)));
1344 }
1345 __ Goto(&update_feedback);
1346
1347 __ Bind(&rhs_is_not_string);
1348 if (Token::IsEqualityOp(compare_op)) {
1349 var_type_feedback.Bind(
1350 __ SmiOr(var_type_feedback.value(),
1351 __ SelectSmiConstant(
1352 __ IsJSReceiverInstanceType(rhs_instance_type),
1353 CompareOperationFeedback::kReceiver,
1354 CompareOperationFeedback::kAny)));
1355 } else {
1356 var_type_feedback.Bind(
1357 __ SmiConstant(CompareOperationFeedback::kAny));
1358 }
1359 __ Goto(&update_feedback);
1360 }
1361 }
1362 }
1363
1364 __ Bind(&update_feedback);
1365 {
1366 __ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
1367 __ Goto(&do_compare);
1368 }
1369 }
1370
1371 __ Bind(&do_compare);
1372 Node* result;
1373 switch (compare_op) {
1374 case Token::EQ:
1375 result = assembler->Equal(lhs, rhs, context);
1376 break;
1377 case Token::EQ_STRICT:
1378 result = assembler->StrictEqual(lhs, rhs);
1379 break;
1380 case Token::LT:
1381 result = assembler->RelationalComparison(CodeStubAssembler::kLessThan,
1382 lhs, rhs, context);
1383 break;
1384 case Token::GT:
1385 result = assembler->RelationalComparison(CodeStubAssembler::kGreaterThan,
1386 lhs, rhs, context);
1387 break;
1388 case Token::LTE:
1389 result = assembler->RelationalComparison(
1390 CodeStubAssembler::kLessThanOrEqual, lhs, rhs, context);
1391 break;
1392 case Token::GTE:
1393 result = assembler->RelationalComparison(
1394 CodeStubAssembler::kGreaterThanOrEqual, lhs, rhs, context);
1395 break;
1396 default:
1397 UNREACHABLE();
1398 }
1399 __ SetAccumulator(result);
1400 __ Dispatch();
1401 }
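
The SmiOr calls above union feedback bits from the two operands before UpdateFeedback stores them, so the recorded state only ever widens; a sketch with made-up flag values (not V8's actual CompareOperationFeedback encoding):

  // Illustrative lattice-union behavior of the feedback updates above.
  enum FeedbackSketch : int {
    kNoneSketch = 0,
    kSignedSmallSketch = 1 << 0,
    kNumberSketch = 1 << 1,
    kAnySketch = ~0,
  };
  // A Smi lhs compared against a HeapNumber rhs records both bits; a later
  // comparison at the same slot can only add bits, never clear them.
  int combined = kSignedSmallSketch | kNumberSketch;
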
1402
1403 // Add <src>
1404 //
1405 // Add register <src> to accumulator.
1406 void Interpreter::DoAdd(InterpreterAssembler* assembler) {
1407 DoBinaryOpWithFeedback<AddWithFeedbackStub>(assembler);
1408 }
1409
1410 // Sub <src>
1411 //
1412 // Subtract register <src> from accumulator.
1413 void Interpreter::DoSub(InterpreterAssembler* assembler) {
1414 DoBinaryOpWithFeedback<SubtractWithFeedbackStub>(assembler);
1415 }
1416
1417 // Mul <src>
1418 //
1419 // Multiply accumulator by register <src>.
1420 void Interpreter::DoMul(InterpreterAssembler* assembler) {
1421 DoBinaryOpWithFeedback<MultiplyWithFeedbackStub>(assembler);
1422 }
1423
1424 // Div <src>
1425 //
1426 // Divide register <src> by accumulator.
1427 void Interpreter::DoDiv(InterpreterAssembler* assembler) {
1428 DoBinaryOpWithFeedback<DivideWithFeedbackStub>(assembler);
1429 }
1430
1431 // Mod <src>
1432 //
1433 // Modulo register <src> by accumulator.
1434 void Interpreter::DoMod(InterpreterAssembler* assembler) {
1435 DoBinaryOpWithFeedback<ModulusWithFeedbackStub>(assembler);
1436 }
1437
1438 void Interpreter::DoBitwiseBinaryOp(Token::Value bitwise_op,
1439 InterpreterAssembler* assembler) {
1440 Node* reg_index = __ BytecodeOperandReg(0);
1441 Node* lhs = __ LoadRegister(reg_index);
1442 Node* rhs = __ GetAccumulator();
1443 Node* context = __ GetContext();
1444 Node* slot_index = __ BytecodeOperandIdx(1);
1445 Node* feedback_vector = __ LoadFeedbackVector();
1446
1447 Variable var_lhs_type_feedback(assembler,
1448 MachineRepresentation::kTaggedSigned),
1449 var_rhs_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
1450 Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
1451 context, lhs, &var_lhs_type_feedback);
1452 Node* rhs_value = __ TruncateTaggedToWord32WithFeedback(
1453 context, rhs, &var_rhs_type_feedback);
1454 Node* result = nullptr;
1455
1456 switch (bitwise_op) {
1457 case Token::BIT_OR: {
1458 Node* value = __ Word32Or(lhs_value, rhs_value);
1459 result = __ ChangeInt32ToTagged(value);
1460 } break;
1461 case Token::BIT_AND: {
1462 Node* value = __ Word32And(lhs_value, rhs_value);
1463 result = __ ChangeInt32ToTagged(value);
1464 } break;
1465 case Token::BIT_XOR: {
1466 Node* value = __ Word32Xor(lhs_value, rhs_value);
1467 result = __ ChangeInt32ToTagged(value);
1468 } break;
1469 case Token::SHL: {
1470 Node* value = __ Word32Shl(
1471 lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f)));
1472 result = __ ChangeInt32ToTagged(value);
1473 } break;
1474 case Token::SHR: {
1475 Node* value = __ Word32Shr(
1476 lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f)));
1477 result = __ ChangeUint32ToTagged(value);
1478 } break;
1479 case Token::SAR: {
1480 Node* value = __ Word32Sar(
1481 lhs_value, __ Word32And(rhs_value, __ Int32Constant(0x1f)));
1482 result = __ ChangeInt32ToTagged(value);
1483 } break;
1484 default:
1485 UNREACHABLE();
1486 }
1487
1488 Node* result_type = __ SelectSmiConstant(
1489 __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
1490 BinaryOperationFeedback::kNumber);
1491
1492 if (FLAG_debug_code) {
1493 Label ok(assembler);
1494 __ GotoIf(__ TaggedIsSmi(result), &ok);
1495 Node* result_map = __ LoadMap(result);
1496 __ AbortIfWordNotEqual(result_map, __ HeapNumberMapConstant(),
1497 kExpectedHeapNumber);
1498 __ Goto(&ok);
1499 __ Bind(&ok);
1500 }
1501
1502 Node* input_feedback =
1503 __ SmiOr(var_lhs_type_feedback.value(), var_rhs_type_feedback.value());
1504 __ UpdateFeedback(__ SmiOr(result_type, input_feedback), feedback_vector,
1505 slot_index);
1506 __ SetAccumulator(result);
1507 __ Dispatch();
1508 }
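
The `& 0x1f` masks above implement the JS rule that only the low five bits of the shift count are used; a standalone restatement (sketch, with the cast carrying the signed/unsigned distinction between SAR and SHR):

  #include <cstdint>

  // JS semantics sketch: 1 << 33 === 2, and -1 >>> 0 === 0xFFFFFFFF.
  int32_t JsSarSketch(int32_t lhs, int32_t rhs) {
    return lhs >> (rhs & 0x1F);  // sign-extending shift, Token::SAR
  }
  uint32_t JsShrSketch(int32_t lhs, int32_t rhs) {
    return static_cast<uint32_t>(lhs) >> (rhs & 0x1F);  // zero-filling, SHR
  }
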
1509
1510 // BitwiseOr <src>
1511 //
1512 // BitwiseOr register <src> to accumulator.
1513 void Interpreter::DoBitwiseOr(InterpreterAssembler* assembler) {
1514 DoBitwiseBinaryOp(Token::BIT_OR, assembler);
1515 }
1516
1517 // BitwiseXor <src>
1518 //
1519 // BitwiseXor register <src> to accumulator.
1520 void Interpreter::DoBitwiseXor(InterpreterAssembler* assembler) {
1521 DoBitwiseBinaryOp(Token::BIT_XOR, assembler);
1522 }
1523
1524 // BitwiseAnd <src>
1525 //
1526 // BitwiseAnd register <src> to accumulator.
1527 void Interpreter::DoBitwiseAnd(InterpreterAssembler* assembler) {
1528 DoBitwiseBinaryOp(Token::BIT_AND, assembler);
1529 }
1530
1531 // ShiftLeft <src>
1532 //
1533 // Left shifts register <src> by the count specified in the accumulator.
1534 // Register <src> is converted to an int32 and the accumulator to uint32
1535 // before the operation. The 5 lsb bits of the accumulator are used as the
1536 // count, i.e. <src> << (accumulator & 0x1F).
1537 void Interpreter::DoShiftLeft(InterpreterAssembler* assembler) {
1538 DoBitwiseBinaryOp(Token::SHL, assembler);
1539 }
1540
1541 // ShiftRight <src>
1542 //
1543 // Right shifts register <src> by the count specified in the accumulator.
1544 // Result is sign extended. Register <src> is converted to an int32 and the
1545 // accumulator to uint32 before the operation. The 5 lsb bits of the
1546 // accumulator are used as the count, i.e. <src> >> (accumulator & 0x1F).
1547 void Interpreter::DoShiftRight(InterpreterAssembler* assembler) {
1548 DoBitwiseBinaryOp(Token::SAR, assembler);
1549 }
1550
1551 // ShiftRightLogical <src>
1552 //
1553 // Right shifts register <src> by the count specified in the accumulator.
1554 // Result is zero-filled. The accumulator and register <src> are converted to
1555 // uint32 before the operation. The 5 lsb bits of the accumulator are used as
1556 // the count, i.e. <src> >>> (accumulator & 0x1F).
1557 void Interpreter::DoShiftRightLogical(InterpreterAssembler* assembler) {
1558 DoBitwiseBinaryOp(Token::SHR, assembler);
1559 }
1560
1561 // AddSmi <imm> <reg>
1562 //
1563 // Adds an immediate value <imm> to register <reg>. For this
1564 // operation <reg> is the lhs operand and <imm> is the rhs operand.
1565 void Interpreter::DoAddSmi(InterpreterAssembler* assembler) {
1566 Variable var_result(assembler, MachineRepresentation::kTagged);
1567 Label fastpath(assembler), slowpath(assembler, Label::kDeferred),
1568 end(assembler);
1569
1570 Node* reg_index = __ BytecodeOperandReg(1);
1571 Node* left = __ LoadRegister(reg_index);
1572 Node* right = __ BytecodeOperandImmSmi(0);
1573 Node* slot_index = __ BytecodeOperandIdx(2);
1574 Node* feedback_vector = __ LoadFeedbackVector();
1575
1576 // {right} is known to be a Smi.
1577 // Check if {left} is also a Smi and, if so, take the fast path.
1578 __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
1579 __ Bind(&fastpath);
1580 {
1581 // Try fast Smi addition first.
1582 Node* pair = __ IntPtrAddWithOverflow(__ BitcastTaggedToWord(left),
1583 __ BitcastTaggedToWord(right));
1584 Node* overflow = __ Projection(1, pair);
1585
1586 // Check if the Smi addition overflowed.
1587 Label if_notoverflow(assembler);
1588 __ Branch(overflow, &slowpath, &if_notoverflow);
1589 __ Bind(&if_notoverflow);
1590 {
1591 __ UpdateFeedback(__ SmiConstant(BinaryOperationFeedback::kSignedSmall),
1592 feedback_vector, slot_index);
1593 var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
1594 __ Goto(&end);
1595 }
1596 }
1597 __ Bind(&slowpath);
1598 {
1599 Node* context = __ GetContext();
1600 AddWithFeedbackStub stub(__ isolate());
1601 Callable callable =
1602 Callable(stub.GetCode(), AddWithFeedbackStub::Descriptor(__ isolate()));
1603 var_result.Bind(__ CallStub(callable, context, left, right,
1604 __ TruncateWordToWord32(slot_index),
1605 feedback_vector));
1606 __ Goto(&end);
1607 }
1608 __ Bind(&end);
1609 {
1610 __ SetAccumulator(var_result.value());
1611 __ Dispatch();
1612 }
1613 }
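
The fast path works because a Smi is the integer value shifted left by the tag size, so tagged addition with an overflow check is a single machine add on the bitcast words; a sketch under a 32-bit tagging assumption (real V8 tagging is platform-specific):

  #include <cstdint>

  // Sketch of the AddSmi fast path above: add the tagged words directly
  // and fall back to the AddWithFeedbackStub slow path on overflow.
  bool AddSmiFastPathSketch(int32_t tagged_left, int32_t tagged_right,
                            int32_t* tagged_result) {
    int64_t sum = static_cast<int64_t>(tagged_left) + tagged_right;
    if (sum < INT32_MIN || sum > INT32_MAX) return false;  // take slow path
    *tagged_result = static_cast<int32_t>(sum);  // Smi + Smi is a valid Smi
    return true;
  }
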
1614
1615 // SubSmi <imm> <reg>
1616 //
1617 // Subtracts an immediate value <imm> from register <reg>. For this
1618 // operation <reg> is the lhs operand and <imm> is the rhs operand.
1619 void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
1620 Variable var_result(assembler, MachineRepresentation::kTagged);
1621 Label fastpath(assembler), slowpath(assembler, Label::kDeferred),
1622 end(assembler);
1623
1624 Node* reg_index = __ BytecodeOperandReg(1);
1625 Node* left = __ LoadRegister(reg_index);
1626 Node* right = __ BytecodeOperandImmSmi(0);
1627 Node* slot_index = __ BytecodeOperandIdx(2);
1628 Node* feedback_vector = __ LoadFeedbackVector();
1629
1630 // {right} is known to be a Smi.
1631 // Check if {left} is also a Smi and, if so, take the fast path.
1632 __ Branch(__ TaggedIsSmi(left), &fastpath, &slowpath);
1633 __ Bind(&fastpath);
1634 {
1635 // Try fast Smi subtraction first.
1636 Node* pair = __ IntPtrSubWithOverflow(__ BitcastTaggedToWord(left),
1637 __ BitcastTaggedToWord(right));
1638 Node* overflow = __ Projection(1, pair);
1639
1640 // Check if the Smi subtraction overflowed.
1641 Label if_notoverflow(assembler);
1642 __ Branch(overflow, &slowpath, &if_notoverflow);
1643 __ Bind(&if_notoverflow);
1644 {
1645 __ UpdateFeedback(__ SmiConstant(BinaryOperationFeedback::kSignedSmall),
1646 feedback_vector, slot_index);
1647 var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
1648 __ Goto(&end);
1649 }
1650 }
1651 __ Bind(&slowpath);
1652 {
1653 Node* context = __ GetContext();
1654 SubtractWithFeedbackStub stub(__ isolate());
1655 Callable callable = Callable(
1656 stub.GetCode(), SubtractWithFeedbackStub::Descriptor(__ isolate()));
1657 var_result.Bind(__ CallStub(callable, context, left, right,
1658 __ TruncateWordToWord32(slot_index),
1659 feedback_vector));
1660 __ Goto(&end);
1661 }
1662 __ Bind(&end);
1663 {
1664 __ SetAccumulator(var_result.value());
1665 __ Dispatch();
1666 }
1667 }
1668
1669 // BitwiseOr <imm> <reg>
1670 //
1671 // BitwiseOr <reg> with <imm>. For this operation <reg> is the lhs
1672 // operand and <imm> is the rhs operand.
1673 void Interpreter::DoBitwiseOrSmi(InterpreterAssembler* assembler) {
1674 Node* reg_index = __ BytecodeOperandReg(1);
1675 Node* left = __ LoadRegister(reg_index);
1676 Node* right = __ BytecodeOperandImmSmi(0);
1677 Node* context = __ GetContext();
1678 Node* slot_index = __ BytecodeOperandIdx(2);
1679 Node* feedback_vector = __ LoadFeedbackVector();
1680 Variable var_lhs_type_feedback(assembler,
1681 MachineRepresentation::kTaggedSigned);
1682 Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
1683 context, left, &var_lhs_type_feedback);
1684 Node* rhs_value = __ SmiToWord32(right);
1685 Node* value = __ Word32Or(lhs_value, rhs_value);
1686 Node* result = __ ChangeInt32ToTagged(value);
1687 Node* result_type = __ SelectSmiConstant(
1688 __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
1689 BinaryOperationFeedback::kNumber);
1690 __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
1691 feedback_vector, slot_index);
1692 __ SetAccumulator(result);
1693 __ Dispatch();
1694 }
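
// A hedged sketch of the feedback combination above: feedback kinds form a
// bit lattice in which bitwise-or is the join, so recording feedback is
// monotone and never forgets what was seen. The concrete constants here are
// illustrative, not the real BinaryOperationFeedback encodings.
#include <cstdint>

enum Feedback : uint32_t {
  kNone = 0,
  kSignedSmall = 1 << 0,
  kNumber = (1 << 1) | kSignedSmall,  // Number subsumes SignedSmall.
  kAny = 0x7F,
};

constexpr uint32_t CombineFeedback(uint32_t lhs, uint32_t result) {
  return lhs | result;  // Mirrors __ SmiOr(result_type, lhs_feedback).
}

static_assert(CombineFeedback(kSignedSmall, kNumber) == kNumber,
              "joining with a more general kind keeps the general kind");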
1695
1696 // BitwiseAndSmi <imm> <reg>
1697 //
1698 // BitwiseAnd <reg> with <imm>. For this operation <reg> is the lhs
1699 // operand and <imm> is the rhs operand.
1700 void Interpreter::DoBitwiseAndSmi(InterpreterAssembler* assembler) {
1701 Node* reg_index = __ BytecodeOperandReg(1);
1702 Node* left = __ LoadRegister(reg_index);
1703 Node* right = __ BytecodeOperandImmSmi(0);
1704 Node* context = __ GetContext();
1705 Node* slot_index = __ BytecodeOperandIdx(2);
1706 Node* feedback_vector = __ LoadFeedbackVector();
1707 Variable var_lhs_type_feedback(assembler,
1708 MachineRepresentation::kTaggedSigned);
1709 Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
1710 context, left, &var_lhs_type_feedback);
1711 Node* rhs_value = __ SmiToWord32(right);
1712 Node* value = __ Word32And(lhs_value, rhs_value);
1713 Node* result = __ ChangeInt32ToTagged(value);
1714 Node* result_type = __ SelectSmiConstant(
1715 __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
1716 BinaryOperationFeedback::kNumber);
1717 __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
1718 feedback_vector, slot_index);
1719 __ SetAccumulator(result);
1720 __ Dispatch();
1721 }
1722
1723 // ShiftLeftSmi <imm> <reg>
1724 //
1725 // Left shifts register <reg> by the count specified in <imm>.
1726 // Register <reg> is converted to an int32 before the operation. The five
1727 // least significant bits of <imm> are used as the count, i.e. <reg> << (<imm> & 0x1F).
1728 void Interpreter::DoShiftLeftSmi(InterpreterAssembler* assembler) {
1729 Node* reg_index = __ BytecodeOperandReg(1);
1730 Node* left = __ LoadRegister(reg_index);
1731 Node* right = __ BytecodeOperandImmSmi(0);
1732 Node* context = __ GetContext();
1733 Node* slot_index = __ BytecodeOperandIdx(2);
1734 Node* feedback_vector = __ LoadFeedbackVector();
1735 Variable var_lhs_type_feedback(assembler,
1736 MachineRepresentation::kTaggedSigned);
1737 Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
1738 context, left, &var_lhs_type_feedback);
1739 Node* rhs_value = __ SmiToWord32(right);
1740 Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
1741 Node* value = __ Word32Shl(lhs_value, shift_count);
1742 Node* result = __ ChangeInt32ToTagged(value);
1743 Node* result_type = __ SelectSmiConstant(
1744 __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
1745 BinaryOperationFeedback::kNumber);
1746 __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
1747 feedback_vector, slot_index);
1748 __ SetAccumulator(result);
1749 __ Dispatch();
1750 }
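
// The 0x1F mask mirrors ECMAScript shift semantics: only the low five bits
// of the rhs count, so (1 << 33) evaluates to 2 in JS. A sketch of the same
// rule in plain C++ (JsShiftLeft is an illustrative name):
#include <cstdint>

int32_t JsShiftLeft(int32_t lhs, int32_t rhs) {
  return lhs << (rhs & 0x1F);  // Shift count is rhs mod 32, per the spec.
}
// JsShiftLeft(1, 33) == 2, matching the JavaScript expression 1 << 33.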
1751
1752 // ShiftRightSmi <imm> <reg>
1753 //
1754 // Right shifts register <reg> by the count specified in <imm>.
1755 // Register <reg> is converted to an int32 before the operation. The five
1756 // least significant bits of <imm> are used as the count, i.e. <reg> >> (<imm> & 0x1F).
1757 void Interpreter::DoShiftRightSmi(InterpreterAssembler* assembler) {
1758 Node* reg_index = __ BytecodeOperandReg(1);
1759 Node* left = __ LoadRegister(reg_index);
1760 Node* right = __ BytecodeOperandImmSmi(0);
1761 Node* context = __ GetContext();
1762 Node* slot_index = __ BytecodeOperandIdx(2);
1763 Node* feedback_vector = __ LoadFeedbackVector();
1764 Variable var_lhs_type_feedback(assembler,
1765 MachineRepresentation::kTaggedSigned);
1766 Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
1767 context, left, &var_lhs_type_feedback);
1768 Node* rhs_value = __ SmiToWord32(right);
1769 Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
1770 Node* value = __ Word32Sar(lhs_value, shift_count);
1771 Node* result = __ ChangeInt32ToTagged(value);
1772 Node* result_type = __ SelectSmiConstant(
1773 __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
1774 BinaryOperationFeedback::kNumber);
1775 __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
1776 feedback_vector, slot_index);
1777 __ SetAccumulator(result);
1778 __ Dispatch();
1779 }
1780
1781 Node* Interpreter::BuildUnaryOp(Callable callable,
1782 InterpreterAssembler* assembler) {
1783 Node* target = __ HeapConstant(callable.code());
1784 Node* accumulator = __ GetAccumulator();
1785 Node* context = __ GetContext();
1786 return __ CallStub(callable.descriptor(), target, context, accumulator);
1787 }
1788
1789 template <class Generator>
1790 void Interpreter::DoUnaryOpWithFeedback(InterpreterAssembler* assembler) {
1791 Node* value = __ GetAccumulator();
1792 Node* context = __ GetContext();
1793 Node* slot_index = __ BytecodeOperandIdx(0);
1794 Node* feedback_vector = __ LoadFeedbackVector();
1795 Node* result = Generator::Generate(assembler, value, context, feedback_vector,
1796 slot_index);
1797 __ SetAccumulator(result);
1798 __ Dispatch();
1799 }
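
// The Generator parameter is a compile-time policy: any type with a static
// Generate() of the right shape can be plugged in, with no virtual dispatch.
// A minimal standalone sketch of the same pattern under toy types (Doubler
// and ApplyUnaryOp are illustrative, not V8 names):
#include <iostream>

struct Doubler {
  static int Generate(int value) { return value * 2; }
};

template <class Generator>
int ApplyUnaryOp(int value) {
  return Generator::Generate(value);  // Resolved statically, as above.
}

int main() {
  std::cout << ApplyUnaryOp<Doubler>(21) << "\n";  // Prints 42.
}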
1800
1801 // ToName
1802 //
1803 // Convert the object referenced by the accumulator to a name.
1804 void Interpreter::DoToName(InterpreterAssembler* assembler) {
1805 Node* object = __ GetAccumulator();
1806 Node* context = __ GetContext();
1807 Node* result = __ ToName(context, object);
1808 __ StoreRegister(result, __ BytecodeOperandReg(0));
1809 __ Dispatch();
1810 }
1811
1812 // ToNumber
1813 //
1814 // Convert the object referenced by the accumulator to a number.
1815 void Interpreter::DoToNumber(InterpreterAssembler* assembler) {
1816 Node* object = __ GetAccumulator();
1817 Node* context = __ GetContext();
1818 Node* result = __ ToNumber(context, object);
1819 __ StoreRegister(result, __ BytecodeOperandReg(0));
1820 __ Dispatch();
1821 }
1822
1823 // ToObject
1824 //
1825 // Convert the object referenced by the accumulator to a JSReceiver.
1826 void Interpreter::DoToObject(InterpreterAssembler* assembler) {
1827 Node* result = BuildUnaryOp(CodeFactory::ToObject(isolate_), assembler);
1828 __ StoreRegister(result, __ BytecodeOperandReg(0));
1829 __ Dispatch();
1830 }
1831
1832 // Inc
1833 //
1834 // Increments value in the accumulator by one.
1835 void Interpreter::DoInc(InterpreterAssembler* assembler) {
1836 typedef CodeStubAssembler::Label Label;
1837 typedef compiler::Node Node;
1838 typedef CodeStubAssembler::Variable Variable;
1839
1840 Node* value = __ GetAccumulator();
1841 Node* context = __ GetContext();
1842 Node* slot_index = __ BytecodeOperandIdx(0);
1843 Node* feedback_vector = __ LoadFeedbackVector();
1844
1845 // Shared entry for floating point increment.
1846 Label do_finc(assembler), end(assembler);
1847 Variable var_finc_value(assembler, MachineRepresentation::kFloat64);
1848
1849 // We might need to try again due to ToNumber conversion.
1850 Variable value_var(assembler, MachineRepresentation::kTagged);
1851 Variable result_var(assembler, MachineRepresentation::kTagged);
1852 Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
1853 Variable* loop_vars[] = {&value_var, &var_type_feedback};
1854 Label start(assembler, 2, loop_vars);
1855 value_var.Bind(value);
1856 var_type_feedback.Bind(
1857 assembler->SmiConstant(BinaryOperationFeedback::kNone));
1858 assembler->Goto(&start);
1859 assembler->Bind(&start);
1860 {
1861 value = value_var.value();
1862
1863 Label if_issmi(assembler), if_isnotsmi(assembler);
1864 assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
1865
1866 assembler->Bind(&if_issmi);
1867 {
1868 // Try fast Smi addition first.
1869 Node* one = assembler->SmiConstant(Smi::FromInt(1));
1870 Node* pair = assembler->IntPtrAddWithOverflow(
1871 assembler->BitcastTaggedToWord(value),
1872 assembler->BitcastTaggedToWord(one));
1873 Node* overflow = assembler->Projection(1, pair);
1874
1875 // Check if the Smi addition overflowed.
1876 Label if_overflow(assembler), if_notoverflow(assembler);
1877 assembler->Branch(overflow, &if_overflow, &if_notoverflow);
1878
1879 assembler->Bind(&if_notoverflow);
1880 var_type_feedback.Bind(assembler->SmiOr(
1881 var_type_feedback.value(),
1882 assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall)));
1883 result_var.Bind(
1884 assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
1885 assembler->Goto(&end);
1886
1887 assembler->Bind(&if_overflow);
1888 {
1889 var_finc_value.Bind(assembler->SmiToFloat64(value));
1890 assembler->Goto(&do_finc);
1891 }
1892 }
1893
1894 assembler->Bind(&if_isnotsmi);
1895 {
1896 // Check if the value is a HeapNumber.
1897 Label if_valueisnumber(assembler),
1898 if_valuenotnumber(assembler, Label::kDeferred);
1899 Node* value_map = assembler->LoadMap(value);
1900 assembler->Branch(assembler->IsHeapNumberMap(value_map),
1901 &if_valueisnumber, &if_valuenotnumber);
1902
1903 assembler->Bind(&if_valueisnumber);
1904 {
1905 // Load the HeapNumber value.
1906 var_finc_value.Bind(assembler->LoadHeapNumberValue(value));
1907 assembler->Goto(&do_finc);
1908 }
1909
1910 assembler->Bind(&if_valuenotnumber);
1911 {
1912 // We do not require an Or with earlier feedback here because once we
1913 // convert the value to a number, we cannot reach this path. We can
1914 // only reach this path on the first pass when the feedback is kNone.
1915 CSA_ASSERT(assembler,
1916 assembler->SmiEqual(
1917 var_type_feedback.value(),
1918 assembler->SmiConstant(BinaryOperationFeedback::kNone)));
1919
1920 Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
1921 Node* instance_type = assembler->LoadMapInstanceType(value_map);
1922 Node* is_oddball = assembler->Word32Equal(
1923 instance_type, assembler->Int32Constant(ODDBALL_TYPE));
1924 assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
1925
1926 assembler->Bind(&if_valueisoddball);
1927 {
1928 // Convert Oddball to Number and check again.
1929 value_var.Bind(
1930 assembler->LoadObjectField(value, Oddball::kToNumberOffset));
1931 var_type_feedback.Bind(assembler->SmiConstant(
1932 BinaryOperationFeedback::kNumberOrOddball));
1933 assembler->Goto(&start);
1934 }
1935
1936 assembler->Bind(&if_valuenotoddball);
1937 {
1938 // Convert to a Number first and try again.
1939 Callable callable =
1940 CodeFactory::NonNumberToNumber(assembler->isolate());
1941 var_type_feedback.Bind(
1942 assembler->SmiConstant(BinaryOperationFeedback::kAny));
1943 value_var.Bind(assembler->CallStub(callable, context, value));
1944 assembler->Goto(&start);
1945 }
1946 }
1947 }
1948 }
1949
1950 assembler->Bind(&do_finc);
1951 {
1952 Node* finc_value = var_finc_value.value();
1953 Node* one = assembler->Float64Constant(1.0);
1954 Node* finc_result = assembler->Float64Add(finc_value, one);
1955 var_type_feedback.Bind(assembler->SmiOr(
1956 var_type_feedback.value(),
1957 assembler->SmiConstant(BinaryOperationFeedback::kNumber)));
1958 result_var.Bind(assembler->AllocateHeapNumberWithValue(finc_result));
1959 assembler->Goto(&end);
1960 }
1961
1962 assembler->Bind(&end);
1963 assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector,
1964 slot_index);
1965
1966 __ SetAccumulator(result_var.value());
1967 __ Dispatch();
1968 }
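
// A hedged sketch of the retry loop above: a non-number is converted to a
// number exactly once, then control re-enters the Smi/HeapNumber dispatch at
// &start. Plain C++ with a toy Value type standing in for tagged values
// (Oddball/IncrementToNumber are illustrative, not V8's object model):
#include <cstdint>
#include <variant>

struct Oddball { double to_number; };  // e.g. true carries 1.0
using Value = std::variant<int32_t, double, Oddball>;

double IncrementToNumber(Value value) {
  for (;;) {  // Mirrors the Goto(&start) loop.
    if (auto* smi = std::get_if<int32_t>(&value)) return *smi + 1.0;
    if (auto* num = std::get_if<double>(&value)) return *num + 1.0;
    // Oddball: convert to its cached number value and try again.
    value = std::get<Oddball>(value).to_number;
  }
}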
1969
1970 // Dec
1971 //
1972 // Decrements value in the accumulator by one.
1973 void Interpreter::DoDec(InterpreterAssembler* assembler) {
1974 typedef CodeStubAssembler::Label Label;
1975 typedef compiler::Node Node;
1976 typedef CodeStubAssembler::Variable Variable;
1977
1978 Node* value = __ GetAccumulator();
1979 Node* context = __ GetContext();
1980 Node* slot_index = __ BytecodeOperandIdx(0);
1981 Node* feedback_vector = __ LoadFeedbackVector();
1982
1983 // Shared entry for floating point decrement.
1984 Label do_fdec(assembler), end(assembler);
1985 Variable var_fdec_value(assembler, MachineRepresentation::kFloat64);
1986
1987 // We might need to try again due to ToNumber conversion.
1988 Variable value_var(assembler, MachineRepresentation::kTagged);
1989 Variable result_var(assembler, MachineRepresentation::kTagged);
1990 Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
1991 Variable* loop_vars[] = {&value_var, &var_type_feedback};
1992 Label start(assembler, 2, loop_vars);
1993 var_type_feedback.Bind(
1994 assembler->SmiConstant(BinaryOperationFeedback::kNone));
1995 value_var.Bind(value);
1996 assembler->Goto(&start);
1997 assembler->Bind(&start);
1998 {
1999 value = value_var.value();
2000
2001 Label if_issmi(assembler), if_isnotsmi(assembler);
2002 assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
2003
2004 assembler->Bind(&if_issmi);
2005 {
2006 // Try fast Smi subtraction first.
2007 Node* one = assembler->SmiConstant(Smi::FromInt(1));
2008 Node* pair = assembler->IntPtrSubWithOverflow(
2009 assembler->BitcastTaggedToWord(value),
2010 assembler->BitcastTaggedToWord(one));
2011 Node* overflow = assembler->Projection(1, pair);
2012
2013 // Check if the Smi subtraction overflowed.
2014 Label if_overflow(assembler), if_notoverflow(assembler);
2015 assembler->Branch(overflow, &if_overflow, &if_notoverflow);
2016
2017 assembler->Bind(&if_notoverflow);
2018 var_type_feedback.Bind(assembler->SmiOr(
2019 var_type_feedback.value(),
2020 assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall)));
2021 result_var.Bind(
2022 assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
2023 assembler->Goto(&end);
2024
2025 assembler->Bind(&if_overflow);
2026 {
2027 var_fdec_value.Bind(assembler->SmiToFloat64(value));
2028 assembler->Goto(&do_fdec);
2029 }
2030 }
2031
2032 assembler->Bind(&if_isnotsmi);
2033 {
2034 // Check if the value is a HeapNumber.
2035 Label if_valueisnumber(assembler),
2036 if_valuenotnumber(assembler, Label::kDeferred);
2037 Node* value_map = assembler->LoadMap(value);
2038 assembler->Branch(assembler->IsHeapNumberMap(value_map),
2039 &if_valueisnumber, &if_valuenotnumber);
2040
2041 assembler->Bind(&if_valueisnumber);
2042 {
2043 // Load the HeapNumber value.
2044 var_fdec_value.Bind(assembler->LoadHeapNumberValue(value));
2045 assembler->Goto(&do_fdec);
2046 }
2047
2048 assembler->Bind(&if_valuenotnumber);
2049 {
2050 // We do not require an Or with earlier feedback here because once we
2051 // convert the value to a number, we cannot reach this path. We can
2052 // only reach this path on the first pass when the feedback is kNone.
2053 CSA_ASSERT(assembler,
2054 assembler->SmiEqual(
2055 var_type_feedback.value(),
2056 assembler->SmiConstant(BinaryOperationFeedback::kNone)));
2057
2058 Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
2059 Node* instance_type = assembler->LoadMapInstanceType(value_map);
2060 Node* is_oddball = assembler->Word32Equal(
2061 instance_type, assembler->Int32Constant(ODDBALL_TYPE));
2062 assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
2063
2064 assembler->Bind(&if_valueisoddball);
2065 {
2066 // Convert Oddball to Number and check again.
2067 value_var.Bind(
2068 assembler->LoadObjectField(value, Oddball::kToNumberOffset));
2069 var_type_feedback.Bind(assembler->SmiConstant(
2070 BinaryOperationFeedback::kNumberOrOddball));
2071 assembler->Goto(&start);
2072 }
2073
2074 assembler->Bind(&if_valuenotoddball);
2075 {
2076 // Convert to a Number first and try again.
2077 Callable callable =
2078 CodeFactory::NonNumberToNumber(assembler->isolate());
2079 var_type_feedback.Bind(
2080 assembler->SmiConstant(BinaryOperationFeedback::kAny));
2081 value_var.Bind(assembler->CallStub(callable, context, value));
2082 assembler->Goto(&start);
2083 }
2084 }
2085 }
2086 }
2087
2088 assembler->Bind(&do_fdec);
2089 {
2090 Node* fdec_value = var_fdec_value.value();
2091 Node* one = assembler->Float64Constant(1.0);
2092 Node* fdec_result = assembler->Float64Sub(fdec_value, one);
2093 var_type_feedback.Bind(assembler->SmiOr(
2094 var_type_feedback.value(),
2095 assembler->SmiConstant(BinaryOperationFeedback::kNumber)));
2096 result_var.Bind(assembler->AllocateHeapNumberWithValue(fdec_result));
2097 assembler->Goto(&end);
2098 }
2099
2100 assembler->Bind(&end);
2101 assembler->UpdateFeedback(var_type_feedback.value(), feedback_vector,
2102 slot_index);
2103
2104 __ SetAccumulator(result_var.value());
2105 __ Dispatch();
2106 }
2107
2108 // ToBooleanLogicalNot
2109 //
2110 // Perform logical-not on the accumulator, first casting the
2111 // accumulator to a boolean value if required.
2113 void Interpreter::DoToBooleanLogicalNot(InterpreterAssembler* assembler) {
2114 Node* value = __ GetAccumulator();
2115 Variable result(assembler, MachineRepresentation::kTagged);
2116 Label if_true(assembler), if_false(assembler), end(assembler);
2117 Node* true_value = __ BooleanConstant(true);
2118 Node* false_value = __ BooleanConstant(false);
2119 __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
2120 __ Bind(&if_true);
2121 {
2122 result.Bind(false_value);
2123 __ Goto(&end);
2124 }
2125 __ Bind(&if_false);
2126 {
2127 result.Bind(true_value);
2128 __ Goto(&end);
2129 }
2130 __ Bind(&end);
2131 __ SetAccumulator(result.value());
2132 __ Dispatch();
2133 }
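
// BranchIfToBooleanIsTrue encodes the ES ToBoolean coercion. A sketch of two
// rows of its truth table in plain C++ (not exhaustive; engine-internal
// cases such as undetectable objects are omitted):
#include <cmath>
#include <string>

bool ToBooleanSketch(double number) {
  return number != 0.0 && !std::isnan(number);  // 0, -0 and NaN are falsy.
}

bool ToBooleanSketch(const std::string& str) {
  return !str.empty();  // "" is falsy; every non-empty string is truthy.
}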
2134
2135 // LogicalNot
2136 //
2137 // Perform logical-not on the accumulator, which must already be a boolean
2138 // value.
2139 void Interpreter::DoLogicalNot(InterpreterAssembler* assembler) {
2140 Node* value = __ GetAccumulator();
2141 Variable result(assembler, MachineRepresentation::kTagged);
2142 Label if_true(assembler), if_false(assembler), end(assembler);
2143 Node* true_value = __ BooleanConstant(true);
2144 Node* false_value = __ BooleanConstant(false);
2145 __ Branch(__ WordEqual(value, true_value), &if_true, &if_false);
2146 __ Bind(&if_true);
2147 {
2148 result.Bind(false_value);
2149 __ Goto(&end);
2150 }
2151 __ Bind(&if_false);
2152 {
2153 if (FLAG_debug_code) {
2154 __ AbortIfWordNotEqual(value, false_value,
2155 BailoutReason::kExpectedBooleanValue);
2156 }
2157 result.Bind(true_value);
2158 __ Goto(&end);
2159 }
2160 __ Bind(&end);
2161 __ SetAccumulator(result.value());
2162 __ Dispatch();
2163 }
2164
2165 // TypeOf
2166 //
2167 // Load the accumulator with the string representing the type of the
2168 // object in the accumulator.
2169 void Interpreter::DoTypeOf(InterpreterAssembler* assembler) {
2170 Node* value = __ GetAccumulator();
2171 Node* result = assembler->Typeof(value);
2172 __ SetAccumulator(result);
2173 __ Dispatch();
2174 }
2175
2176 void Interpreter::DoDelete(Runtime::FunctionId function_id,
2177 InterpreterAssembler* assembler) {
2178 Node* reg_index = __ BytecodeOperandReg(0);
2179 Node* object = __ LoadRegister(reg_index);
2180 Node* key = __ GetAccumulator();
2181 Node* context = __ GetContext();
2182 Node* result = __ CallRuntime(function_id, context, object, key);
2183 __ SetAccumulator(result);
2184 __ Dispatch();
2185 }
2186
2187 // DeletePropertyStrict
2188 //
2189 // Delete the property specified in the accumulator from the object
2190 // referenced by the register operand following strict mode semantics.
2191 void Interpreter::DoDeletePropertyStrict(InterpreterAssembler* assembler) {
2192 DoDelete(Runtime::kDeleteProperty_Strict, assembler);
2193 }
2194
2195 // DeletePropertySloppy
2196 //
2197 // Delete the property specified in the accumulator from the object
2198 // referenced by the register operand following sloppy mode semantics.
2199 void Interpreter::DoDeletePropertySloppy(InterpreterAssembler* assembler) {
2200 DoDelete(Runtime::kDeleteProperty_Sloppy, assembler);
2201 }
2202
2203 // GetSuperConstructor
2204 //
2205 // Get the super constructor from the object referenced by the accumulator.
2206 // The result is stored in register |reg|.
2207 void Interpreter::DoGetSuperConstructor(InterpreterAssembler* assembler) {
2208 Node* active_function = __ GetAccumulator();
2209 Node* context = __ GetContext();
2210 Node* result = __ GetSuperConstructor(active_function, context);
2211 Node* reg = __ BytecodeOperandReg(0);
2212 __ StoreRegister(result, reg);
2213 __ Dispatch();
2214 }
2215
2216 void Interpreter::DoJSCall(InterpreterAssembler* assembler,
2217 TailCallMode tail_call_mode) {
2218 Node* function_reg = __ BytecodeOperandReg(0);
2219 Node* function = __ LoadRegister(function_reg);
2220 Node* receiver_reg = __ BytecodeOperandReg(1);
2221 Node* receiver_arg = __ RegisterLocation(receiver_reg);
2222 Node* receiver_args_count = __ BytecodeOperandCount(2);
2223 Node* receiver_count = __ Int32Constant(1);
2224 Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
2225 Node* slot_id = __ BytecodeOperandIdx(3);
2226 Node* feedback_vector = __ LoadFeedbackVector();
2227 Node* context = __ GetContext();
2228 Node* result =
2229 __ CallJSWithFeedback(function, context, receiver_arg, args_count,
2230 slot_id, feedback_vector, tail_call_mode);
2231 __ SetAccumulator(result);
2232 __ Dispatch();
2233 }
2234
2235 void Interpreter::DoJSCallN(InterpreterAssembler* assembler, int arg_count) {
2236 const int kReceiverOperandIndex = 1;
2237 const int kReceiverOperandCount = 1;
2238 const int kSlotOperandIndex =
2239 kReceiverOperandIndex + kReceiverOperandCount + arg_count;
2240 const int kBoilerplateParameterCount = 7;
2241 const int kReceiverParameterIndex = 5;
2242
2243 Node* function_reg = __ BytecodeOperandReg(0);
2244 Node* function = __ LoadRegister(function_reg);
2245 std::array<Node*, Bytecodes::kMaxOperands + kBoilerplateParameterCount> temp;
2246 Callable call_ic = CodeFactory::CallIC(isolate_);
2247 temp[0] = __ HeapConstant(call_ic.code());
2248 temp[1] = function;
2249 temp[2] = __ Int32Constant(arg_count);
2250 temp[3] = __ BytecodeOperandIdxInt32(kSlotOperandIndex);
2251 temp[4] = __ LoadFeedbackVector();
2252 for (int i = 0; i < (arg_count + kReceiverOperandCount); ++i) {
2253 Node* reg = __ BytecodeOperandReg(i + kReceiverOperandIndex);
2254 temp[kReceiverParameterIndex + i] = __ LoadRegister(reg);
2255 }
2256 temp[kReceiverParameterIndex + arg_count + kReceiverOperandCount] =
2257 __ GetContext();
2258 Node* result = __ CallStubN(call_ic.descriptor(), 1,
2259 arg_count + kBoilerplateParameterCount, &temp[0]);
2260 __ SetAccumulator(result);
2261 __ Dispatch();
2262 }
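
// The flat buffer above packs the CallIC inputs in a fixed layout: indices
// 0-4 hold code target, callee, argument count, feedback slot and feedback
// vector; index 5 holds the receiver, the arguments follow, and the context
// comes last. A runnable check of that arithmetic (names illustrative):
constexpr int kFixedCallParams = 5;  // target, callee, count, slot, vector
constexpr int TotalCallParams(int arg_count) {
  return kFixedCallParams + 1 /* receiver */ + arg_count + 1 /* context */;
}
static_assert(TotalCallParams(0) == 7,
              "matches kBoilerplateParameterCount in the handler above");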
2263
2264 // Call <callable> <receiver> <arg_count> <feedback_slot_id>
2265 //
2266 // Call a JSFunction or Callable in |callable| with the |receiver| and
2267 // |arg_count| arguments in subsequent registers. Collect type feedback
2268 // into |feedback_slot_id|.
2269 void Interpreter::DoCall(InterpreterAssembler* assembler) {
2270 DoJSCall(assembler, TailCallMode::kDisallow);
2271 }
2272
2273 void Interpreter::DoCall0(InterpreterAssembler* assembler) {
2274 DoJSCallN(assembler, 0);
2275 }
2276
2277 void Interpreter::DoCall1(InterpreterAssembler* assembler) {
2278 DoJSCallN(assembler, 1);
2279 }
2280
2281 void Interpreter::DoCall2(InterpreterAssembler* assembler) {
2282 DoJSCallN(assembler, 2);
2283 }
2284
2285 void Interpreter::DoCallProperty(InterpreterAssembler* assembler) {
2286 // Same as Call
2287 UNREACHABLE();
2288 }
2289
2290 void Interpreter::DoCallProperty0(InterpreterAssembler* assembler) {
2291 // Same as Call0
2292 UNREACHABLE();
2293 }
2294
2295 void Interpreter::DoCallProperty1(InterpreterAssembler* assembler) {
2296 // Same as Call1
2297 UNREACHABLE();
2298 }
2299
2300 void Interpreter::DoCallProperty2(InterpreterAssembler* assembler) {
2301 // Same as Call2
2302 UNREACHABLE();
2303 }
2304
2305 // TailCall <callable> <receiver> <arg_count> <feedback_slot_id>
2306 //
2307 // Tail call a JSFunction or Callable in |callable| with the |receiver| and
2308 // |arg_count| arguments in subsequent registers. Collect type feedback
2309 // into |feedback_slot_id|.
2310 void Interpreter::DoTailCall(InterpreterAssembler* assembler) {
2311 DoJSCall(assembler, TailCallMode::kAllow);
2312 }
2313
2314 // CallRuntime <function_id> <first_arg> <arg_count>
2315 //
2316 // Call the runtime function |function_id| with the first argument in
2317 // register |first_arg| and |arg_count| arguments in subsequent
2318 // registers.
2319 void Interpreter::DoCallRuntime(InterpreterAssembler* assembler) {
2320 Node* function_id = __ BytecodeOperandRuntimeId(0);
2321 Node* first_arg_reg = __ BytecodeOperandReg(1);
2322 Node* first_arg = __ RegisterLocation(first_arg_reg);
2323 Node* args_count = __ BytecodeOperandCount(2);
2324 Node* context = __ GetContext();
2325 Node* result = __ CallRuntimeN(function_id, context, first_arg, args_count);
2326 __ SetAccumulator(result);
2327 __ Dispatch();
2328 }
2329
2330 // InvokeIntrinsic <function_id> <first_arg> <arg_count>
2331 //
2332 // Implements the semantic equivalent of calling the runtime function
2333 // |function_id| with the first argument in |first_arg| and |arg_count|
2334 // arguments in subsequent registers.
2335 void Interpreter::DoInvokeIntrinsic(InterpreterAssembler* assembler) {
2336 Node* function_id = __ BytecodeOperandIntrinsicId(0);
2337 Node* first_arg_reg = __ BytecodeOperandReg(1);
2338 Node* arg_count = __ BytecodeOperandCount(2);
2339 Node* context = __ GetContext();
2340 IntrinsicsHelper helper(assembler);
2341 Node* result =
2342 helper.InvokeIntrinsic(function_id, context, first_arg_reg, arg_count);
2343 __ SetAccumulator(result);
2344 __ Dispatch();
2345 }
2346
2347 // CallRuntimeForPair <function_id> <first_arg> <arg_count> <first_return>
2348 //
2349 // Call the runtime function |function_id| which returns a pair, with the
2350 // first argument in register |first_arg| and |arg_count| arguments in
2351 // subsequent registers. Returns the result in <first_return> and
2352 // <first_return + 1>.
2353 void Interpreter::DoCallRuntimeForPair(InterpreterAssembler* assembler) {
2354 // Call the runtime function.
2355 Node* function_id = __ BytecodeOperandRuntimeId(0);
2356 Node* first_arg_reg = __ BytecodeOperandReg(1);
2357 Node* first_arg = __ RegisterLocation(first_arg_reg);
2358 Node* args_count = __ BytecodeOperandCount(2);
2359 Node* context = __ GetContext();
2360 Node* result_pair =
2361 __ CallRuntimeN(function_id, context, first_arg, args_count, 2);
2362 // Store the results in <first_return> and <first_return + 1>
2363 Node* first_return_reg = __ BytecodeOperandReg(3);
2364 Node* second_return_reg = __ NextRegister(first_return_reg);
2365 Node* result0 = __ Projection(0, result_pair);
2366 Node* result1 = __ Projection(1, result_pair);
2367 __ StoreRegister(result0, first_return_reg);
2368 __ StoreRegister(result1, second_return_reg);
2369 __ Dispatch();
2370 }
2371
2372 // CallJSRuntime <context_index> <receiver> <arg_count>
2373 //
2374 // Call the JS runtime function that has the |context_index| with the receiver
2375 // in register |receiver| and |arg_count| arguments in subsequent registers.
2376 void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
2377 Node* context_index = __ BytecodeOperandIdx(0);
2378 Node* receiver_reg = __ BytecodeOperandReg(1);
2379 Node* first_arg = __ RegisterLocation(receiver_reg);
2380 Node* receiver_args_count = __ BytecodeOperandCount(2);
2381 Node* receiver_count = __ Int32Constant(1);
2382 Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
2383
2384 // Get the function to call from the native context.
2385 Node* context = __ GetContext();
2386 Node* native_context = __ LoadNativeContext(context);
2387 Node* function = __ LoadContextElement(native_context, context_index);
2388
2389 // Call the function.
2390 Node* result = __ CallJS(function, context, first_arg, args_count,
2391 TailCallMode::kDisallow);
2392 __ SetAccumulator(result);
2393 __ Dispatch();
2394 }
2395
2396 // CallWithSpread <callable> <first_arg> <arg_count>
2397 //
2398 // Call a JSFunction or Callable in |callable| with the receiver in
2399 // |first_arg| and |arg_count - 1| arguments in subsequent registers. The
2400 // final argument is always a spread.
2401 //
2402 void Interpreter::DoCallWithSpread(InterpreterAssembler* assembler) {
2403 Node* callable_reg = __ BytecodeOperandReg(0);
2404 Node* callable = __ LoadRegister(callable_reg);
2405 Node* receiver_reg = __ BytecodeOperandReg(1);
2406 Node* receiver_arg = __ RegisterLocation(receiver_reg);
2407 Node* receiver_args_count = __ BytecodeOperandCount(2);
2408 Node* receiver_count = __ Int32Constant(1);
2409 Node* args_count = __ Int32Sub(receiver_args_count, receiver_count);
2410 Node* context = __ GetContext();
2411
2412 // Call into Runtime function CallWithSpread which does everything.
2413 Node* result =
2414 __ CallJSWithSpread(callable, context, receiver_arg, args_count);
2415 __ SetAccumulator(result);
2416 __ Dispatch();
2417 }
2418
2419 // ConstructWithSpread <constructor> <first_arg> <arg_count>
2420 //
2421 // Call the constructor in |constructor| with the first argument in register
2422 // |first_arg| and |arg_count| arguments in subsequent registers. The final
2423 // argument is always a spread. The new.target is in the accumulator.
2424 //
2425 void Interpreter::DoConstructWithSpread(InterpreterAssembler* assembler) {
2426 Node* new_target = __ GetAccumulator();
2427 Node* constructor_reg = __ BytecodeOperandReg(0);
2428 Node* constructor = __ LoadRegister(constructor_reg);
2429 Node* first_arg_reg = __ BytecodeOperandReg(1);
2430 Node* first_arg = __ RegisterLocation(first_arg_reg);
2431 Node* args_count = __ BytecodeOperandCount(2);
2432 Node* context = __ GetContext();
2433 Node* result = __ ConstructWithSpread(constructor, context, new_target,
2434 first_arg, args_count);
2435 __ SetAccumulator(result);
2436 __ Dispatch();
2437 }
2438
2439 // Construct <constructor> <first_arg> <arg_count>
2440 //
2441 // Call operator construct with |constructor| and the first argument in
2442 // register |first_arg| and |arg_count| arguments in subsequent
2443 // registers. The new.target is in the accumulator.
2444 //
2445 void Interpreter::DoConstruct(InterpreterAssembler* assembler) {
2446 Node* new_target = __ GetAccumulator();
2447 Node* constructor_reg = __ BytecodeOperandReg(0);
2448 Node* constructor = __ LoadRegister(constructor_reg);
2449 Node* first_arg_reg = __ BytecodeOperandReg(1);
2450 Node* first_arg = __ RegisterLocation(first_arg_reg);
2451 Node* args_count = __ BytecodeOperandCount(2);
2452 Node* slot_id = __ BytecodeOperandIdx(3);
2453 Node* feedback_vector = __ LoadFeedbackVector();
2454 Node* context = __ GetContext();
2455 Node* result = __ Construct(constructor, context, new_target, first_arg,
2456 args_count, slot_id, feedback_vector);
2457 __ SetAccumulator(result);
2458 __ Dispatch();
2459 }
2460
2461 // TestEqual <src>
2462 //
2463 // Test if the value in the <src> register equals the accumulator.
2464 void Interpreter::DoTestEqual(InterpreterAssembler* assembler) {
2465 DoCompareOpWithFeedback(Token::Value::EQ, assembler);
2466 }
2467
2468 // TestEqualStrict <src>
2469 //
2470 // Test if the value in the <src> register is strictly equal to the accumulator.
2471 void Interpreter::DoTestEqualStrict(InterpreterAssembler* assembler) {
2472 DoCompareOpWithFeedback(Token::Value::EQ_STRICT, assembler);
2473 }
2474
2475 // TestLessThan <src>
2476 //
2477 // Test if the value in the <src> register is less than the accumulator.
2478 void Interpreter::DoTestLessThan(InterpreterAssembler* assembler) {
2479 DoCompareOpWithFeedback(Token::Value::LT, assembler);
2480 }
2481
2482 // TestGreaterThan <src>
2483 //
2484 // Test if the value in the <src> register is greater than the accumulator.
2485 void Interpreter::DoTestGreaterThan(InterpreterAssembler* assembler) {
2486 DoCompareOpWithFeedback(Token::Value::GT, assembler);
2487 }
2488
2489 // TestLessThanOrEqual <src>
2490 //
2491 // Test if the value in the <src> register is less than or equal to the
2492 // accumulator.
2493 void Interpreter::DoTestLessThanOrEqual(InterpreterAssembler* assembler) {
2494 DoCompareOpWithFeedback(Token::Value::LTE, assembler);
2495 }
2496
2497 // TestGreaterThanOrEqual <src>
2498 //
2499 // Test if the value in the <src> register is greater than or equal to the
2500 // accumulator.
2501 void Interpreter::DoTestGreaterThanOrEqual(InterpreterAssembler* assembler) {
2502 DoCompareOpWithFeedback(Token::Value::GTE, assembler);
2503 }
2504
2505 // TestIn <src>
2506 //
2507 // Test if the object referenced by the register operand is a property of the
2508 // object referenced by the accumulator.
2509 void Interpreter::DoTestIn(InterpreterAssembler* assembler) {
2510 DoCompareOp(Token::IN, assembler);
2511 }
2512
2513 // TestInstanceOf <src>
2514 //
2515 // Test if the object referenced by the <src> register is an instance of
2516 // the type referenced by the accumulator.
2517 void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
2518 DoCompareOp(Token::INSTANCEOF, assembler);
2519 }
2520
2521 // TestUndetectable <src>
2522 //
2523 // Test if the value in the <src> register equals null/undefined. This is
2524 // done by checking the undetectable bit on the map of the object.
2525 void Interpreter::DoTestUndetectable(InterpreterAssembler* assembler) {
2526 Node* reg_index = __ BytecodeOperandReg(0);
2527 Node* object = __ LoadRegister(reg_index);
2528
2529 Label not_equal(assembler), end(assembler);
2530 // If the object is a Smi then return false.
2531 __ GotoIf(__ TaggedIsSmi(object), &not_equal);
2532
2533 // If it is a HeapObject, load the map and check the undetectable bit.
2534 Node* map = __ LoadMap(object);
2535 Node* map_bitfield = __ LoadMapBitField(map);
2536 Node* map_undetectable =
2537 __ Word32And(map_bitfield, __ Int32Constant(1 << Map::kIsUndetectable));
2538 __ GotoIf(__ Word32Equal(map_undetectable, __ Int32Constant(0)), &not_equal);
2539
2540 __ SetAccumulator(__ BooleanConstant(true));
2541 __ Goto(&end);
2542
2543 __ Bind(&not_equal);
2544 {
2545 __ SetAccumulator(__ BooleanConstant(false));
2546 __ Goto(&end);
2547 }
2548
2549 __ Bind(&end);
2550 __ Dispatch();
2551 }
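
// The undetectable check is a single bit test on the map's bitfield. A
// standalone sketch of the same predicate (the bit position here is an
// assumed value for illustration, not V8's actual Map layout):
#include <cstdint>

constexpr int kIsUndetectableBitSketch = 4;  // Assumed position.

bool IsUndetectableSketch(uint8_t map_bitfield) {
  return (map_bitfield & (1 << kIsUndetectableBitSketch)) != 0;
}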
2552
2553 // TestNull <src>
2554 //
2555 // Test if the value in the <src> register is strictly equal to null.
2556 void Interpreter::DoTestNull(InterpreterAssembler* assembler) {
2557 Node* reg_index = __ BytecodeOperandReg(0);
2558 Node* object = __ LoadRegister(reg_index);
2559 Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
2560
2561 Label equal(assembler), end(assembler);
2562 __ GotoIf(__ WordEqual(object, null_value), &equal);
2563 __ SetAccumulator(__ BooleanConstant(false));
2564 __ Goto(&end);
2565
2566 __ Bind(&equal);
2567 {
2568 __ SetAccumulator(__ BooleanConstant(true));
2569 __ Goto(&end);
2570 }
2571
2572 __ Bind(&end);
2573 __ Dispatch();
2574 }
2575
2576 // TestUndefined <src>
2577 //
2578 // Test if the value in the <src> register is strictly equal to undefined.
2579 void Interpreter::DoTestUndefined(InterpreterAssembler* assembler) {
2580 Node* reg_index = __ BytecodeOperandReg(0);
2581 Node* object = __ LoadRegister(reg_index);
2582 Node* undefined_value =
2583 __ HeapConstant(isolate_->factory()->undefined_value());
2584
2585 Label equal(assembler), end(assembler);
2586 __ GotoIf(__ WordEqual(object, undefined_value), &equal);
2587 __ SetAccumulator(__ BooleanConstant(false));
2588 __ Goto(&end);
2589
2590 __ Bind(&equal);
2591 {
2592 __ SetAccumulator(__ BooleanConstant(true));
2593 __ Goto(&end);
2594 }
2595
2596 __ Bind(&end);
2597 __ Dispatch();
2598 }
2599
2600 // TestTypeOf <literal_flag>
2601 //
2602 // Tests if the typeof of the object in the accumulator equals the literal
2603 // represented by |literal_flag|.
2604 void Interpreter::DoTestTypeOf(InterpreterAssembler* assembler) {
2605 Node* object = __ GetAccumulator();
2606 Node* literal_flag = __ BytecodeOperandFlag(0);
2607
2608 #define MAKE_LABEL(name, lower_case) Label if_##lower_case(assembler);
2609 TYPEOF_LITERAL_LIST(MAKE_LABEL)
2610 #undef MAKE_LABEL
2611
2612 #define LABEL_POINTER(name, lower_case) &if_##lower_case,
2613 Label* labels[] = {TYPEOF_LITERAL_LIST(LABEL_POINTER)};
2614 #undef LABEL_POINTER
2615
2616 #define CASE(name, lower_case) \
2617 static_cast<int32_t>(TestTypeOfFlags::LiteralFlag::k##name),
2618 int32_t cases[] = {TYPEOF_LITERAL_LIST(CASE)};
2619 #undef CASE
2620
2621 Label if_true(assembler), if_false(assembler), end(assembler),
2622 abort(assembler, Label::kDeferred);
2623
2624 __ Switch(literal_flag, &abort, cases, labels, arraysize(cases));
2625
2626 __ Bind(&abort);
2627 {
2628 __ Comment("Abort");
2629 __ Abort(BailoutReason::kUnexpectedTestTypeofLiteralFlag);
2630 __ Goto(&if_false);
2631 }
2632 __ Bind(&if_number);
2633 {
2634 __ Comment("IfNumber");
2635 __ GotoIfNumber(object, &if_true);
2636 __ Goto(&if_false);
2637 }
2638 __ Bind(&if_string);
2639 {
2640 __ Comment("IfString");
2641 __ GotoIf(__ TaggedIsSmi(object), &if_false);
2642 __ Branch(__ IsString(object), &if_true, &if_false);
2643 }
2644 __ Bind(&if_symbol);
2645 {
2646 __ Comment("IfSymbol");
2647 __ GotoIf(__ TaggedIsSmi(object), &if_false);
2648 __ Branch(__ IsSymbol(object), &if_true, &if_false);
2649 }
2650 __ Bind(&if_boolean);
2651 {
2652 __ Comment("IfBoolean");
2653 __ GotoIf(__ WordEqual(object, __ BooleanConstant(true)), &if_true);
2654 __ Branch(__ WordEqual(object, __ BooleanConstant(false)), &if_true,
2655 &if_false);
2656 }
2657 __ Bind(&if_undefined);
2658 {
2659 __ Comment("IfUndefined");
2660 __ GotoIf(__ TaggedIsSmi(object), &if_false);
2661 // Check it is not null and the map has the undetectable bit set.
2662 __ GotoIf(__ WordEqual(object, __ NullConstant()), &if_false);
2663 Node* map_bitfield = __ LoadMapBitField(__ LoadMap(object));
2664 Node* undetectable_bit =
2665 __ Word32And(map_bitfield, __ Int32Constant(1 << Map::kIsUndetectable));
2666 __ Branch(__ Word32Equal(undetectable_bit, __ Int32Constant(0)), &if_false,
2667 &if_true);
2668 }
2669 __ Bind(&if_function);
2670 {
2671 __ Comment("IfFunction");
2672 __ GotoIf(__ TaggedIsSmi(object), &if_false);
2673 // Check if callable bit is set and not undetectable.
2674 Node* map_bitfield = __ LoadMapBitField(__ LoadMap(object));
2675 Node* callable_undetectable = __ Word32And(
2676 map_bitfield,
2677 __ Int32Constant(1 << Map::kIsUndetectable | 1 << Map::kIsCallable));
2678 __ Branch(__ Word32Equal(callable_undetectable,
2679 __ Int32Constant(1 << Map::kIsCallable)),
2680 &if_true, &if_false);
2681 }
2682 __ Bind(&if_object);
2683 {
2684 __ Comment("IfObject");
2685 __ GotoIf(__ TaggedIsSmi(object), &if_false);
2686
2687 // If the object is null then return true.
2688 __ GotoIf(__ WordEqual(object, __ NullConstant()), &if_true);
2689
2690 // Check if the object is a receiver type and is not undefined or callable.
2691 Node* map = __ LoadMap(object);
2692 __ GotoIfNot(__ IsJSReceiverMap(map), &if_false);
2693 Node* map_bitfield = __ LoadMapBitField(map);
2694 Node* callable_undetectable = __ Word32And(
2695 map_bitfield,
2696 __ Int32Constant(1 << Map::kIsUndetectable | 1 << Map::kIsCallable));
2697 __ Branch(__ Word32Equal(callable_undetectable, __ Int32Constant(0)),
2698 &if_true, &if_false);
2699 }
2700 __ Bind(&if_other);
2701 {
2702 // Typeof doesn't return any other string value.
2703 __ Goto(&if_false);
2704 }
2705
2706 __ Bind(&if_false);
2707 {
2708 __ SetAccumulator(__ BooleanConstant(false));
2709 __ Goto(&end);
2710 }
2711 __ Bind(&if_true);
2712 {
2713 __ SetAccumulator(__ BooleanConstant(true));
2714 __ Goto(&end);
2715 }
2716 __ Bind(&end);
2717 __ Dispatch();
2718 }
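
// TestTypeOf avoids string comparison at runtime: the bytecode generator
// encodes the literal as a small integer flag once, and the handler switches
// on it. A sketch of the encoding side (enum values illustrative, not the
// real TestTypeOfFlags):
#include <cstdint>
#include <string_view>

enum class TypeOfFlagSketch : uint8_t { kNumber, kString, kBoolean, kOther };

TypeOfFlagSketch EncodeTypeOfLiteral(std::string_view literal) {
  if (literal == "number") return TypeOfFlagSketch::kNumber;
  if (literal == "string") return TypeOfFlagSketch::kString;
  if (literal == "boolean") return TypeOfFlagSketch::kBoolean;
  return TypeOfFlagSketch::kOther;  // Real encoder covers every literal.
}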
2719
2720 // Jump <imm>
2721 //
2722 // Jump by number of bytes represented by the immediate operand |imm|.
2723 void Interpreter::DoJump(InterpreterAssembler* assembler) {
2724 Node* relative_jump = __ BytecodeOperandUImmWord(0);
2725 __ Jump(relative_jump);
2726 }
2727
2728 // JumpConstant <idx>
2729 //
2730 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool.
2731 void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
2732 Node* index = __ BytecodeOperandIdx(0);
2733 Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
2734 __ Jump(relative_jump);
2735 }
2736
2737 // JumpIfTrue <imm>
2738 //
2739 // Jump by number of bytes represented by an immediate operand if the
2740 // accumulator contains true. This only works for boolean inputs, and
2741 // will misbehave if passed arbitrary input values.
2742 void Interpreter::DoJumpIfTrue(InterpreterAssembler* assembler) {
2743 Node* accumulator = __ GetAccumulator();
2744 Node* relative_jump = __ BytecodeOperandUImmWord(0);
2745 Node* true_value = __ BooleanConstant(true);
2746 CSA_ASSERT(assembler, assembler->TaggedIsNotSmi(accumulator));
2747 CSA_ASSERT(assembler, assembler->IsBoolean(accumulator));
2748 __ JumpIfWordEqual(accumulator, true_value, relative_jump);
2749 }
2750
2751 // JumpIfTrueConstant <idx>
2752 //
2753 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
2754 // if the accumulator contains true. This only works for boolean inputs, and
2755 // will misbehave if passed arbitrary input values.
2756 void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
2757 Node* accumulator = __ GetAccumulator();
2758 Node* index = __ BytecodeOperandIdx(0);
2759 Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
2760 Node* true_value = __ BooleanConstant(true);
2761 CSA_ASSERT(assembler, assembler->TaggedIsNotSmi(accumulator));
2762 CSA_ASSERT(assembler, assembler->IsBoolean(accumulator));
2763 __ JumpIfWordEqual(accumulator, true_value, relative_jump);
2764 }
2765
2766 // JumpIfFalse <imm>
2767 //
2768 // Jump by number of bytes represented by an immediate operand if the
2769 // accumulator contains false. This only works for boolean inputs, and
2770 // will misbehave if passed arbitrary input values.
2771 void Interpreter::DoJumpIfFalse(InterpreterAssembler* assembler) {
2772 Node* accumulator = __ GetAccumulator();
2773 Node* relative_jump = __ BytecodeOperandUImmWord(0);
2774 Node* false_value = __ BooleanConstant(false);
2775 CSA_ASSERT(assembler, assembler->TaggedIsNotSmi(accumulator));
2776 CSA_ASSERT(assembler, assembler->IsBoolean(accumulator));
2777 __ JumpIfWordEqual(accumulator, false_value, relative_jump);
2778 }
2779
2780 // JumpIfFalseConstant <idx>
2781 //
2782 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
2783 // if the accumulator contains false. This only works for boolean inputs, and
2784 // will misbehave if passed arbitrary input values.
2785 void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
2786 Node* accumulator = __ GetAccumulator();
2787 Node* index = __ BytecodeOperandIdx(0);
2788 Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
2789 Node* false_value = __ BooleanConstant(false);
2790 CSA_ASSERT(assembler, assembler->TaggedIsNotSmi(accumulator));
2791 CSA_ASSERT(assembler, assembler->IsBoolean(accumulator));
2792 __ JumpIfWordEqual(accumulator, false_value, relative_jump);
2793 }
2794
2795 // JumpIfToBooleanTrue <imm>
2796 //
2797 // Jump by number of bytes represented by an immediate operand if the object
2798 // referenced by the accumulator is true when the object is cast to boolean.
2799 void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
2800 Node* value = __ GetAccumulator();
2801 Node* relative_jump = __ BytecodeOperandUImmWord(0);
2802 Label if_true(assembler), if_false(assembler);
2803 __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
2804 __ Bind(&if_true);
2805 __ Jump(relative_jump);
2806 __ Bind(&if_false);
2807 __ Dispatch();
2808 }
2809
2810 // JumpIfToBooleanTrueConstant <idx>
2811 //
2812 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
2813 // if the object referenced by the accumulator is true when the object is cast
2814 // to boolean.
2815 void Interpreter::DoJumpIfToBooleanTrueConstant(
2816 InterpreterAssembler* assembler) {
2817 Node* value = __ GetAccumulator();
2818 Node* index = __ BytecodeOperandIdx(0);
2819 Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
2820 Label if_true(assembler), if_false(assembler);
2821 __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
2822 __ Bind(&if_true);
2823 __ Jump(relative_jump);
2824 __ Bind(&if_false);
2825 __ Dispatch();
2826 }
2827
2828 // JumpIfToBooleanFalse <imm>
2829 //
2830 // Jump by number of bytes represented by an immediate operand if the object
2831 // referenced by the accumulator is false when the object is cast to boolean.
2832 void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
2833 Node* value = __ GetAccumulator();
2834 Node* relative_jump = __ BytecodeOperandUImmWord(0);
2835 Label if_true(assembler), if_false(assembler);
2836 __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
2837 __ Bind(&if_true);
2838 __ Dispatch();
2839 __ Bind(&if_false);
2840 __ Jump(relative_jump);
2841 }
2842
2843 // JumpIfToBooleanFalseConstant <idx>
2844 //
2845 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
2846 // if the object referenced by the accumulator is false when the object is cast
2847 // to boolean.
2848 void Interpreter::DoJumpIfToBooleanFalseConstant(
2849 InterpreterAssembler* assembler) {
2850 Node* value = __ GetAccumulator();
2851 Node* index = __ BytecodeOperandIdx(0);
2852 Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
2853 Label if_true(assembler), if_false(assembler);
2854 __ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
2855 __ Bind(&if_true);
2856 __ Dispatch();
2857 __ Bind(&if_false);
2858 __ Jump(relative_jump);
2859 }
2860
2861 // JumpIfNull <imm>
2862 //
2863 // Jump by number of bytes represented by an immediate operand if the object
2864 // referenced by the accumulator is the null constant.
2865 void Interpreter::DoJumpIfNull(InterpreterAssembler* assembler) {
2866 Node* accumulator = __ GetAccumulator();
2867 Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
2868 Node* relative_jump = __ BytecodeOperandUImmWord(0);
2869 __ JumpIfWordEqual(accumulator, null_value, relative_jump);
2870 }
2871
2872 // JumpIfNullConstant <idx>
2873 //
2874 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
2875 // if the object referenced by the accumulator is the null constant.
2876 void Interpreter::DoJumpIfNullConstant(InterpreterAssembler* assembler) {
2877 Node* accumulator = __ GetAccumulator();
2878 Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
2879 Node* index = __ BytecodeOperandIdx(0);
2880 Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
2881 __ JumpIfWordEqual(accumulator, null_value, relative_jump);
2882 }
2883
2884 // JumpIfUndefined <imm>
2885 //
2886 // Jump by number of bytes represented by an immediate operand if the object
2887 // referenced by the accumulator is the undefined constant.
2888 void Interpreter::DoJumpIfUndefined(InterpreterAssembler* assembler) {
2889 Node* accumulator = __ GetAccumulator();
2890 Node* undefined_value =
2891 __ HeapConstant(isolate_->factory()->undefined_value());
2892 Node* relative_jump = __ BytecodeOperandUImmWord(0);
2893 __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
2894 }
2895
2896 // JumpIfUndefinedConstant <idx>
2897 //
2898 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
2899 // if the object referenced by the accumulator is the undefined constant.
2900 void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
2901 Node* accumulator = __ GetAccumulator();
2902 Node* undefined_value =
2903 __ HeapConstant(isolate_->factory()->undefined_value());
2904 Node* index = __ BytecodeOperandIdx(0);
2905 Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
2906 __ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
2907 }
2908
2909 // JumpIfJSReceiver <imm>
2910 //
2911 // Jump by number of bytes represented by an immediate operand if the object
2912 // referenced by the accumulator is a JSReceiver.
2913 void Interpreter::DoJumpIfJSReceiver(InterpreterAssembler* assembler) {
2914 Node* accumulator = __ GetAccumulator();
2915 Node* relative_jump = __ BytecodeOperandUImmWord(0);
2916
2917 Label if_object(assembler), if_notobject(assembler, Label::kDeferred),
2918 if_notsmi(assembler);
2919 __ Branch(__ TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);
2920
2921 __ Bind(&if_notsmi);
2922 __ Branch(__ IsJSReceiver(accumulator), &if_object, &if_notobject);
2923 __ Bind(&if_object);
2924 __ Jump(relative_jump);
2925
2926 __ Bind(&if_notobject);
2927 __ Dispatch();
2928 }
2929
2930 // JumpIfJSReceiverConstant <idx>
2931 //
2932 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool if
2933 // the object referenced by the accumulator is a JSReceiver.
2934 void Interpreter::DoJumpIfJSReceiverConstant(InterpreterAssembler* assembler) {
2935 Node* accumulator = __ GetAccumulator();
2936 Node* index = __ BytecodeOperandIdx(0);
2937 Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
2938
2939 Label if_object(assembler), if_notobject(assembler), if_notsmi(assembler);
2940 __ Branch(__ TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);
2941
2942 __ Bind(&if_notsmi);
2943 __ Branch(__ IsJSReceiver(accumulator), &if_object, &if_notobject);
2944
2945 __ Bind(&if_object);
2946 __ Jump(relative_jump);
2947
2948 __ Bind(&if_notobject);
2949 __ Dispatch();
2950 }
2951
2952 // JumpIfNotHole <imm>
2953 //
2954 // Jump by number of bytes represented by an immediate operand if the object
2955 // referenced by the accumulator is not the hole.
2956 void Interpreter::DoJumpIfNotHole(InterpreterAssembler* assembler) {
2957 Node* accumulator = __ GetAccumulator();
2958 Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
2959 Node* relative_jump = __ BytecodeOperandUImmWord(0);
2960 __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
2961 }
2962
2963 // JumpIfNotHoleConstant <idx>
2964 //
2965 // Jump by number of bytes in the Smi in the |idx| entry in the constant pool
2966 // if the object referenced by the accumulator is not the hole constant.
2967 void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
2968 Node* accumulator = __ GetAccumulator();
2969 Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
2970 Node* index = __ BytecodeOperandIdx(0);
2971 Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
2972 __ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
2973 }
2974
2975 // JumpLoop <imm> <loop_depth>
2976 //
2977 // Jump by number of bytes represented by the immediate operand |imm|. Also
2978 // performs a loop nesting check and potentially triggers OSR in case the
2979 // current OSR level matches (or exceeds) the specified |loop_depth|.
2980 void Interpreter::DoJumpLoop(InterpreterAssembler* assembler) {
2981 Node* relative_jump = __ BytecodeOperandUImmWord(0);
2982 Node* loop_depth = __ BytecodeOperandImm(1);
2983 Node* osr_level = __ LoadOSRNestingLevel();
2984
2985 // Check if OSR points at the given {loop_depth} are armed by comparing it to
2986 // the current {osr_level} loaded from the header of the BytecodeArray.
2987 Label ok(assembler), osr_armed(assembler, Label::kDeferred);
2988 Node* condition = __ Int32GreaterThanOrEqual(loop_depth, osr_level);
2989 __ Branch(condition, &ok, &osr_armed);
2990
2991 __ Bind(&ok);
2992 __ JumpBackward(relative_jump);
2993
2994 __ Bind(&osr_armed);
2995 {
2996 Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate_);
2997 Node* target = __ HeapConstant(callable.code());
2998 Node* context = __ GetContext();
2999 __ CallStub(callable.descriptor(), target, context);
3000 __ JumpBackward(relative_jump);
3001 }
3002 }
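
// OSR arming reduces to one comparison per back edge: the loop is armed when
// its depth is below the OSR level stored in the BytecodeArray header. A
// sketch of that predicate with plain ints standing in for the loaded nodes:
bool OsrArmedSketch(int loop_depth, int osr_level) {
  // DoJumpLoop takes the deferred &osr_armed path when this returns true.
  return loop_depth < osr_level;
}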
3003
3004 // CreateRegExpLiteral <pattern_idx> <literal_idx> <flags>
3005 //
3006 // Creates a regular expression literal for literal index <literal_idx> with
3007 // <flags> and the pattern in <pattern_idx>.
3008 void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
3009 Node* index = __ BytecodeOperandIdx(0);
3010 Node* pattern = __ LoadConstantPoolEntry(index);
3011 Node* literal_index = __ BytecodeOperandIdxSmi(1);
3012 Node* flags = __ SmiFromWord32(__ BytecodeOperandFlag(2));
3013 Node* closure = __ LoadRegister(Register::function_closure());
3014 Node* context = __ GetContext();
3015 ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
3016 Node* result = constructor_assembler.EmitFastCloneRegExp(
3017 closure, literal_index, pattern, flags, context);
3018 __ SetAccumulator(result);
3019 __ Dispatch();
3020 }
3021
3022 // CreateArrayLiteral <element_idx> <literal_idx> <flags>
3023 //
3024 // Creates an array literal for literal index <literal_idx> with
3025 // CreateArrayLiteral flags <flags> and constant elements in <element_idx>.
3026 void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
3027 Node* literal_index = __ BytecodeOperandIdxSmi(1);
3028 Node* closure = __ LoadRegister(Register::function_closure());
3029 Node* context = __ GetContext();
3030 Node* bytecode_flags = __ BytecodeOperandFlag(2);
3031
3032 Label fast_shallow_clone(assembler),
3033 call_runtime(assembler, Label::kDeferred);
3034 __ Branch(__ IsSetWord32<CreateArrayLiteralFlags::FastShallowCloneBit>(
3035 bytecode_flags),
3036 &fast_shallow_clone, &call_runtime);
3037
3038 __ Bind(&fast_shallow_clone);
3039 {
3040 ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
3041 Node* result = constructor_assembler.EmitFastCloneShallowArray(
3042 closure, literal_index, context, &call_runtime, TRACK_ALLOCATION_SITE);
3043 __ SetAccumulator(result);
3044 __ Dispatch();
3045 }
3046
3047 __ Bind(&call_runtime);
3048 {
3049 Node* flags_raw =
3050 __ DecodeWordFromWord32<CreateArrayLiteralFlags::FlagsBits>(
3051 bytecode_flags);
3052 Node* flags = __ SmiTag(flags_raw);
3053 Node* index = __ BytecodeOperandIdx(0);
3054 Node* constant_elements = __ LoadConstantPoolEntry(index);
3055 Node* result =
3056 __ CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
3057 literal_index, constant_elements, flags);
3058 __ SetAccumulator(result);
3059 __ Dispatch();
3060 }
3061 }
3062
3063 // CreateObjectLiteral <element_idx> <literal_idx> <flags>
3064 //
3065 // Creates an object literal for literal index <literal_idx> with
3066 // CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
3067 void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
3068 Node* literal_index = __ BytecodeOperandIdxSmi(1);
3069 Node* bytecode_flags = __ BytecodeOperandFlag(2);
3070 Node* closure = __ LoadRegister(Register::function_closure());
3071
3072 // Check if we can do a fast clone or have to call the runtime.
3073 Label if_fast_clone(assembler),
3074 if_not_fast_clone(assembler, Label::kDeferred);
3075 Node* fast_clone_properties_count = __ DecodeWordFromWord32<
3076 CreateObjectLiteralFlags::FastClonePropertiesCountBits>(bytecode_flags);
3077 __ Branch(__ WordNotEqual(fast_clone_properties_count, __ IntPtrConstant(0)),
3078 &if_fast_clone, &if_not_fast_clone);
3079
3080 __ Bind(&if_fast_clone);
3081 {
3082 // If we can do a fast clone, do the fast path in FastCloneShallowObjectStub.
3083 ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
3084 Node* result = constructor_assembler.EmitFastCloneShallowObject(
3085 &if_not_fast_clone, closure, literal_index,
3086 fast_clone_properties_count);
3087 __ StoreRegister(result, __ BytecodeOperandReg(3));
3088 __ Dispatch();
3089 }
3090
3091 __ Bind(&if_not_fast_clone);
3092 {
3093 // If we can't do a fast clone, call into the runtime.
3094 Node* index = __ BytecodeOperandIdx(0);
3095 Node* constant_elements = __ LoadConstantPoolEntry(index);
3096 Node* context = __ GetContext();
3097
3098 Node* flags_raw =
3099 __ DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(
3100 bytecode_flags);
3101 Node* flags = __ SmiTag(flags_raw);
3102
3103 Node* result =
3104 __ CallRuntime(Runtime::kCreateObjectLiteral, context, closure,
3105 literal_index, constant_elements, flags);
3106 __ StoreRegister(result, __ BytecodeOperandReg(3));
3107 // TODO(klaasb) build a single dispatch once the call is inlined
3108 __ Dispatch();
3109 }
3110 }
3111
3112 // CreateClosure <index> <slot> <tenured>
3113 //
3114 // Creates a new closure for the SharedFunctionInfo at position |index| in
3115 // the constant pool, with feedback slot |slot| and PretenureFlag |tenured|.
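// For example, a function literal such as
//   var f = function() { return 42; };
// compiles to a CreateClosure bytecode that instantiates a closure from the
// SharedFunctionInfo in the constant pool.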
3116 void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
3117 Node* index = __ BytecodeOperandIdx(0);
3118 Node* shared = __ LoadConstantPoolEntry(index);
3119 Node* flags = __ BytecodeOperandFlag(2);
3120 Node* context = __ GetContext();
3121
3122 Label call_runtime(assembler, Label::kDeferred);
3123 __ GotoIfNot(__ IsSetWord32<CreateClosureFlags::FastNewClosureBit>(flags),
3124 &call_runtime);
3125 ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
3126 Node* vector_index = __ BytecodeOperandIdx(1);
3127 vector_index = __ SmiTag(vector_index);
3128 Node* feedback_vector = __ LoadFeedbackVector();
3129 __ SetAccumulator(constructor_assembler.EmitFastNewClosure(
3130 shared, feedback_vector, vector_index, context));
3131 __ Dispatch();
3132
3133 __ Bind(&call_runtime);
3134 {
3135 Node* tenured_raw =
3136 __ DecodeWordFromWord32<CreateClosureFlags::PretenuredBit>(flags);
3137 Node* tenured = __ SmiTag(tenured_raw);
3138 feedback_vector = __ LoadFeedbackVector();
3139 vector_index = __ BytecodeOperandIdx(1);
3140 vector_index = __ SmiTag(vector_index);
3141 Node* result =
3142 __ CallRuntime(Runtime::kInterpreterNewClosure, context, shared,
3143 feedback_vector, vector_index, tenured);
3144 __ SetAccumulator(result);
3145 __ Dispatch();
3146 }
3147 }
3148
3149 // CreateBlockContext <index>
3150 //
3151 // Creates a new block context with the scope info constant at |index| and the
3152 // closure in the accumulator.
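// A block context is only needed when a block-scoped variable is
// context-allocated, e.g. because it is captured by a closure:
//   { let x = 1; callbacks.push(() => x); }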
3153 void Interpreter::DoCreateBlockContext(InterpreterAssembler* assembler) {
3154 Node* index = __ BytecodeOperandIdx(0);
3155 Node* scope_info = __ LoadConstantPoolEntry(index);
3156 Node* closure = __ GetAccumulator();
3157 Node* context = __ GetContext();
3158 __ SetAccumulator(
3159 __ CallRuntime(Runtime::kPushBlockContext, context, scope_info, closure));
3160 __ Dispatch();
3161 }
3162
3163 // CreateCatchContext <exception> <name_idx> <scope_info_idx>
3164 //
3165 // Creates a new context for a catch block with the |exception| in a register,
3166 // the variable name at |name_idx|, the ScopeInfo at |scope_info_idx|, and the
3167 // closure in the accumulator.
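// For example, the catch clause in
//   try { mayThrow(); } catch (e) { handle(e); }
// runs in a catch context that binds |e| to the thrown exception.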
3168 void Interpreter::DoCreateCatchContext(InterpreterAssembler* assembler) {
3169 Node* exception_reg = __ BytecodeOperandReg(0);
3170 Node* exception = __ LoadRegister(exception_reg);
3171 Node* name_idx = __ BytecodeOperandIdx(1);
3172 Node* name = __ LoadConstantPoolEntry(name_idx);
3173 Node* scope_info_idx = __ BytecodeOperandIdx(2);
3174 Node* scope_info = __ LoadConstantPoolEntry(scope_info_idx);
3175 Node* closure = __ GetAccumulator();
3176 Node* context = __ GetContext();
3177 __ SetAccumulator(__ CallRuntime(Runtime::kPushCatchContext, context, name,
3178 exception, scope_info, closure));
3179 __ Dispatch();
3180 }
3181
3182 // CreateFunctionContext <slots>
3183 //
3184 // Creates a new context with |slots| context slots for the function closure.
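// A function context is needed when locals are context-allocated, e.g.
// because an inner closure captures them:
//   function outer() { var x = 1; return function() { return x; }; }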
3185 void Interpreter::DoCreateFunctionContext(InterpreterAssembler* assembler) {
3186 Node* closure = __ LoadRegister(Register::function_closure());
3187 Node* slots = __ BytecodeOperandUImm(0);
3188 Node* context = __ GetContext();
3189 ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
3190 __ SetAccumulator(constructor_assembler.EmitFastNewFunctionContext(
3191 closure, slots, context, FUNCTION_SCOPE));
3192 __ Dispatch();
3193 }
3194
3195 // CreateEvalContext <slots>
3196 //
3197 // Creates a new context with |slots| context slots for an eval closure.
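// An eval context plays the same role for variables declared by code that
// runs in a direct eval.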
3198 void Interpreter::DoCreateEvalContext(InterpreterAssembler* assembler) {
3199 Node* closure = __ LoadRegister(Register::function_closure());
3200 Node* slots = __ BytecodeOperandUImm(0);
3201 Node* context = __ GetContext();
3202 ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
3203 __ SetAccumulator(constructor_assembler.EmitFastNewFunctionContext(
3204 closure, slots, context, EVAL_SCOPE));
3205 __ Dispatch();
3206 }
3207
3208 // CreateWithContext <register> <scope_info_idx>
3209 //
3210 // Creates a new context with the ScopeInfo at |scope_info_idx| for a
3211 // with-statement with the object in |register| and the closure in the
3212 // accumulator.
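// For example, the body of
//   with (obj) { x = 1; }
// runs in a with context whose extension object is |obj|.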
3213 void Interpreter::DoCreateWithContext(InterpreterAssembler* assembler) {
3214 Node* reg_index = __ BytecodeOperandReg(0);
3215 Node* object = __ LoadRegister(reg_index);
3216 Node* scope_info_idx = __ BytecodeOperandIdx(1);
3217 Node* scope_info = __ LoadConstantPoolEntry(scope_info_idx);
3218 Node* closure = __ GetAccumulator();
3219 Node* context = __ GetContext();
3220 __ SetAccumulator(__ CallRuntime(Runtime::kPushWithContext, context, object,
3221 scope_info, closure));
3222 __ Dispatch();
3223 }
3224
3225 // CreateMappedArguments
3226 //
3227 // Creates a new mapped arguments object.
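// A mapped arguments object, whose elements alias the formal parameters, is
// used by sloppy-mode functions with simple parameter lists, e.g.
//   function f(a) { return arguments[0]; }  // sloppy mode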
3228 void Interpreter::DoCreateMappedArguments(InterpreterAssembler* assembler) {
3229 Node* closure = __ LoadRegister(Register::function_closure());
3230 Node* context = __ GetContext();
3231
3232 Label if_duplicate_parameters(assembler, Label::kDeferred);
3233 Label if_not_duplicate_parameters(assembler);
3234
3235 // Check if function has duplicate parameters.
3236 // TODO(rmcilroy): Remove this check when FastNewSloppyArgumentsStub supports
3237 // duplicate parameters.
3238 Node* shared_info =
3239 __ LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
3240 Node* compiler_hints = __ LoadObjectField(
3241 shared_info, SharedFunctionInfo::kHasDuplicateParametersByteOffset,
3242 MachineType::Uint8());
3243 Node* duplicate_parameters_bit = __ Int32Constant(
3244 1 << SharedFunctionInfo::kHasDuplicateParametersBitWithinByte);
3245 Node* compare = __ Word32And(compiler_hints, duplicate_parameters_bit);
3246 __ Branch(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
3247
3248 __ Bind(&if_not_duplicate_parameters);
3249 {
3250 ArgumentsBuiltinsAssembler constructor_assembler(assembler->state());
3251 Node* result =
3252 constructor_assembler.EmitFastNewSloppyArguments(context, closure);
3253 __ SetAccumulator(result);
3254 __ Dispatch();
3255 }
3256
3257 __ Bind(&if_duplicate_parameters);
3258 {
3259 Node* result =
3260 __ CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
3261 __ SetAccumulator(result);
3262 __ Dispatch();
3263 }
3264 }
3265
3266 // CreateUnmappedArguments
3267 //
3268 // Creates a new unmapped arguments object.
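// An unmapped arguments object, a plain copy of the actual arguments, is
// used in strict mode and for non-simple parameter lists, e.g.
//   function f(a) { "use strict"; return arguments[0]; }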
3269 void Interpreter::DoCreateUnmappedArguments(InterpreterAssembler* assembler) {
3270 Node* context = __ GetContext();
3271 Node* closure = __ LoadRegister(Register::function_closure());
3272 ArgumentsBuiltinsAssembler builtins_assembler(assembler->state());
3273 Node* result =
3274 builtins_assembler.EmitFastNewStrictArguments(context, closure);
3275 __ SetAccumulator(result);
3276 __ Dispatch();
3277 }
3278
3279 // CreateRestParameter
3280 //
3281 // Creates a new rest parameter array.
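// For example, the rest parameter in
//   function f(first, ...rest) {}
// is materialized as an array by this bytecode.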
3282 void Interpreter::DoCreateRestParameter(InterpreterAssembler* assembler) {
3283 Node* closure = __ LoadRegister(Register::function_closure());
3284 Node* context = __ GetContext();
3285 ArgumentsBuiltinsAssembler builtins_assembler(assembler->state());
3286 Node* result = builtins_assembler.EmitFastNewRestParameter(context, closure);
3287 __ SetAccumulator(result);
3288 __ Dispatch();
3289 }
3290
3291 // StackCheck
3292 //
3293 // Performs a stack guard check.
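// The bytecode generator emits a StackCheck at function entry, so stack
// overflow and pending interrupts are handled before the body executes.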
3294 void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
3295 Label ok(assembler), stack_check_interrupt(assembler, Label::kDeferred);
3296
3297 Node* interrupt = __ StackCheckTriggeredInterrupt();
3298 __ Branch(interrupt, &stack_check_interrupt, &ok);
3299
3300 __ Bind(&ok);
3301 __ Dispatch();
3302
3303 __ Bind(&stack_check_interrupt);
3304 {
3305 Node* context = __ GetContext();
3306 __ CallRuntime(Runtime::kStackGuard, context);
3307 __ Dispatch();
3308 }
3309 }
3310
3311 // SetPendingMessage
3312 //
3313 // Sets the pending message to the value in the accumulator, and returns the
3314 // previous pending message in the accumulator.
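// This is used, for example, to save and restore the pending message around
// the body of a try-finally, so that the finally block cannot clobber it.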
3315 void Interpreter::DoSetPendingMessage(InterpreterAssembler* assembler) {
3316 Node* pending_message = __ ExternalConstant(
3317 ExternalReference::address_of_pending_message_obj(isolate_));
3318 Node* previous_message =
3319 __ Load(MachineType::TaggedPointer(), pending_message);
3320 Node* new_message = __ GetAccumulator();
3321 __ StoreNoWriteBarrier(MachineRepresentation::kTaggedPointer, pending_message,
3322 new_message);
3323 __ SetAccumulator(previous_message);
3324 __ Dispatch();
3325 }
3326
3327 // Throw
3328 //
3329 // Throws the exception in the accumulator.
3330 void Interpreter::DoThrow(InterpreterAssembler* assembler) {
3331 Node* exception = __ GetAccumulator();
3332 Node* context = __ GetContext();
3333 __ CallRuntime(Runtime::kThrow, context, exception);
3334 // We shouldn't ever return from a throw.
3335 __ Abort(kUnexpectedReturnFromThrow);
3336 }
3337
3338 // ReThrow
3339 //
3340 // Re-throws the exception in the accumulator.
3341 void Interpreter::DoReThrow(InterpreterAssembler* assembler) {
3342 Node* exception = __ GetAccumulator();
3343 Node* context = __ GetContext();
3344 __ CallRuntime(Runtime::kReThrow, context, exception);
3345 // We shouldn't ever return from a throw.
3346 __ Abort(kUnexpectedReturnFromThrow);
3347 }
3348
3349 // Return
3350 //
3351 // Return the value in the accumulator.
3352 void Interpreter::DoReturn(InterpreterAssembler* assembler) {
3353 __ UpdateInterruptBudgetOnReturn();
3354 Node* accumulator = __ GetAccumulator();
3355 __ Return(accumulator);
3356 }
3357
3358 // Debugger
3359 //
3360 // Call runtime to handle debugger statement.
3361 void Interpreter::DoDebugger(InterpreterAssembler* assembler) {
3362 Node* context = __ GetContext();
3363 __ CallStub(CodeFactory::HandleDebuggerStatement(isolate_), context);
3364 __ Dispatch();
3365 }
3366
3367 // DebugBreak
3368 //
3369 // Call runtime to handle a debug break.
3370 #define DEBUG_BREAK(Name, ...) \
3371 void Interpreter::Do##Name(InterpreterAssembler* assembler) { \
3372 Node* context = __ GetContext(); \
3373 Node* accumulator = __ GetAccumulator(); \
3374 Node* original_handler = \
3375 __ CallRuntime(Runtime::kDebugBreakOnBytecode, context, accumulator); \
3376 __ MaybeDropFrames(context); \
3377 __ DispatchToBytecodeHandler(original_handler); \
3378 }
3379 DEBUG_BREAK_BYTECODE_LIST(DEBUG_BREAK);
3380 #undef DEBUG_BREAK
3381
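// Stores the ForInPrepare result triple (cache_type, cache_array,
// cache_length) into three consecutive registers starting at
// |output_register|.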
3382 void Interpreter::BuildForInPrepareResult(Node* output_register,
3383 Node* cache_type, Node* cache_array,
3384 Node* cache_length,
3385 InterpreterAssembler* assembler) {
3386 __ StoreRegister(cache_type, output_register);
3387 output_register = __ NextRegister(output_register);
3388 __ StoreRegister(cache_array, output_register);
3389 output_register = __ NextRegister(output_register);
3390 __ StoreRegister(cache_length, output_register);
3391 }
3392
3393 // ForInPrepare <receiver> <cache_info_triple>
3394 //
3395 // Returns state for for..in loop execution based on the object in the register
3396 // |receiver|. The object must not be null or undefined and must have been
3397 // converted to a receiver already.
3398 // The result is output in registers |cache_info_triple| to
3399 // |cache_info_triple + 2|, with the registers holding cache_type, cache_array,
3400 // and cache_length respectively.
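// For example, a loop such as
//   for (var key in obj) { use(key); }
// compiles (roughly) to a ForInPrepare followed by a loop of ForInContinue,
// ForInNext and ForInStep over the triple produced here.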
3401 void Interpreter::DoForInPrepare(InterpreterAssembler* assembler) {
3402 Node* object_register = __ BytecodeOperandReg(0);
3403 Node* output_register = __ BytecodeOperandReg(1);
3404 Node* receiver = __ LoadRegister(object_register);
3405 Node* context = __ GetContext();
3406
3407 Node* cache_type;
3408 Node* cache_array;
3409 Node* cache_length;
3410 Label call_runtime(assembler, Label::kDeferred),
3411 nothing_to_iterate(assembler, Label::kDeferred);
3412
3413 ForInBuiltinsAssembler forin_assembler(assembler->state());
3414 std::tie(cache_type, cache_array, cache_length) =
3415 forin_assembler.EmitForInPrepare(receiver, context, &call_runtime,
3416 &nothing_to_iterate);
3417
3418 BuildForInPrepareResult(output_register, cache_type, cache_array,
3419 cache_length, assembler);
3420 __ Dispatch();
3421
3422 __ Bind(&call_runtime);
3423 {
3424 Node* result_triple =
3425 __ CallRuntime(Runtime::kForInPrepare, context, receiver);
3426 Node* cache_type = __ Projection(0, result_triple);
3427 Node* cache_array = __ Projection(1, result_triple);
3428 Node* cache_length = __ Projection(2, result_triple);
3429 BuildForInPrepareResult(output_register, cache_type, cache_array,
3430 cache_length, assembler);
3431 __ Dispatch();
3432 }
3433 __ Bind(&nothing_to_iterate);
3434 {
3435 // Receiver is null or undefined, or its descriptor array is empty.
3436 Node* zero = __ SmiConstant(0);
3437 BuildForInPrepareResult(output_register, zero, zero, zero, assembler);
3438 __ Dispatch();
3439 }
3440 }
3441
3442 // ForInNext <receiver> <index> <cache_info_pair>
3443 //
3444 // Returns the next enumerable property in the accumulator.
3445 void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
3446 Node* receiver_reg = __ BytecodeOperandReg(0);
3447 Node* receiver = __ LoadRegister(receiver_reg);
3448 Node* index_reg = __ BytecodeOperandReg(1);
3449 Node* index = __ LoadRegister(index_reg);
3450 Node* cache_type_reg = __ BytecodeOperandReg(2);
3451 Node* cache_type = __ LoadRegister(cache_type_reg);
3452 Node* cache_array_reg = __ NextRegister(cache_type_reg);
3453 Node* cache_array = __ LoadRegister(cache_array_reg);
3454
3455 // Load the next key from the enumeration array.
3456 Node* key = __ LoadFixedArrayElement(cache_array, index, 0,
3457 CodeStubAssembler::SMI_PARAMETERS);
3458
3459 // Check if we can use the for-in fast path, potentially using the enum cache.
3460 Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
3461 Node* receiver_map = __ LoadMap(receiver);
3462 __ Branch(__ WordEqual(receiver_map, cache_type), &if_fast, &if_slow);
3463 __ Bind(&if_fast);
3464 {
3465 // Enum cache in use for {receiver}, the {key} is definitely valid.
3466 __ SetAccumulator(key);
3467 __ Dispatch();
3468 }
3469 __ Bind(&if_slow);
3470 {
3471 // Record the fact that we hit the for-in slow path.
3472 Node* vector_index = __ BytecodeOperandIdx(3);
3473 Node* feedback_vector = __ LoadFeedbackVector();
3474 Node* megamorphic_sentinel =
3475 __ HeapConstant(FeedbackVector::MegamorphicSentinel(isolate_));
3476 __ StoreFixedArrayElement(feedback_vector, vector_index,
3477 megamorphic_sentinel, SKIP_WRITE_BARRIER);
3478
3479 // Need to filter the {key} for the {receiver}.
3480 Node* context = __ GetContext();
3481 Callable callable = CodeFactory::ForInFilter(assembler->isolate());
3482 Node* result = __ CallStub(callable, context, key, receiver);
3483 __ SetAccumulator(result);
3484 __ Dispatch();
3485 }
3486 }
3487
3488 // ForInContinue <index> <cache_length>
3489 //
3490 // Returns false if the end of the enumerable properties has been reached.
3491 void Interpreter::DoForInContinue(InterpreterAssembler* assembler) {
3492 Node* index_reg = __ BytecodeOperandReg(0);
3493 Node* index = __ LoadRegister(index_reg);
3494 Node* cache_length_reg = __ BytecodeOperandReg(1);
3495 Node* cache_length = __ LoadRegister(cache_length_reg);
3496
3497 // Check if {index} is at {cache_length} already.
3498 Label if_true(assembler), if_false(assembler), end(assembler);
3499 __ Branch(__ WordEqual(index, cache_length), &if_true, &if_false);
3500 __ Bind(&if_true);
3501 {
3502 __ SetAccumulator(__ BooleanConstant(false));
3503 __ Goto(&end);
3504 }
3505 __ Bind(&if_false);
3506 {
3507 __ SetAccumulator(__ BooleanConstant(true));
3508 __ Goto(&end);
3509 }
3510 __ Bind(&end);
3511 __ Dispatch();
3512 }
3513
3514 // ForInStep <index>
3515 //
3516 // Increments the loop counter in register |index| and stores the result
3517 // in the accumulator.
3518 void Interpreter::DoForInStep(InterpreterAssembler* assembler) {
3519 Node* index_reg = __ BytecodeOperandReg(0);
3520 Node* index = __ LoadRegister(index_reg);
3521 Node* one = __ SmiConstant(Smi::FromInt(1));
3522 Node* result = __ SmiAdd(index, one);
3523 __ SetAccumulator(result);
3524 __ Dispatch();
3525 }
3526
3527 // Wide
3528 //
3529 // Prefix bytecode indicating next bytecode has wide (16-bit) operands.
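// For example, a function with enough registers or constant pool entries
// that an operand no longer fits in a single byte has the affected
// bytecodes prefixed with Wide (or ExtraWide below).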
3530 void Interpreter::DoWide(InterpreterAssembler* assembler) {
3531 __ DispatchWide(OperandScale::kDouble);
3532 }
3533
3534 // ExtraWide
3535 //
3536 // Prefix bytecode indicating next bytecode has extra-wide (32-bit) operands.
3537 void Interpreter::DoExtraWide(InterpreterAssembler* assembler) {
3538 __ DispatchWide(OperandScale::kQuadruple);
3539 }
3540
3541 // Illegal
3542 //
3543 // An invalid bytecode that aborts execution if dispatched.
3544 void Interpreter::DoIllegal(InterpreterAssembler* assembler) {
3545 __ Abort(kInvalidBytecode);
3546 }
3547
3548 // Nop
3549 //
3550 // No operation.
3551 void Interpreter::DoNop(InterpreterAssembler* assembler) { __ Dispatch(); }
3552
3553 // SuspendGenerator <generator>
3554 //
3555 // Exports the register file and stores it into the generator. Also stores the
3556 // current context, the state given in the accumulator, and the current bytecode
3557 // offset (for debugging purposes) into the generator.
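// For example, each yield in
//   function* g() { yield 1; yield 2; }
// suspends via this bytecode; execution continues via ResumeGenerator.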
3558 void Interpreter::DoSuspendGenerator(InterpreterAssembler* assembler) {
3559 Node* generator_reg = __ BytecodeOperandReg(0);
3560 Node* generator = __ LoadRegister(generator_reg);
3561
3562 Label if_stepping(assembler, Label::kDeferred), ok(assembler);
3563 Node* step_action_address = __ ExternalConstant(
3564 ExternalReference::debug_last_step_action_address(isolate_));
3565 Node* step_action = __ Load(MachineType::Int8(), step_action_address);
3566 STATIC_ASSERT(StepIn > StepNext);
3567 STATIC_ASSERT(LastStepAction == StepIn);
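// The debugger is stepping iff the last step action is at least StepNext; in
// that case, record this generator so that stepping can continue when it is
// resumed.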
3568 Node* step_next = __ Int32Constant(StepNext);
3569 __ Branch(__ Int32LessThanOrEqual(step_next, step_action), &if_stepping, &ok);
3570 __ Bind(&ok);
3571
3572 Node* array =
3573 __ LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset);
3574 Node* context = __ GetContext();
3575 Node* state = __ GetAccumulator();
3576
3577 __ ExportRegisterFile(array);
3578 __ StoreObjectField(generator, JSGeneratorObject::kContextOffset, context);
3579 __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset, state);
3580
3581 Node* offset = __ SmiTag(__ BytecodeOffset());
3582 __ StoreObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset,
3583 offset);
3584
3585 __ Dispatch();
3586
3587 __ Bind(&if_stepping);
3588 {
3589 Node* context = __ GetContext();
3590 __ CallRuntime(Runtime::kDebugRecordGenerator, context, generator);
3591 __ Goto(&ok);
3592 }
3593 }
3594
3595 // ResumeGenerator <generator>
3596 //
3597 // Imports the register file stored in the generator. Also loads the
3598 // generator's state and stores it in the accumulator, before overwriting it
3599 // with kGeneratorExecuting.
3600 void Interpreter::DoResumeGenerator(InterpreterAssembler* assembler) {
3601 Node* generator_reg = __ BytecodeOperandReg(0);
3602 Node* generator = __ LoadRegister(generator_reg);
3603
3604 __ ImportRegisterFile(
3605 __ LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset));
3606
3607 Node* old_state =
3608 __ LoadObjectField(generator, JSGeneratorObject::kContinuationOffset);
3609 Node* new_state = __ Int32Constant(JSGeneratorObject::kGeneratorExecuting);
3610 __ StoreObjectField(generator, JSGeneratorObject::kContinuationOffset,
3611 __ SmiTag(new_state));
3612 __ SetAccumulator(old_state);
3613
3614 __ Dispatch();
3615 }
3616
3617 } // namespace interpreter
3618 } // namespace internal
3619 } // namespace v8