Chromium Code Reviews

Unified Diff: src/compiler/arm64/instruction-selector-arm64.cc

Issue 1176393002: Revert of [arm64][turbofan]: Handle any immediate shift. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 6 months ago
 // Copyright 2014 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/compiler/instruction-selector-impl.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"

 namespace v8 {
 namespace internal {
(...skipping 19 matching lines...)
   explicit Arm64OperandGenerator(InstructionSelector* selector)
       : OperandGenerator(selector) {}

   InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
     if (CanBeImmediate(node, mode)) {
       return UseImmediate(node);
     }
     return UseRegister(node);
   }

-  // Use the provided node if it has the required value, or create a
-  // TempImmediate otherwise.
-  InstructionOperand UseImmediateOrTemp(Node* node, int32_t value) {
-    if (GetIntegerConstantValue(node) == value) {
-      return UseImmediate(node);
-    }
-    return TempImmediate(value);
-  }
-
-  bool IsIntegerConstant(Node* node) {
-    return (node->opcode() == IrOpcode::kInt32Constant) ||
-           (node->opcode() == IrOpcode::kInt64Constant);
-  }
-
-  int64_t GetIntegerConstantValue(Node* node) {
-    if (node->opcode() == IrOpcode::kInt32Constant) {
-      return OpParameter<int32_t>(node);
-    }
-    DCHECK(node->opcode() == IrOpcode::kInt64Constant);
-    return OpParameter<int64_t>(node);
-  }
-
   bool CanBeImmediate(Node* node, ImmediateMode mode) {
-    return IsIntegerConstant(node) &&
-           CanBeImmediate(GetIntegerConstantValue(node), mode);
+    int64_t value;
+    if (node->opcode() == IrOpcode::kInt32Constant)
+      value = OpParameter<int32_t>(node);
+    else if (node->opcode() == IrOpcode::kInt64Constant)
+      value = OpParameter<int64_t>(node);
+    else
+      return false;
+    return CanBeImmediate(value, mode);
   }

   bool CanBeImmediate(int64_t value, ImmediateMode mode) {
     unsigned ignored;
     switch (mode) {
       case kLogical32Imm:
         // TODO(dcarney): some unencodable values can be handled by
         // switching instructions.
         return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32,
                                        &ignored, &ignored, &ignored);
       case kLogical64Imm:
         return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
                                        &ignored, &ignored, &ignored);
       case kArithmeticImm:
         return Assembler::IsImmAddSub(value);
+      case kShift32Imm:
+        return 0 <= value && value < 32;
+      case kShift64Imm:
+        return 0 <= value && value < 64;
       case kLoadStoreImm8:
         return IsLoadStoreImmediate(value, LSByte);
       case kLoadStoreImm16:
         return IsLoadStoreImmediate(value, LSHalfword);
       case kLoadStoreImm32:
         return IsLoadStoreImmediate(value, LSWord);
       case kLoadStoreImm64:
         return IsLoadStoreImmediate(value, LSDoubleWord);
       case kNoImmediate:
         return false;
-      case kShift32Imm:  // Fall through.
-      case kShift64Imm:
-        // Shift operations only observe the bottom 5 or 6 bits of the value.
-        // All possible shifts can be encoded by discarding bits which have no
-        // effect.
-        return true;
     }
     return false;
   }

  private:
   bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
     return Assembler::IsImmLSScaled(value, size) ||
            Assembler::IsImmLSUnscaled(value);
   }
 };
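Note on the chunk above: the patch being reverted let CanBeImmediate() accept any constant for kShift32Imm / kShift64Imm, because an ARM64 shift only observes the low 5 or 6 bits of its amount, and added UseImmediateOrTemp() to materialize the masked amount when the graph constant itself was out of range. The revert restores the strict 0..31 / 0..63 range checks. A minimal standalone sketch of the masking idea, with a made-up helper name rather than the V8 API:

#include <cstdint>

// A 32-bit shift only observes the low 5 bits of its amount and a 64-bit
// shift the low 6 bits, so any constant can be reduced to an encodable one.
// `EncodableShiftAmount` is an illustrative name, not a V8 function.
int32_t EncodableShiftAmount(int64_t graph_constant, int width_bits) {
  int64_t mask = (width_bits == 32) ? 0x1f : 0x3f;
  return static_cast<int32_t>(graph_constant & mask);
}

// EncodableShiftAmount(35, 32) == 3: shifting a 32-bit value by 35 behaves
// like shifting it by 3, which is what the removed UseImmediateOrTemp relied
// on when the graph constant itself was out of range.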
(...skipping 18 matching lines...)

 void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
               ImmediateMode operand_mode) {
   Arm64OperandGenerator g(selector);
   selector->Emit(opcode, g.DefineAsRegister(node),
                  g.UseRegister(node->InputAt(0)),
                  g.UseOperand(node->InputAt(1), operand_mode));
 }


+template <typename Matcher>
+bool TryMatchShift(InstructionSelector* selector, Node* node,
+                   InstructionCode* opcode, IrOpcode::Value shift_opcode,
+                   ImmediateMode imm_mode, AddressingMode addressing_mode) {
+  if (node->opcode() != shift_opcode) return false;
+  Arm64OperandGenerator g(selector);
+  Matcher m(node);
+  if (g.CanBeImmediate(m.right().node(), imm_mode)) {
+    *opcode |= AddressingModeField::encode(addressing_mode);
+    return true;
+  }
+  return false;
+}
+
+
 bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
                       InstructionCode* opcode, bool try_ror) {
-  Arm64OperandGenerator g(selector);
-
-  if (node->InputCount() != 2) return false;
-  if (!g.IsIntegerConstant(node->InputAt(1))) return false;
-
-  switch (node->opcode()) {
-    case IrOpcode::kWord32Shl:
-    case IrOpcode::kWord64Shl:
-      *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
-      return true;
-    case IrOpcode::kWord32Shr:
-    case IrOpcode::kWord64Shr:
-      *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
-      return true;
-    case IrOpcode::kWord32Sar:
-    case IrOpcode::kWord64Sar:
-      *opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
-      return true;
-    case IrOpcode::kWord32Ror:
-    case IrOpcode::kWord64Ror:
-      if (try_ror) {
-        *opcode |= AddressingModeField::encode(kMode_Operand2_R_ROR_I);
-        return true;
-      }
-      return false;
-    default:
-      return false;
-  }
+  return TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord32Shl, kShift32Imm,
+                                          kMode_Operand2_R_LSL_I) ||
+         TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord32Shr, kShift32Imm,
+                                          kMode_Operand2_R_LSR_I) ||
+         TryMatchShift<Int32BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord32Sar, kShift32Imm,
+                                          kMode_Operand2_R_ASR_I) ||
+         (try_ror && TryMatchShift<Int32BinopMatcher>(
+                         selector, node, opcode, IrOpcode::kWord32Ror,
+                         kShift32Imm, kMode_Operand2_R_ROR_I)) ||
+         TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord64Shl, kShift64Imm,
+                                          kMode_Operand2_R_LSL_I) ||
+         TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord64Shr, kShift64Imm,
+                                          kMode_Operand2_R_LSR_I) ||
+         TryMatchShift<Int64BinopMatcher>(selector, node, opcode,
+                                          IrOpcode::kWord64Sar, kShift64Imm,
+                                          kMode_Operand2_R_ASR_I) ||
+         (try_ror && TryMatchShift<Int64BinopMatcher>(
+                         selector, node, opcode, IrOpcode::kWord64Ror,
+                         kShift64Imm, kMode_Operand2_R_ROR_I));
 }


 bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
                        Node* left_node, Node* right_node,
                        InstructionOperand* left_op,
                        InstructionOperand* right_op, InstructionCode* opcode) {
   NodeMatcher nm(right_node);

   if (nm.IsWord32And()) {
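Note on TryMatchAnyShift above: both versions try to fold a shift-by-constant into the operand-2 field (LSL/LSR/ASR/ROR by immediate) of the instruction that consumes it. The restored version only matches when CanBeImmediate() accepts the amount under kShift32Imm / kShift64Imm; the reverted version accepted any integer constant and relied on the masking described earlier. A rough standalone sketch of the matching shape, using hypothetical simplified types rather than the V8 classes:

#include <cstdint>

// Hypothetical stand-ins for the selector's node and opcode types; the real
// code works on v8::internal::compiler::Node and InstructionCode.
enum class Op { kWord32Shl, kWord32Shr, kWord32Sar, kOther };
enum class Operand2Mode { kNone, kR_LSL_I, kR_LSR_I, kR_ASR_I };

struct ShiftNode {
  Op op;
  int64_t amount;  // right input, assumed to already be a constant
};

// Succeeds only for the expected shift opcode with an in-range amount; on
// success the operand-2 addressing mode is reported so the caller can fold
// the shift into the consuming instruction.
bool TryMatchShiftSketch(const ShiftNode& n, Op expected, Operand2Mode mode,
                         int64_t width, Operand2Mode* out) {
  if (n.op != expected) return false;
  if (n.amount < 0 || n.amount >= width) return false;
  *out = mode;
  return true;
}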
(...skipping 374 matching lines...)
     uint32_t mask = m.right().Value();
     uint32_t mask_width = base::bits::CountPopulation32(mask);
     uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
     if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
       // The mask must be contiguous, and occupy the least-significant bits.
       DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));

       // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
       // significant bits.
       Int32BinopMatcher mleft(m.left().node());
-      if (mleft.right().HasValue()) {
-        // Any shift value can match; int32 shifts use `value % 32`.
-        uint32_t lsb = mleft.right().Value() & 0x1f;
-
+      if (mleft.right().IsInRange(0, 31)) {
         // Ubfx cannot extract bits past the register size, however since
         // shifting the original value would have introduced some zeros we can
         // still use ubfx with a smaller mask and the remaining bits will be
         // zeros.
+        uint32_t lsb = mleft.right().Value();
         if (lsb + mask_width > 32) mask_width = 32 - lsb;

         Emit(kArm64Ubfx32, g.DefineAsRegister(node),
              g.UseRegister(mleft.left().node()),
-             g.UseImmediateOrTemp(mleft.right().node(), lsb),
-             g.TempImmediate(mask_width));
+             g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width));
         return;
       }
       // Other cases fall through to the normal And operation.
     }
   }
   VisitLogical<Int32BinopMatcher>(
       this, node, &m, kArm64And32, CanCover(node, m.left().node()),
       CanCover(node, m.right().node()), kLogical32Imm);
 }


 void InstructionSelector::VisitWord64And(Node* node) {
   Arm64OperandGenerator g(this);
   Int64BinopMatcher m(node);
   if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
       m.right().HasValue()) {
     uint64_t mask = m.right().Value();
     uint64_t mask_width = base::bits::CountPopulation64(mask);
     uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
     if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
       // The mask must be contiguous, and occupy the least-significant bits.
       DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));

       // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
       // significant bits.
       Int64BinopMatcher mleft(m.left().node());
-      if (mleft.right().HasValue()) {
-        // Any shift value can match; int64 shifts use `value % 64`.
-        uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);
-
+      if (mleft.right().IsInRange(0, 63)) {
         // Ubfx cannot extract bits past the register size, however since
         // shifting the original value would have introduced some zeros we can
         // still use ubfx with a smaller mask and the remaining bits will be
         // zeros.
+        uint64_t lsb = mleft.right().Value();
         if (lsb + mask_width > 64) mask_width = 64 - lsb;

         Emit(kArm64Ubfx, g.DefineAsRegister(node),
              g.UseRegister(mleft.left().node()),
-             g.UseImmediateOrTemp(mleft.right().node(), lsb),
+             g.UseImmediate(mleft.right().node()),
              g.TempImmediate(static_cast<int32_t>(mask_width)));
         return;
       }
       // Other cases fall through to the normal And operation.
     }
   }
   VisitLogical<Int64BinopMatcher>(
       this, node, &m, kArm64And, CanCover(node, m.left().node()),
       CanCover(node, m.right().node()), kLogical64Imm);
 }
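Note on VisitWord32And / VisitWord64And above: both versions recognize And(Shr(x, lsb), mask), where mask is a contiguous run of low bits, as a single unsigned bitfield extract (UBFX). They differ only in how the shift amount is accepted: the restored code requires it to already be in range (IsInRange), while the reverted code masked it with & 0x1f or & 0x3f and went through UseImmediateOrTemp. A standalone sketch of the underlying equivalence, not the V8 matcher, with made-up helper names:

#include <cstdint>

// Both expressions produce the `width` bits of x starting at bit `lsb`,
// which is exactly the operation a single ubfx performs.
uint32_t AndShr(uint32_t x, unsigned lsb, uint32_t mask) {
  return (x >> lsb) & mask;
}

uint32_t UbfxLike(uint32_t x, unsigned lsb, unsigned width) {
  uint32_t low_mask = (width >= 32) ? ~0u : ((1u << width) - 1u);
  return (x >> lsb) & low_mask;
}

// AndShr(x, 3, 0xff) == UbfxLike(x, 3, 8) for every x; on ARM64 that is
// `ubfx w0, w1, #3, #8`.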
(...skipping 108 matching lines...)
     }
   }
   return false;
 }

 }  // namespace


 void InstructionSelector::VisitWord32Shr(Node* node) {
   Int32BinopMatcher m(node);
-  if (m.left().IsWord32And() && m.right().HasValue()) {
-    uint32_t lsb = m.right().Value() & 0x1f;
+  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
+    uint32_t lsb = m.right().Value();
     Int32BinopMatcher mleft(m.left().node());
     if (mleft.right().HasValue()) {
+      uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
+      uint32_t mask_width = base::bits::CountPopulation32(mask);
+      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
       // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
       // shifted into the least-significant bits.
-      uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
-      unsigned mask_width = base::bits::CountPopulation32(mask);
-      unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
       if ((mask_msb + mask_width + lsb) == 32) {
         Arm64OperandGenerator g(this);
         DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
         Emit(kArm64Ubfx32, g.DefineAsRegister(node),
-             g.UseRegister(mleft.left().node()),
-             g.UseImmediateOrTemp(m.right().node(), lsb),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
              g.TempImmediate(mask_width));
         return;
       }
     }
   } else if (TryEmitBitfieldExtract32(this, node)) {
     return;
   }
   VisitRRO(this, kArm64Lsr32, node, kShift32Imm);
 }


 void InstructionSelector::VisitWord64Shr(Node* node) {
+  Arm64OperandGenerator g(this);
   Int64BinopMatcher m(node);
-  if (m.left().IsWord64And() && m.right().HasValue()) {
-    uint32_t lsb = m.right().Value() & 0x3f;
+  if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) {
+    uint64_t lsb = m.right().Value();
     Int64BinopMatcher mleft(m.left().node());
     if (mleft.right().HasValue()) {
       // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
       // shifted into the least-significant bits.
       uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
-      unsigned mask_width = base::bits::CountPopulation64(mask);
-      unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
+      uint64_t mask_width = base::bits::CountPopulation64(mask);
+      uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
       if ((mask_msb + mask_width + lsb) == 64) {
-        Arm64OperandGenerator g(this);
         DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
         Emit(kArm64Ubfx, g.DefineAsRegister(node),
              g.UseRegister(mleft.left().node()),
-             g.UseImmediateOrTemp(m.right().node(), lsb),
-             g.TempImmediate(mask_width));
+             g.TempImmediate(static_cast<int32_t>(lsb)),
+             g.TempImmediate(static_cast<int32_t>(mask_width)));
         return;
       }
     }
   }
   VisitRRO(this, kArm64Lsr, node, kShift64Imm);
 }


 void InstructionSelector::VisitWord32Sar(Node* node) {
   if (TryEmitBitfieldExtract32(this, node)) {
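Note on VisitWord32Shr / VisitWord64Shr above: here the pattern is Shr(And(x, mask), lsb), so the low lsb bits of the mask are cleared first and the remainder must be one contiguous run starting at bit lsb; the code checks this with CountLeadingZeros + CountPopulation + lsb == 32 (or 64). A standalone sketch of that contiguity test for the 32-bit case, assuming lsb < 32 and using GCC/Clang builtins rather than the V8 bits helpers; the function name is made up:

#include <cstdint>

// After clearing the low `lsb` bits (they are shifted out anyway), the
// remaining mask must be one contiguous run starting at bit `lsb`, which
// holds exactly when clz(mask) + popcount(mask) + lsb == 32.
bool IsUbfxPattern32(uint32_t mask, unsigned lsb) {
  mask = (mask >> lsb) << lsb;
  unsigned width = static_cast<unsigned>(__builtin_popcount(mask));
  unsigned leading_zeros =
      (mask == 0) ? 32u : static_cast<unsigned>(__builtin_clz(mask));
  return width != 0 && leading_zeros + width + lsb == 32;
}

// IsUbfxPattern32(0x0ff0, 4) == true   (extract 8 bits starting at bit 4)
// IsUbfxPattern32(0x00a0, 4) == false  (bits 5 and 7 are not contiguous)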
(...skipping 1157 matching lines...)
          MachineOperatorBuilder::kFloat64RoundTruncate |
          MachineOperatorBuilder::kFloat64RoundTiesAway |
          MachineOperatorBuilder::kWord32ShiftIsSafe |
          MachineOperatorBuilder::kInt32DivIsSafe |
          MachineOperatorBuilder::kUint32DivIsSafe;
 }

 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
