Chromium Code Reviews

Side by Side Diff: src/compiler/arm64/instruction-selector-arm64.cc

Issue 1179733004: [arm64][turbofan]: Handle any immediate shift. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 6 months ago
OLD | NEW
1 // Copyright 2014 the V8 project authors. All rights reserved. 1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/compiler/instruction-selector-impl.h" 5 #include "src/compiler/instruction-selector-impl.h"
6 #include "src/compiler/node-matchers.h" 6 #include "src/compiler/node-matchers.h"
7 #include "src/compiler/node-properties.h" 7 #include "src/compiler/node-properties.h"
8 8
9 namespace v8 { 9 namespace v8 {
10 namespace internal { 10 namespace internal {
(...skipping 19 matching lines...)
30 explicit Arm64OperandGenerator(InstructionSelector* selector) 30 explicit Arm64OperandGenerator(InstructionSelector* selector)
31 : OperandGenerator(selector) {} 31 : OperandGenerator(selector) {}
32 32
33 InstructionOperand UseOperand(Node* node, ImmediateMode mode) { 33 InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
34 if (CanBeImmediate(node, mode)) { 34 if (CanBeImmediate(node, mode)) {
35 return UseImmediate(node); 35 return UseImmediate(node);
36 } 36 }
37 return UseRegister(node); 37 return UseRegister(node);
38 } 38 }
39 39
40 // Use the provided node if it has the required value, or create a
41 // TempImmediate otherwise.
42 InstructionOperand UseImmediateOrTemp(Node* node, int32_t value) {
43 if (GetIntegerConstantValue(node) == value) {
44 return UseImmediate(node);
45 }
46 return TempImmediate(value);
47 }
48
49 bool IsIntegerConstant(Node* node) {
50 return (node->opcode() == IrOpcode::kInt32Constant) ||
51 (node->opcode() == IrOpcode::kInt64Constant);
52 }
53
54 int64_t GetIntegerConstantValue(Node* node) {
55 if (node->opcode() == IrOpcode::kInt32Constant) {
56 return OpParameter<int32_t>(node);
57 }
58 DCHECK(node->opcode() == IrOpcode::kInt64Constant);
59 return OpParameter<int64_t>(node);
60 }
61
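For context, a standalone sketch (hypothetical names, not part of the patch) of the decision UseImmediateOrTemp above encodes: the original node can serve as the immediate operand only when its constant already equals the canonical value; otherwise the selector materializes a fresh TempImmediate carrying the corrected value.

#include <cassert>
#include <cstdint>

enum class ImmediateSource { kOriginalNode, kFreshTemp };

// E.g. a 32-bit shift node holding the constant 33 canonicalizes to an
// amount of 1, so the node's own constant cannot be reused and a
// TempImmediate(1) is created instead.
ImmediateSource ChooseSource(int64_t node_constant, int32_t canonical_value) {
  return node_constant == canonical_value ? ImmediateSource::kOriginalNode
                                          : ImmediateSource::kFreshTemp;
}

int main() {
  assert(ChooseSource(33, 1) == ImmediateSource::kFreshTemp);
  assert(ChooseSource(1, 1) == ImmediateSource::kOriginalNode);
  return 0;
}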
40 bool CanBeImmediate(Node* node, ImmediateMode mode) { 62 bool CanBeImmediate(Node* node, ImmediateMode mode) {
41 int64_t value; 63 return IsIntegerConstant(node) &&
42 if (node->opcode() == IrOpcode::kInt32Constant) 64 CanBeImmediate(GetIntegerConstantValue(node), mode);
43 value = OpParameter<int32_t>(node);
44 else if (node->opcode() == IrOpcode::kInt64Constant)
45 value = OpParameter<int64_t>(node);
46 else
47 return false;
48 return CanBeImmediate(value, mode);
49 } 65 }
50 66
51 bool CanBeImmediate(int64_t value, ImmediateMode mode) { 67 bool CanBeImmediate(int64_t value, ImmediateMode mode) {
52 unsigned ignored; 68 unsigned ignored;
53 switch (mode) { 69 switch (mode) {
54 case kLogical32Imm: 70 case kLogical32Imm:
55 // TODO(dcarney): some unencodable values can be handled by 71 // TODO(dcarney): some unencodable values can be handled by
56 // switching instructions. 72 // switching instructions.
57 return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32, 73 return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32,
58 &ignored, &ignored, &ignored); 74 &ignored, &ignored, &ignored);
59 case kLogical64Imm: 75 case kLogical64Imm:
60 return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64, 76 return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
61 &ignored, &ignored, &ignored); 77 &ignored, &ignored, &ignored);
62 case kArithmeticImm: 78 case kArithmeticImm:
63 return Assembler::IsImmAddSub(value); 79 return Assembler::IsImmAddSub(value);
64 case kShift32Imm:
65 return 0 <= value && value < 32;
66 case kShift64Imm:
67 return 0 <= value && value < 64;
68 case kLoadStoreImm8: 80 case kLoadStoreImm8:
69 return IsLoadStoreImmediate(value, LSByte); 81 return IsLoadStoreImmediate(value, LSByte);
70 case kLoadStoreImm16: 82 case kLoadStoreImm16:
71 return IsLoadStoreImmediate(value, LSHalfword); 83 return IsLoadStoreImmediate(value, LSHalfword);
72 case kLoadStoreImm32: 84 case kLoadStoreImm32:
73 return IsLoadStoreImmediate(value, LSWord); 85 return IsLoadStoreImmediate(value, LSWord);
74 case kLoadStoreImm64: 86 case kLoadStoreImm64:
75 return IsLoadStoreImmediate(value, LSDoubleWord); 87 return IsLoadStoreImmediate(value, LSDoubleWord);
76 case kNoImmediate: 88 case kNoImmediate:
77 return false; 89 return false;
90 case kShift32Imm: // Fall through.
91 case kShift64Imm:
92 // Shift operations only observe the bottom 5 or 6 bits of the value.
93 // All possible shifts can be encoded by discarding bits which have no
94 // effect.
95 return true;
78 } 96 }
79 return false; 97 return false;
80 } 98 }
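The kShift32Imm and kShift64Imm cases above now accept every integer constant because only the low 5 or 6 bits of the amount have any effect. A minimal standalone sketch of that arithmetic (illustrative, not part of the patch):

#include <cassert>
#include <cstdint>

// Canonical shift amount: only the low 5 (32-bit) or 6 (64-bit) bits of the
// requested amount matter, so every constant is encodable after masking.
uint32_t CanonicalShift(int64_t amount, int width) {
  return static_cast<uint32_t>(amount) & (width - 1);  // width is 32 or 64
}

int main() {
  assert(CanonicalShift(33, 32) == 1);   // a shift by 33 behaves as 1
  assert(CanonicalShift(64, 64) == 0);   // a shift by 64 behaves as 0
  assert(CanonicalShift(-1, 32) == 31);  // negative amounts wrap the same way
  return 0;
}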
81 99
82 private: 100 private:
83 bool IsLoadStoreImmediate(int64_t value, LSDataSize size) { 101 bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
84 return Assembler::IsImmLSScaled(value, size) || 102 return Assembler::IsImmLSScaled(value, size) ||
85 Assembler::IsImmLSUnscaled(value); 103 Assembler::IsImmLSUnscaled(value);
86 } 104 }
87 }; 105 };
(...skipping 18 matching lines...)
106 124
107 void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node, 125 void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
108 ImmediateMode operand_mode) { 126 ImmediateMode operand_mode) {
109 Arm64OperandGenerator g(selector); 127 Arm64OperandGenerator g(selector);
110 selector->Emit(opcode, g.DefineAsRegister(node), 128 selector->Emit(opcode, g.DefineAsRegister(node),
111 g.UseRegister(node->InputAt(0)), 129 g.UseRegister(node->InputAt(0)),
112 g.UseOperand(node->InputAt(1), operand_mode)); 130 g.UseOperand(node->InputAt(1), operand_mode));
113 } 131 }
114 132
115 133
116 template <typename Matcher>
117 bool TryMatchShift(InstructionSelector* selector, Node* node,
118 InstructionCode* opcode, IrOpcode::Value shift_opcode,
119 ImmediateMode imm_mode, AddressingMode addressing_mode) {
120 if (node->opcode() != shift_opcode) return false;
121 Arm64OperandGenerator g(selector);
122 Matcher m(node);
123 if (g.CanBeImmediate(m.right().node(), imm_mode)) {
124 *opcode |= AddressingModeField::encode(addressing_mode);
125 return true;
126 }
127 return false;
128 }
129
130
131 bool TryMatchAnyShift(InstructionSelector* selector, Node* node, 134 bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
132 InstructionCode* opcode, bool try_ror) { 135 InstructionCode* opcode, bool try_ror) {
133 return TryMatchShift<Int32BinopMatcher>(selector, node, opcode, 136 Arm64OperandGenerator g(selector);
134 IrOpcode::kWord32Shl, kShift32Imm, 137
135 kMode_Operand2_R_LSL_I) || 138 if (node->InputCount() != 2) return false;
136 TryMatchShift<Int32BinopMatcher>(selector, node, opcode, 139 if (!g.IsIntegerConstant(node->InputAt(1))) return false;
137 IrOpcode::kWord32Shr, kShift32Imm, 140
138 kMode_Operand2_R_LSR_I) || 141 switch (node->opcode()) {
139 TryMatchShift<Int32BinopMatcher>(selector, node, opcode, 142 case IrOpcode::kWord32Shl:
140 IrOpcode::kWord32Sar, kShift32Imm, 143 case IrOpcode::kWord64Shl:
141 kMode_Operand2_R_ASR_I) || 144 *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
142 (try_ror && TryMatchShift<Int32BinopMatcher>( 145 return true;
143 selector, node, opcode, IrOpcode::kWord32Ror, 146 case IrOpcode::kWord32Shr:
144 kShift32Imm, kMode_Operand2_R_ROR_I)) || 147 case IrOpcode::kWord64Shr:
145 TryMatchShift<Int64BinopMatcher>(selector, node, opcode, 148 *opcode |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
146 IrOpcode::kWord64Shl, kShift64Imm, 149 return true;
147 kMode_Operand2_R_LSL_I) || 150 case IrOpcode::kWord32Sar:
148 TryMatchShift<Int64BinopMatcher>(selector, node, opcode, 151 case IrOpcode::kWord64Sar:
149 IrOpcode::kWord64Shr, kShift64Imm, 152 *opcode |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
150 kMode_Operand2_R_LSR_I) || 153 return true;
151 TryMatchShift<Int64BinopMatcher>(selector, node, opcode, 154 case IrOpcode::kWord32Ror:
152 IrOpcode::kWord64Sar, kShift64Imm, 155 case IrOpcode::kWord64Ror:
153 kMode_Operand2_R_ASR_I) || 156 if (try_ror) {
154 (try_ror && TryMatchShift<Int64BinopMatcher>( 157 *opcode |= AddressingModeField::encode(kMode_Operand2_R_ROR_I);
155 selector, node, opcode, IrOpcode::kWord64Ror, 158 return true;
156 kShift64Imm, kMode_Operand2_R_ROR_I)); 159 }
160 return false;
161 default:
162 return false;
163 }
157 } 164 }
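On a successful match, TryMatchAnyShift ORs an operand-2 addressing mode into the instruction code, which later lets the code generator fold the shift into the consuming instruction (e.g. add w0, w1, w2, lsl #3 rather than a separate lsl followed by add). A simplified analogue of the bitfield encoding, with hypothetical mode values and field position:

#include <cstdint>

using InstructionCode = uint32_t;

// Illustrative numbering; V8's actual AddressingMode values differ.
enum AddressingMode : uint32_t {
  kNone = 0,
  kOperand2_R_LSL_I = 1,
  kOperand2_R_LSR_I = 2,
  kOperand2_R_ASR_I = 3,
  kOperand2_R_ROR_I = 4,
};

// Assumed layout for this sketch: the arch opcode occupies the low bits and
// the addressing mode a disjoint range above it, so OR-ing never clobbers.
constexpr uint32_t kAddressingModeShift = 9;

constexpr InstructionCode EncodeMode(InstructionCode opcode,
                                     AddressingMode mode) {
  return opcode | (static_cast<uint32_t>(mode) << kAddressingModeShift);
}

static_assert(EncodeMode(5, kOperand2_R_LSL_I) == (5u | (1u << 9)),
              "the mode lands in its own bit range");

int main() { return 0; }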
158 165
159 166
160 bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector, 167 bool TryMatchAnyExtend(Arm64OperandGenerator* g, InstructionSelector* selector,
161 Node* left_node, Node* right_node, 168 Node* left_node, Node* right_node,
162 InstructionOperand* left_op, 169 InstructionOperand* left_op,
163 InstructionOperand* right_op, InstructionCode* opcode) { 170 InstructionOperand* right_op, InstructionCode* opcode) {
164 NodeMatcher nm(right_node); 171 NodeMatcher nm(right_node);
165 172
166 if (nm.IsWord32And()) { 173 if (nm.IsWord32And()) {
(...skipping 374 matching lines...)
541 uint32_t mask = m.right().Value(); 548 uint32_t mask = m.right().Value();
542 uint32_t mask_width = base::bits::CountPopulation32(mask); 549 uint32_t mask_width = base::bits::CountPopulation32(mask);
543 uint32_t mask_msb = base::bits::CountLeadingZeros32(mask); 550 uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
544 if ((mask_width != 0) && (mask_msb + mask_width == 32)) { 551 if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
545 // The mask must be contiguous, and occupy the least-significant bits. 552 // The mask must be contiguous, and occupy the least-significant bits.
546 DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask)); 553 DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
547 554
548 // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least 555 // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
549 // significant bits. 556 // significant bits.
550 Int32BinopMatcher mleft(m.left().node()); 557 Int32BinopMatcher mleft(m.left().node());
551 if (mleft.right().IsInRange(0, 31)) { 558 if (mleft.right().HasValue()) {
559 // Any shift value can match; int32 shifts use `value % 32`.
560 uint32_t lsb = mleft.right().Value() & 0x1f;
561
552 // Ubfx cannot extract bits past the register size, however since 562 // Ubfx cannot extract bits past the register size, however since
553 // shifting the original value would have introduced some zeros we can 563 // shifting the original value would have introduced some zeros we can
554 // still use ubfx with a smaller mask and the remaining bits will be 564 // still use ubfx with a smaller mask and the remaining bits will be
555 // zeros. 565 // zeros.
556 uint32_t lsb = mleft.right().Value();
557 if (lsb + mask_width > 32) mask_width = 32 - lsb; 566 if (lsb + mask_width > 32) mask_width = 32 - lsb;
558 567
559 Emit(kArm64Ubfx32, g.DefineAsRegister(node), 568 Emit(kArm64Ubfx32, g.DefineAsRegister(node),
560 g.UseRegister(mleft.left().node()), 569 g.UseRegister(mleft.left().node()),
561 g.UseImmediate(mleft.right().node()), g.TempImmediate(mask_width)); 570 g.UseImmediateOrTemp(mleft.right().node(), lsb),
571 g.TempImmediate(mask_width));
562 return; 572 return;
563 } 573 }
564 // Other cases fall through to the normal And operation. 574 // Other cases fall through to the normal And operation.
565 } 575 }
566 } 576 }
567 VisitLogical<Int32BinopMatcher>( 577 VisitLogical<Int32BinopMatcher>(
568 this, node, &m, kArm64And32, CanCover(node, m.left().node()), 578 this, node, &m, kArm64And32, CanCover(node, m.left().node()),
569 CanCover(node, m.right().node()), kLogical32Imm); 579 CanCover(node, m.right().node()), kLogical32Imm);
570 } 580 }
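A standalone check of the arithmetic above (illustrative values, not part of the patch): for And(Shr(x, shift), mask) with a contiguous low mask, the selection reduces to ubfx with lsb equal to the shift amount mod 32 and a width clamped so the extraction stays inside the register.

#include <cassert>
#include <cstdint>

// Mirrors VisitWord32And: lsb is the shift amount mod 32, and the extraction
// width is clamped so that lsb + width never exceeds 32.
void UbfxParams32(uint32_t shift, uint32_t mask_width, uint32_t* lsb,
                  uint32_t* width) {
  *lsb = shift & 0x1f;
  *width = (*lsb + mask_width > 32) ? 32 - *lsb : mask_width;
}

int main() {
  uint32_t lsb, width;
  // And(Shr(x, 8), 0xff): extract 8 bits starting at bit 8.
  UbfxParams32(8, 8, &lsb, &width);
  assert(lsb == 8 && width == 8);
  // And(Shr(x, 40), 0xff): 32-bit shifts wrap, so 40 behaves as 8.
  UbfxParams32(40, 8, &lsb, &width);
  assert(lsb == 8 && width == 8);
  // And(Shr(x, 28), 0xff): only 4 bits remain, so the width clamps to 4.
  UbfxParams32(28, 8, &lsb, &width);
  assert(lsb == 28 && width == 4);
  return 0;
}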
571 581
572 582
573 void InstructionSelector::VisitWord64And(Node* node) { 583 void InstructionSelector::VisitWord64And(Node* node) {
574 Arm64OperandGenerator g(this); 584 Arm64OperandGenerator g(this);
575 Int64BinopMatcher m(node); 585 Int64BinopMatcher m(node);
576 if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) && 586 if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
577 m.right().HasValue()) { 587 m.right().HasValue()) {
578 uint64_t mask = m.right().Value(); 588 uint64_t mask = m.right().Value();
579 uint64_t mask_width = base::bits::CountPopulation64(mask); 589 uint64_t mask_width = base::bits::CountPopulation64(mask);
580 uint64_t mask_msb = base::bits::CountLeadingZeros64(mask); 590 uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
581 if ((mask_width != 0) && (mask_msb + mask_width == 64)) { 591 if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
582 // The mask must be contiguous, and occupy the least-significant bits. 592 // The mask must be contiguous, and occupy the least-significant bits.
583 DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask)); 593 DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
584 594
585 // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least 595 // Select Ubfx for And(Shr(x, imm), mask) where the mask is in the least
586 // significant bits. 596 // significant bits.
587 Int64BinopMatcher mleft(m.left().node()); 597 Int64BinopMatcher mleft(m.left().node());
588 if (mleft.right().IsInRange(0, 63)) { 598 if (mleft.right().HasValue()) {
599 // Any shift value can match; int64 shifts use `value % 64`.
600 uint32_t lsb = static_cast<uint32_t>(mleft.right().Value() & 0x3f);
601
589 // Ubfx cannot extract bits past the register size, however since 602 // Ubfx cannot extract bits past the register size, however since
590 // shifting the original value would have introduced some zeros we can 603 // shifting the original value would have introduced some zeros we can
591 // still use ubfx with a smaller mask and the remaining bits will be 604 // still use ubfx with a smaller mask and the remaining bits will be
592 // zeros. 605 // zeros.
593 uint64_t lsb = mleft.right().Value();
594 if (lsb + mask_width > 64) mask_width = 64 - lsb; 606 if (lsb + mask_width > 64) mask_width = 64 - lsb;
595 607
596 Emit(kArm64Ubfx, g.DefineAsRegister(node), 608 Emit(kArm64Ubfx, g.DefineAsRegister(node),
597 g.UseRegister(mleft.left().node()), 609 g.UseRegister(mleft.left().node()),
598 g.UseImmediate(mleft.right().node()), 610 g.UseImmediateOrTemp(mleft.right().node(), lsb),
599 g.TempImmediate(static_cast<int32_t>(mask_width))); 611 g.TempImmediate(static_cast<int32_t>(mask_width)));
600 return; 612 return;
601 } 613 }
602 // Other cases fall through to the normal And operation. 614 // Other cases fall through to the normal And operation.
603 } 615 }
604 } 616 }
605 VisitLogical<Int64BinopMatcher>( 617 VisitLogical<Int64BinopMatcher>(
606 this, node, &m, kArm64And, CanCover(node, m.left().node()), 618 this, node, &m, kArm64And, CanCover(node, m.left().node()),
607 CanCover(node, m.right().node()), kLogical64Imm); 619 CanCover(node, m.right().node()), kLogical64Imm);
608 } 620 }
(...skipping 108 matching lines...)
717 } 729 }
718 } 730 }
719 return false; 731 return false;
720 } 732 }
721 733
722 } // namespace 734 } // namespace
723 735
724 736
725 void InstructionSelector::VisitWord32Shr(Node* node) { 737 void InstructionSelector::VisitWord32Shr(Node* node) {
726 Int32BinopMatcher m(node); 738 Int32BinopMatcher m(node);
727 if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) { 739 if (m.left().IsWord32And() && m.right().HasValue()) {
728 uint32_t lsb = m.right().Value(); 740 uint32_t lsb = m.right().Value() & 0x1f;
729 Int32BinopMatcher mleft(m.left().node()); 741 Int32BinopMatcher mleft(m.left().node());
730 if (mleft.right().HasValue()) { 742 if (mleft.right().HasValue()) {
731 uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
732 uint32_t mask_width = base::bits::CountPopulation32(mask);
733 uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
734 // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is 743 // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
735 // shifted into the least-significant bits. 744 // shifted into the least-significant bits.
745 uint32_t mask = (mleft.right().Value() >> lsb) << lsb;
746 unsigned mask_width = base::bits::CountPopulation32(mask);
747 unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
736 if ((mask_msb + mask_width + lsb) == 32) { 748 if ((mask_msb + mask_width + lsb) == 32) {
737 Arm64OperandGenerator g(this); 749 Arm64OperandGenerator g(this);
738 DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask)); 750 DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
739 Emit(kArm64Ubfx32, g.DefineAsRegister(node), 751 Emit(kArm64Ubfx32, g.DefineAsRegister(node),
740 g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), 752 g.UseRegister(mleft.left().node()),
753 g.UseImmediateOrTemp(m.right().node(), lsb),
741 g.TempImmediate(mask_width)); 754 g.TempImmediate(mask_width));
742 return; 755 return;
743 } 756 }
744 } 757 }
745 } else if (TryEmitBitfieldExtract32(this, node)) { 758 } else if (TryEmitBitfieldExtract32(this, node)) {
746 return; 759 return;
747 } 760 }
748 VisitRRO(this, kArm64Lsr32, node, kShift32Imm); 761 VisitRRO(this, kArm64Lsr32, node, kShift32Imm);
749 } 762 }
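A sketch of the dual pattern above (illustrative, with GCC/Clang builtins standing in for base::bits): Shr(And(x, mask), imm) becomes ubfx only when the mask bits surviving the shift form one contiguous run starting exactly at the shift amount.

#include <cassert>
#include <cstdint>

// Mirrors VisitWord32Shr: drop mask bits below lsb, then require the rest to
// be a single contiguous run beginning at lsb.
bool MatchesUbfx32(uint32_t and_mask, uint32_t shift, uint32_t* lsb,
                   uint32_t* width) {
  *lsb = shift & 0x1f;
  uint32_t mask = (and_mask >> *lsb) << *lsb;
  *width = static_cast<uint32_t>(__builtin_popcount(mask));
  uint32_t msb = mask ? static_cast<uint32_t>(__builtin_clz(mask)) : 32;
  return msb + *width + *lsb == 32;
}

int main() {
  uint32_t lsb, width;
  // Shr(And(x, 0xff00), 8) => ubfx w0, w1, #8, #8.
  assert(MatchesUbfx32(0xff00, 8, &lsb, &width) && lsb == 8 && width == 8);
  // A mask that survives the shift non-contiguously does not match.
  assert(!MatchesUbfx32(0xf0f0, 4, &lsb, &width));
  return 0;
}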
750 763
751 764
752 void InstructionSelector::VisitWord64Shr(Node* node) { 765 void InstructionSelector::VisitWord64Shr(Node* node) {
753 Arm64OperandGenerator g(this);
754 Int64BinopMatcher m(node); 766 Int64BinopMatcher m(node);
755 if (m.left().IsWord64And() && m.right().IsInRange(0, 63)) { 767 if (m.left().IsWord64And() && m.right().HasValue()) {
756 uint64_t lsb = m.right().Value(); 768 uint32_t lsb = m.right().Value() & 0x3f;
757 Int64BinopMatcher mleft(m.left().node()); 769 Int64BinopMatcher mleft(m.left().node());
758 if (mleft.right().HasValue()) { 770 if (mleft.right().HasValue()) {
759 // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is 771 // Select Ubfx for Shr(And(x, mask), imm) where the result of the mask is
760 // shifted into the least-significant bits. 772 // shifted into the least-significant bits.
761 uint64_t mask = (mleft.right().Value() >> lsb) << lsb; 773 uint64_t mask = (mleft.right().Value() >> lsb) << lsb;
762 uint64_t mask_width = base::bits::CountPopulation64(mask); 774 unsigned mask_width = base::bits::CountPopulation64(mask);
763 uint64_t mask_msb = base::bits::CountLeadingZeros64(mask); 775 unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
764 if ((mask_msb + mask_width + lsb) == 64) { 776 if ((mask_msb + mask_width + lsb) == 64) {
777 Arm64OperandGenerator g(this);
765 DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask)); 778 DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
766 Emit(kArm64Ubfx, g.DefineAsRegister(node), 779 Emit(kArm64Ubfx, g.DefineAsRegister(node),
767 g.UseRegister(mleft.left().node()), 780 g.UseRegister(mleft.left().node()),
768 g.TempImmediate(static_cast<int32_t>(lsb)), 781 g.UseImmediateOrTemp(m.right().node(), lsb),
769 g.TempImmediate(static_cast<int32_t>(mask_width))); 782 g.TempImmediate(mask_width));
770 return; 783 return;
771 } 784 }
772 } 785 }
773 } 786 }
774 VisitRRO(this, kArm64Lsr, node, kShift64Imm); 787 VisitRRO(this, kArm64Lsr, node, kShift64Imm);
775 } 788 }
776 789
777 790
778 void InstructionSelector::VisitWord32Sar(Node* node) { 791 void InstructionSelector::VisitWord32Sar(Node* node) {
779 if (TryEmitBitfieldExtract32(this, node)) { 792 if (TryEmitBitfieldExtract32(this, node)) {
(...skipping 1157 matching lines...)
1937 MachineOperatorBuilder::kFloat64RoundTruncate | 1950 MachineOperatorBuilder::kFloat64RoundTruncate |
1938 MachineOperatorBuilder::kFloat64RoundTiesAway | 1951 MachineOperatorBuilder::kFloat64RoundTiesAway |
1939 MachineOperatorBuilder::kWord32ShiftIsSafe | 1952 MachineOperatorBuilder::kWord32ShiftIsSafe |
1940 MachineOperatorBuilder::kInt32DivIsSafe | 1953 MachineOperatorBuilder::kInt32DivIsSafe |
1941 MachineOperatorBuilder::kUint32DivIsSafe; 1954 MachineOperatorBuilder::kUint32DivIsSafe;
1942 } 1955 }
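kWord32ShiftIsSafe, already advertised in the flag list above, records the same hardware fact the shift-immediate change relies on: a 32-bit shift observes only `amount & 0x1f`, so callers need not insert an explicit mask before a shift. A minimal demonstration of the guarantee (sketch, not V8 code):

#include <cassert>
#include <cstdint>

// What "shift is safe" means in practice: out-of-range amounts behave as
// their masked equivalents rather than producing zero or trapping.
uint32_t SafeShl32(uint32_t x, uint32_t amount) {
  return x << (amount & 0x1f);  // arm64 hardware applies this mask itself
}

int main() {
  assert(SafeShl32(1, 33) == SafeShl32(1, 1));  // 33 & 0x1f == 1
  return 0;
}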
1943 1956
1944 } // namespace compiler 1957 } // namespace compiler
1945 } // namespace internal 1958 } // namespace internal
1946 } // namespace v8 1959 } // namespace v8