Index: src/compiler/x64/instruction-selector-x64.cc
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index ed857c57b0af8d5b1c282bc9a4ed3d514b1f5f8a..cc8b16cae5e7807458bf169dcc92f835b4d3334a 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -180,29 +180,34 @@ class X64OperandGenerator final : public OperandGenerator {
 namespace {

-ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
+ArchOpcode GetLoadOpcode(LoadRepresentation load_rep, bool protect) {
   ArchOpcode opcode = kArchNop;
   switch (load_rep.representation()) {
     case MachineRepresentation::kFloat32:
+      DCHECK(!protect);
[Review comment: titzer, 2016/11/22 10:33:20]
We'll need to support all of these types with prot

[Reply: Eric Holk, 2016/11/22 23:16:25]
I think this is a better idea. It looks like I can
       opcode = kX64Movss;
       break;
     case MachineRepresentation::kFloat64:
+      DCHECK(!protect);
       opcode = kX64Movsd;
       break;
     case MachineRepresentation::kBit:  // Fall through.
     case MachineRepresentation::kWord8:
+      DCHECK(!protect);
       opcode = load_rep.IsSigned() ? kX64Movsxbl : kX64Movzxbl;
       break;
     case MachineRepresentation::kWord16:
+      DCHECK(!protect);
       opcode = load_rep.IsSigned() ? kX64Movsxwl : kX64Movzxwl;
       break;
     case MachineRepresentation::kWord32:
-      opcode = kX64Movl;
+      opcode = protect ? kX64TrapMovl : kX64Movl;
       break;
     case MachineRepresentation::kTaggedSigned:   // Fall through.
     case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:         // Fall through.
     case MachineRepresentation::kWord64:
+      DCHECK(!protect);
       opcode = kX64Movq;
       break;
     case MachineRepresentation::kSimd128:  // Fall through.
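[Editor's note] The review exchange above flags that every representation will eventually need a protected variant; in this patch set only kWord32 maps to a trap opcode (kX64TrapMovl). One possible direction, sketched below as a standalone model rather than as V8 code, is to encode a protection flag next to the opcode in the instruction word instead of minting one kX64Trap* opcode per representation. The type alias and the field positions (kOpcodeMask, kProtectedBit) are assumptions invented for this illustration.

#include <cassert>
#include <cstdint>

// Standalone model, not V8 code: field widths and bit positions below are
// assumptions chosen for illustration only.
using InstructionCode = uint32_t;
constexpr uint32_t kOpcodeMask = 0x1FF;       // assumed: opcode in bits 0-8
constexpr uint32_t kProtectedBit = 1u << 30;  // assumed: a spare high bit

constexpr InstructionCode EncodeProtection(InstructionCode code, bool protect) {
  return protect ? (code | kProtectedBit) : code;
}

int main() {
  InstructionCode movl = EncodeProtection(/*opcode=*/42u, /*protect=*/true);
  assert((movl & kProtectedBit) != 0);  // protection flag survives encoding
  assert((movl & kOpcodeMask) == 42u);  // opcode bits are untouched
  return 0;
}

With this shape, GetLoadOpcode and GetStoreOpcode would no longer need a protect parameter or the per-case DCHECKs at all; the flag would ride alongside the AddressingModeField that the visitors already OR into the instruction code.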
@@ -213,13 +218,50 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
   return opcode;
 }

+ArchOpcode GetStoreOpcode(StoreRepresentation store_rep, bool protect) {
+  switch (store_rep.representation()) {
+    case MachineRepresentation::kFloat32:
+      DCHECK(!protect);
+      return kX64Movss;
+      break;
+    case MachineRepresentation::kFloat64:
+      DCHECK(!protect);
+      return kX64Movsd;
+      break;
+    case MachineRepresentation::kBit:  // Fall through.
+    case MachineRepresentation::kWord8:
+      DCHECK(!protect);
+      return kX64Movb;
+      break;
+    case MachineRepresentation::kWord16:
+      DCHECK(!protect);
+      return kX64Movw;
+      break;
+    case MachineRepresentation::kWord32:
+      return protect ? kX64TrapMovl : kX64Movl;
+      break;
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:         // Fall through.
+    case MachineRepresentation::kWord64:
+      DCHECK(!protect);
+      return kX64Movq;
+      break;
+    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kNone:
+      UNREACHABLE();
+      return kArchNop;
+  }
+}
+
 }  // namespace

 void InstructionSelector::VisitLoad(Node* node) {
   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   X64OperandGenerator g(this);
-  ArchOpcode opcode = GetLoadOpcode(load_rep);
+  const bool protect = false;
+  ArchOpcode opcode = GetLoadOpcode(load_rep, protect);
   InstructionOperand outputs[1];
   outputs[0] = g.DefineAsRegister(node);
   InstructionOperand inputs[3];
@@ -234,7 +276,8 @@ void InstructionSelector::VisitProtectedLoad(Node* node) {
   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   X64OperandGenerator g(this);
-  ArchOpcode opcode = GetLoadOpcode(load_rep);
+  const bool protect = true;
+  ArchOpcode opcode = GetLoadOpcode(load_rep, protect);
   InstructionOperand outputs[1];
   outputs[0] = g.DefineAsRegister(node);
   InstructionOperand inputs[4];
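[Editor's note] After this hunk, VisitLoad and VisitProtectedLoad differ only in the protect flag they pass to the shared GetLoadOpcode and in the extra input slot the protected form reserves. A minimal standalone model of that contract (made-up enums standing in for MachineRepresentation and ArchOpcode; asserts play the role of the DCHECKs) shows that only the 32-bit case tolerates protect == true in this patch set:

#include <cassert>

// Standalone model, not V8 code: mirrors the patch's guard that only
// 32-bit loads may be protected for now.
enum class Rep { kWord8, kWord16, kWord32, kWord64 };
enum class Opcode { kMovzxbl, kMovzxwl, kMovl, kTrapMovl, kMovq };

Opcode GetLoadOpcodeModel(Rep rep, bool protect) {
  switch (rep) {
    case Rep::kWord32:
      return protect ? Opcode::kTrapMovl : Opcode::kMovl;
    case Rep::kWord8:
      assert(!protect);  // stands in for DCHECK(!protect)
      return Opcode::kMovzxbl;
    case Rep::kWord16:
      assert(!protect);
      return Opcode::kMovzxwl;
    case Rep::kWord64:
      assert(!protect);
      return Opcode::kMovq;
  }
  return Opcode::kMovl;  // unreachable; silences missing-return warnings
}

int main() {
  assert(GetLoadOpcodeModel(Rep::kWord32, true) == Opcode::kTrapMovl);
  assert(GetLoadOpcodeModel(Rep::kWord32, false) == Opcode::kMovl);
  return 0;
}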
@@ -295,35 +338,8 @@ void InstructionSelector::VisitStore(Node* node) { |
code |= MiscField::encode(static_cast<int>(record_write_mode)); |
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps); |
} else { |
- ArchOpcode opcode = kArchNop; |
- switch (rep) { |
- case MachineRepresentation::kFloat32: |
- opcode = kX64Movss; |
- break; |
- case MachineRepresentation::kFloat64: |
- opcode = kX64Movsd; |
- break; |
- case MachineRepresentation::kBit: // Fall through. |
- case MachineRepresentation::kWord8: |
- opcode = kX64Movb; |
- break; |
- case MachineRepresentation::kWord16: |
- opcode = kX64Movw; |
- break; |
- case MachineRepresentation::kWord32: |
- opcode = kX64Movl; |
- break; |
- case MachineRepresentation::kTaggedSigned: // Fall through. |
- case MachineRepresentation::kTaggedPointer: // Fall through. |
- case MachineRepresentation::kTagged: // Fall through. |
- case MachineRepresentation::kWord64: |
- opcode = kX64Movq; |
- break; |
- case MachineRepresentation::kSimd128: // Fall through. |
- case MachineRepresentation::kNone: |
- UNREACHABLE(); |
- return; |
- } |
+ const bool protect = false; |
+ ArchOpcode opcode = GetStoreOpcode(store_rep, protect); |
InstructionOperand inputs[4]; |
size_t input_count = 0; |
AddressingMode addressing_mode = |
@@ -338,6 +354,33 @@ void InstructionSelector::VisitStore(Node* node) {
   }
 }
+
+void InstructionSelector::VisitProtectedStore(Node* node) {
+  X64OperandGenerator g(this);
+  Node* value = node->InputAt(2);
+  Node* context = node->InputAt(3);
+  Node* position = node->InputAt(4);
+
+  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+
+  const bool protect = true;
+  ArchOpcode opcode = GetStoreOpcode(store_rep, protect);
+  InstructionOperand inputs[6];
+  size_t input_count = 0;
+  AddressingMode addressing_mode =
+      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+  InstructionOperand value_operand =
+      g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
+  inputs[input_count++] = value_operand;
+  inputs[input_count++] = g.UseRegister(context);
+  inputs[input_count++] = g.UseImmediate(position);
+  Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
+}
+
 // Architecture supports unaligned access, therefore VisitLoad is used instead
 void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
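[Editor's note] Unlike VisitStore, the protected path never reaches a write-barrier branch, consistent with wasm linear memory holding untagged values, and it appends the context and the source position after the value, presumably so a faulting access can be reported as a precise trap. A standalone model of that operand packing (plain C++, not V8 code; the six-slot bound mirrors InstructionOperand inputs[6]):

#include <cassert>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> inputs;
  // Effective-address operands come first; base/index here stand in for
  // whatever GetEffectiveAddressMemoryOperand appends in the real code.
  inputs = {"base", "index"};
  // The stored value follows, as an immediate when possible.
  inputs.push_back("value");
  // Context and source position ride along for the trap path.
  inputs.push_back("context");
  inputs.push_back("position");
  assert(inputs.size() <= 6);  // must fit the fixed inputs[6] array
  return 0;
}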