Index: src/interpreter/interpreter-assembler.cc |
diff --git a/src/interpreter/interpreter-assembler.cc b/src/interpreter/interpreter-assembler.cc |
index 5b32d2fb338c913138f57f2b6cc169ee5decdca2..fec92e5de61f38967de2098ec4617d5ffc2ffd64 100644 |
--- a/src/interpreter/interpreter-assembler.cc |
+++ b/src/interpreter/interpreter-assembler.cc |
@@ -86,7 +86,7 @@ Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) { |
Variable cur_context(this, MachineRepresentation::kTaggedPointer); |
cur_context.Bind(context); |
- Variable cur_depth(this, MachineRepresentation::kWord32); |
+ Variable cur_depth(this, MachineType::PointerRepresentation()); |
cur_depth.Bind(depth); |
Label context_found(this); |
@@ -95,16 +95,16 @@ Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) { |
Label context_search(this, 2, context_search_loop_variables); |
// Fast path if the depth is 0. |
- Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search); |
+ Branch(WordEqual(depth, IntPtrConstant(0)), &context_found, &context_search); |
// Loop until the depth is 0. |
Bind(&context_search); |
{ |
- cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1))); |
+ cur_depth.Bind(IntPtrSub(cur_depth.value(), IntPtrConstant(1))); |
cur_context.Bind( |
LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX)); |
- Branch(Word32Equal(cur_depth.value(), Int32Constant(0)), &context_found, |
+ Branch(WordEqual(cur_depth.value(), IntPtrConstant(0)), &context_found, |
&context_search); |
} |
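[editor note] For readers less familiar with the CSA loop idiom, here is a hypothetical plain-C++ analogue of the depth walk above; the struct and function are illustrative only, not V8 API. The point of the change is that the depth counter now lives at pointer width, so the subtract and the compare use the same word-sized operations as the surrounding code.

  #include <cstdint>
  #include <cstdio>

  struct Context { Context* previous; };  // stand-in for a V8 context chain

  Context* GetContextAtDepth(Context* context, intptr_t depth) {
    while (depth != 0) {            // Branch(WordEqual(depth, IntPtrConstant(0)), ...)
      context = context->previous;  // LoadContextElement(..., Context::PREVIOUS_INDEX)
      depth -= 1;                   // IntPtrSub(cur_depth, IntPtrConstant(1))
    }
    return context;
  }

  int main() {
    Context c0{nullptr}, c1{&c0}, c2{&c1};
    std::printf("%d\n", GetContextAtDepth(&c2, 2) == &c0);  // prints 1
  }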
@@ -118,7 +118,7 @@ void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context, |
Variable cur_context(this, MachineRepresentation::kTaggedPointer); |
cur_context.Bind(context); |
- Variable cur_depth(this, MachineRepresentation::kWord32); |
+ Variable cur_depth(this, MachineType::PointerRepresentation()); |
cur_depth.Bind(depth); |
Variable* context_search_loop_variables[2] = {&cur_depth, &cur_context}; |
@@ -138,12 +138,11 @@ void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context, |
// Jump to the target if the extension slot is not a hole. |
GotoIf(WordNotEqual(extension_slot, TheHoleConstant()), target); |
- cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1))); |
+ cur_depth.Bind(IntPtrSub(cur_depth.value(), IntPtrConstant(1))); |
cur_context.Bind( |
LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX)); |
- GotoIf(Word32NotEqual(cur_depth.value(), Int32Constant(0)), |
- &context_search); |
+ GotoIf(WordNotEqual(cur_depth.value(), IntPtrConstant(0)), &context_search); |
} |
} |
@@ -211,8 +210,9 @@ Node* InterpreterAssembler::BytecodeOperandUnsignedByte(int operand_index) { |
DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize( |
bytecode_, operand_index, operand_scale())); |
Node* operand_offset = OperandOffset(operand_index); |
- return Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), |
- IntPtrAdd(BytecodeOffset(), operand_offset)); |
+ Node* load = Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), |
+ IntPtrAdd(BytecodeOffset(), operand_offset)); |
+ return ChangeUint32ToWord(load); |
[rmcilroy, 2016/12/09 11:21:37] Not something for this CL (unless it's easier) but …
[Igor Sheludko, 2016/12/10 00:09:50] Done.
} |
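[editor note] The interesting part of this hunk is that the unsigned byte operand is now widened to full word width with ChangeUint32ToWord. A small stand-alone C++ illustration of the zero-extension semantics being relied on (values are illustrative only):

  #include <cstdint>
  #include <cstdio>

  int main() {
    uint8_t raw = 0xFF;                               // operand byte, value 255
    uintptr_t widened = static_cast<uintptr_t>(raw);  // zero-extension, as in ChangeUint32ToWord
    std::printf("%llu\n", static_cast<unsigned long long>(widened));  // prints 255 on 32- and 64-bit
  }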
Node* InterpreterAssembler::BytecodeOperandSignedByte(int operand_index) { |
@@ -224,10 +224,7 @@ Node* InterpreterAssembler::BytecodeOperandSignedByte(int operand_index) { |
IntPtrAdd(BytecodeOffset(), operand_offset)); |
// Ensure that we sign extend to full pointer size |
- if (kPointerSize == 8) { |
- load = ChangeInt32ToInt64(load); |
- } |
- return load; |
+ return ChangeInt32ToIntPtr(load); |
} |
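[editor note] ChangeInt32ToIntPtr sign-extends on 64-bit targets and is a no-op on 32-bit targets, which is why the explicit kPointerSize == 8 branch can be dropped here (and in the short and quad variants below). A stand-alone illustration of the sign-extension, using a negative operand:

  #include <cstdint>
  #include <cstdio>

  int main() {
    int8_t raw = -1;                                     // signed byte operand
    int32_t as_int32 = raw;                              // what the MachineType::Int8() load yields
    intptr_t widened = static_cast<intptr_t>(as_int32);  // sign-extension, as in ChangeInt32ToIntPtr
    std::printf("%lld\n", static_cast<long long>(widened));  // prints -1 on both word sizes
  }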
compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned( |
@@ -288,12 +285,14 @@ Node* InterpreterAssembler::BytecodeOperandUnsignedShort(int operand_index) { |
Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale())); |
int operand_offset = |
Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()); |
+ Node* load; |
if (TargetSupportsUnalignedAccess()) { |
- return Load(MachineType::Uint16(), BytecodeArrayTaggedPointer(), |
+ load = Load(MachineType::Uint16(), BytecodeArrayTaggedPointer(), |
IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset))); |
} else { |
- return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16()); |
+ load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16()); |
} |
+ return ChangeUint32ToWord(load); |
} |
Node* InterpreterAssembler::BytecodeOperandSignedShort(int operand_index) { |
@@ -312,10 +311,7 @@ Node* InterpreterAssembler::BytecodeOperandSignedShort(int operand_index) { |
} |
// Ensure that we sign extend to full pointer size |
- if (kPointerSize == 8) { |
- load = ChangeInt32ToInt64(load); |
- } |
- return load; |
+ return ChangeInt32ToIntPtr(load); |
} |
Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(int operand_index) { |
@@ -324,12 +320,14 @@ Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(int operand_index) { |
bytecode_, operand_index, operand_scale())); |
int operand_offset = |
Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale()); |
+ Node* load; |
if (TargetSupportsUnalignedAccess()) { |
- return Load(MachineType::Uint32(), BytecodeArrayTaggedPointer(), |
+ load = Load(MachineType::Uint32(), BytecodeArrayTaggedPointer(), |
IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset))); |
} else { |
- return BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32()); |
+ load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32()); |
} |
+ return ChangeUint32ToWord(load); |
} |
Node* InterpreterAssembler::BytecodeOperandSignedQuad(int operand_index) { |
@@ -347,10 +345,7 @@ Node* InterpreterAssembler::BytecodeOperandSignedQuad(int operand_index) { |
} |
// Ensure that we sign extend to full pointer size |
- if (kPointerSize == 8) { |
- load = ChangeInt32ToInt64(load); |
- } |
- return load; |
+ return ChangeInt32ToIntPtr(load); |
} |
Node* InterpreterAssembler::BytecodeSignedOperand(int operand_index, |
@@ -457,15 +452,13 @@ Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) { |
Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) { |
Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(), |
BytecodeArray::kConstantPoolOffset); |
- Node* entry_offset = |
- IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag), |
- WordShl(index, kPointerSizeLog2)); |
- return Load(MachineType::AnyTagged(), constant_pool, entry_offset); |
+ return LoadFixedArrayElement(constant_pool, index, 0, INTPTR_PARAMETERS); |
} |
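[editor note] The hand-written offset arithmetic removed here is what the LoadFixedArrayElement helper computes internally for an intptr index. A stand-alone sketch of that arithmetic with placeholder constants (the real values come from the V8 headers):

  #include <cstdint>
  #include <cstdio>

  int main() {
    const intptr_t kHeaderSize = 16;      // FixedArray::kHeaderSize (assumed, 64-bit)
    const intptr_t kHeapObjectTag = 1;
    const intptr_t kPointerSizeLog2 = 3;  // 64-bit target (assumed)
    intptr_t index = 5;
    intptr_t entry_offset =
        (kHeaderSize - kHeapObjectTag) + (index << kPointerSizeLog2);  // WordShl(index, kPointerSizeLog2)
    std::printf("%lld\n", static_cast<long long>(entry_offset));  // prints 55
  }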
Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) { |
Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(), |
BytecodeArray::kConstantPoolOffset); |
+ // TODO(ishell): move the implementation to CSA. |
int offset = FixedArray::kHeaderSize - kHeapObjectTag; |
#if V8_TARGET_LITTLE_ENDIAN |
if (Is64()) { |
@@ -515,12 +508,13 @@ Node* InterpreterAssembler::IncrementCallCount(Node* type_feedback_vector, |
Node* slot_id) { |
Comment("increment call count"); |
Node* call_count_slot = IntPtrAdd(slot_id, IntPtrConstant(1)); |
- Node* call_count = |
- LoadFixedArrayElement(type_feedback_vector, call_count_slot); |
- Node* new_count = SmiAdd(call_count, SmiTag(Int32Constant(1))); |
+ Node* call_count = LoadFixedArrayElement( |
+ type_feedback_vector, call_count_slot, 0, INTPTR_PARAMETERS); |
+ Node* new_count = SmiAdd(call_count, SmiConstant(1)); |
// Count is Smi, so we don't need a write barrier. |
return StoreFixedArrayElement(type_feedback_vector, call_count_slot, |
- new_count, SKIP_WRITE_BARRIER); |
+ new_count, SKIP_WRITE_BARRIER, 0, |
+ INTPTR_PARAMETERS); |
} |
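[editor note] Assembled from the hunk above, the function now reads as follows (not compiled independently); note that SmiConstant(1) builds the tagged constant directly rather than going through SmiTag(Int32Constant(1)), and the intptr slot index is passed straight through to the fixed-array accessors:

  Node* InterpreterAssembler::IncrementCallCount(Node* type_feedback_vector,
                                                 Node* slot_id) {
    Comment("increment call count");
    Node* call_count_slot = IntPtrAdd(slot_id, IntPtrConstant(1));
    Node* call_count = LoadFixedArrayElement(
        type_feedback_vector, call_count_slot, 0, INTPTR_PARAMETERS);
    Node* new_count = SmiAdd(call_count, SmiConstant(1));
    // Count is Smi, so we don't need a write barrier.
    return StoreFixedArrayElement(type_feedback_vector, call_count_slot,
                                  new_count, SKIP_WRITE_BARRIER, 0,
                                  INTPTR_PARAMETERS);
  }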
Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context, |
@@ -543,12 +537,16 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context, |
WeakCell::kValueOffset && |
WeakCell::kValueOffset == Symbol::kHashFieldSlot); |
+ // Truncate |arg_count| to match the calling convention. |
+ arg_count = TruncateWordToWord32(arg_count); |
+ |
Variable return_value(this, MachineRepresentation::kTagged); |
Label call_function(this), extra_checks(this, Label::kDeferred), call(this), |
end(this); |
// The checks. First, does function match the recorded monomorphic target? |
- Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id); |
+ Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id, |
+ 0, INTPTR_PARAMETERS); |
Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element); |
Node* is_monomorphic = WordEqual(function, feedback_value); |
GotoUnless(is_monomorphic, &extra_checks); |
@@ -586,9 +584,8 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context, |
GotoIf(is_megamorphic, &call); |
Comment("check if it is an allocation site"); |
- Node* is_allocation_site = WordEqual( |
- LoadMap(feedback_element), LoadRoot(Heap::kAllocationSiteMapRootIndex)); |
- GotoUnless(is_allocation_site, &check_initialized); |
+ GotoUnless(IsAllocationSiteMap(LoadMap(feedback_element)), |
+ &check_initialized); |
// If it is not the Array() function, mark megamorphic. |
Node* context_slot = LoadContextElement(LoadNativeContext(context), |
@@ -626,7 +623,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context, |
// Check if function is an object of JSFunction type. |
Node* instance_type = LoadInstanceType(function); |
Node* is_js_function = |
- WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE)); |
+ Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE)); |
GotoUnless(is_js_function, &mark_megamorphic); |
// Check if it is the Array() function. |
@@ -669,7 +666,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context, |
StoreFixedArrayElement( |
type_feedback_vector, slot_id, |
HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())), |
- SKIP_WRITE_BARRIER); |
+ SKIP_WRITE_BARRIER, 0, INTPTR_PARAMETERS); |
Goto(&call); |
} |
} |
@@ -700,6 +697,10 @@ Node* InterpreterAssembler::CallJS(Node* function, Node* context, |
Callable callable = CodeFactory::InterpreterPushArgsAndCall( |
isolate(), tail_call_mode, CallableType::kAny); |
Node* code_target = HeapConstant(callable.code()); |
+ |
+ // Truncate |arg_count| to match the calling convention. |
+ arg_count = TruncateWordToWord32(arg_count); |
+ |
return CallStub(callable.descriptor(), code_target, context, arg_count, |
first_arg, function); |
} |
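[editor note] The pattern repeated in this and the surrounding call paths is that |arg_count| is kept at pointer width inside the interpreter and only narrowed with TruncateWordToWord32 at the stub-call boundary, where the calling convention expects a 32-bit count. The narrowing itself is just a truncating conversion (illustrative only):

  #include <cstdint>
  #include <cstdio>

  int main() {
    intptr_t arg_count = 3;                               // word-sized inside the interpreter
    int32_t truncated = static_cast<int32_t>(arg_count);  // TruncateWordToWord32 equivalent
    std::printf("%d\n", truncated);                       // prints 3
  }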
@@ -713,9 +714,12 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context, |
Label call_construct_function(this, &allocation_feedback), |
extra_checks(this, Label::kDeferred), call_construct(this), end(this); |
+ // Truncate |arg_count| to match the calling convention. |
+ arg_count = TruncateWordToWord32(arg_count); |
+ |
// Slot id of 0 is used to indicate no type feedback is available. |
STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0); |
- Node* is_feedback_unavailable = Word32Equal(slot_id, Int32Constant(0)); |
+ Node* is_feedback_unavailable = WordEqual(slot_id, IntPtrConstant(0)); |
GotoIf(is_feedback_unavailable, &call_construct); |
// Check that the constructor is not a smi. |
@@ -725,11 +729,12 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context, |
// Check that constructor is a JSFunction. |
Node* instance_type = LoadInstanceType(constructor); |
Node* is_js_function = |
- WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE)); |
+ Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE)); |
GotoUnless(is_js_function, &call_construct); |
// Check if it is a monomorphic constructor. |
- Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id); |
+ Node* feedback_element = LoadFixedArrayElement(type_feedback_vector, slot_id, |
+ 0, INTPTR_PARAMETERS); |
Node* feedback_value = LoadWeakCellValueUnchecked(feedback_element); |
Node* is_monomorphic = WordEqual(constructor, feedback_value); |
allocation_feedback.Bind(UndefinedConstant()); |
@@ -834,7 +839,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context, |
StoreFixedArrayElement( |
type_feedback_vector, slot_id, |
HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate())), |
- SKIP_WRITE_BARRIER); |
+ SKIP_WRITE_BARRIER, 0, INTPTR_PARAMETERS); |
Goto(&call_construct_function); |
} |
} |
@@ -865,12 +870,15 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context, |
Node* function_table = ExternalConstant( |
ExternalReference::runtime_function_table_address(isolate())); |
Node* function_offset = |
- Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function))); |
+ IntPtrMul(function_id, IntPtrConstant(sizeof(Runtime::Function))); |
Node* function = IntPtrAdd(function_table, function_offset); |
Node* function_entry = |
Load(MachineType::Pointer(), function, |
IntPtrConstant(offsetof(Runtime::Function, entry))); |
+ // Truncate |arg_count| to match the calling convention. |
+ arg_count = TruncateWordToWord32(arg_count); |
+ |
return CallStub(callable.descriptor(), code_target, context, arg_count, |
first_arg, function_entry, result_size); |
} |
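[editor note] The IntPtrMul/IntPtrAdd pair above computes the address of the runtime function table entry in word-sized arithmetic. A hypothetical plain-C++ rendering of the same address computation (the struct is a stand-in for Runtime::Function, not the real layout):

  #include <cstdint>
  #include <cstdio>

  struct RuntimeFunction { void* entry; const char* name; };  // stand-in only

  int main() {
    RuntimeFunction table[4] = {};
    intptr_t function_id = 2;
    intptr_t function_offset =
        function_id * static_cast<intptr_t>(sizeof(RuntimeFunction));  // IntPtrMul
    RuntimeFunction* function = reinterpret_cast<RuntimeFunction*>(
        reinterpret_cast<intptr_t>(table) + function_offset);          // IntPtrAdd
    std::printf("%d\n", function == &table[2]);  // prints 1
  }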
@@ -887,7 +895,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight) { |
Variable new_budget(this, MachineRepresentation::kWord32); |
Node* old_budget = |
Load(MachineType::Int32(), BytecodeArrayTaggedPointer(), budget_offset); |
- new_budget.Bind(Int32Add(old_budget, weight)); |
+ new_budget.Bind(Int32Add(old_budget, TruncateWordToWord32(weight))); |
Node* condition = |
Int32GreaterThanOrEqual(new_budget.value(), Int32Constant(0)); |
Branch(condition, &ok, &interrupt_check); |
@@ -955,10 +963,7 @@ void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs, |
Node* InterpreterAssembler::LoadBytecode(compiler::Node* bytecode_offset) { |
Node* bytecode = |
Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset); |
- if (kPointerSize == 8) { |
- bytecode = ChangeUint32ToUint64(bytecode); |
- } |
- return bytecode; |
+ return ChangeUint32ToWord(bytecode); |
} |
Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) { |
@@ -1001,6 +1006,7 @@ void InterpreterAssembler::InlineStar() { |
} |
Node* InterpreterAssembler::Dispatch() { |
+ Comment("========= Dispatch"); |
Node* target_offset = Advance(); |
Node* target_bytecode = LoadBytecode(target_offset); |
@@ -1026,7 +1032,8 @@ Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode, |
Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler, |
Node* bytecode_offset) { |
Node* handler_entry = |
- IntPtrAdd(handler, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag)); |
+ IntPtrAdd(BitcastTaggedToWord(handler), |
+ IntPtrConstant(Code::kHeaderSize - kHeapObjectTag)); |
[rmcilroy, 2016/12/09 11:21:37] nit - Maybe there should be a CSA helper which con…
[Igor Sheludko, 2016/12/10 00:09:50] Added TODO, will address in a follow-up CL.
return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset); |
} |
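[editor note] A possible shape for the helper rmcilroy hints at above; the name and exact home are hypothetical (only a TODO was added in this CL), but it would simply wrap the bitcast-plus-header-offset pattern used here:

  // Hypothetical helper, sketched from the pattern above; not an existing
  // CodeStubAssembler method at the time of this CL.
  Node* CodeStubAssembler::GetCodeEntry(Node* code) {
    return IntPtrAdd(BitcastTaggedToWord(code),
                     IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
  }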
@@ -1167,8 +1174,8 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() { |
// Update profiling count by -BytecodeOffset to simulate backedge to start of |
// function. |
Node* profiling_weight = |
- Int32Sub(Int32Constant(kHeapObjectTag + BytecodeArray::kHeaderSize), |
- BytecodeOffset()); |
+ IntPtrSub(IntPtrConstant(kHeapObjectTag + BytecodeArray::kHeaderSize), |
+ BytecodeOffset()); |
UpdateInterruptBudget(profiling_weight); |
} |
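[editor note] The profiling weight is now computed at pointer width; it is negative (it simulates a back edge to the start of the function) and is only truncated to 32 bits inside UpdateInterruptBudget, right before the Int32Add with the budget. A stand-alone sketch of the arithmetic with placeholder constants:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const intptr_t kHeapObjectTag = 1;
    const intptr_t kHeaderSize = 56;   // BytecodeArray::kHeaderSize (assumed)
    intptr_t bytecode_offset = 120;    // BytecodeOffset() (example value)
    intptr_t weight = (kHeapObjectTag + kHeaderSize) - bytecode_offset;  // IntPtrSub
    int32_t budget = 1000;
    int32_t new_budget = budget + static_cast<int32_t>(weight);  // TruncateWordToWord32 + Int32Add
    std::printf("%lld %d\n", static_cast<long long>(weight), new_budget);  // prints -63 937
  }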
@@ -1183,7 +1190,9 @@ Node* InterpreterAssembler::StackCheckTriggeredInterrupt() { |
Node* InterpreterAssembler::LoadOSRNestingLevel() { |
Node* offset = |
IntPtrConstant(BytecodeArray::kOSRNestingLevelOffset - kHeapObjectTag); |
- return Load(MachineType::Int8(), BytecodeArrayTaggedPointer(), offset); |
+ Node* load = Load(MachineType::Int8(), BytecodeArrayTaggedPointer(), offset); |
+ // Ensure that we sign extend to full pointer size |
+ return ChangeInt32ToIntPtr(load); |
} |
void InterpreterAssembler::Abort(BailoutReason bailout_reason) { |
@@ -1255,19 +1264,21 @@ bool InterpreterAssembler::TargetSupportsUnalignedAccess() { |
Node* InterpreterAssembler::RegisterCount() { |
Node* bytecode_array = LoadRegister(Register::bytecode_array()); |
Node* frame_size = LoadObjectField( |
- bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32()); |
- return Word32Sar(frame_size, Int32Constant(kPointerSizeLog2)); |
+ bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Uint32()); |
+ return WordShr(ChangeUint32ToWord(frame_size), |
+ IntPtrConstant(kPointerSizeLog2)); |
} |
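[editor note] The frame size is a byte count, so it is now loaded as Uint32, zero-extended to word width, and divided by the pointer size with a logical (rather than arithmetic) shift. A plain-C++ illustration of the computation (64-bit pointer size assumed):

  #include <cstdint>
  #include <cstdio>

  int main() {
    uint32_t frame_size = 40;              // bytes, example value
    const uintptr_t kPointerSizeLog2 = 3;  // 64-bit target (assumed)
    uintptr_t register_count =
        static_cast<uintptr_t>(frame_size) >> kPointerSizeLog2;  // ChangeUint32ToWord + WordShr
    std::printf("%llu\n", static_cast<unsigned long long>(register_count));  // prints 5
  }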
Node* InterpreterAssembler::ExportRegisterFile(Node* array) { |
+ Node* register_count = RegisterCount(); |
if (FLAG_debug_code) { |
Node* array_size = LoadAndUntagFixedArrayBaseLength(array); |
- AbortIfWordNotEqual( |
- array_size, RegisterCount(), kInvalidRegisterFileInGenerator); |
+ AbortIfWordNotEqual(array_size, register_count, |
+ kInvalidRegisterFileInGenerator); |
} |
- Variable var_index(this, MachineRepresentation::kWord32); |
- var_index.Bind(Int32Constant(0)); |
+ Variable var_index(this, MachineType::PointerRepresentation()); |
+ var_index.Bind(IntPtrConstant(0)); |
// Iterate over register file and write values into array. |
// The mapping of register to array index must match that used in |
@@ -1277,16 +1288,15 @@ Node* InterpreterAssembler::ExportRegisterFile(Node* array) { |
Bind(&loop); |
{ |
Node* index = var_index.value(); |
- Node* condition = Int32LessThan(index, RegisterCount()); |
- GotoUnless(condition, &done_loop); |
+ GotoUnless(UintPtrLessThan(index, register_count), &done_loop); |
- Node* reg_index = |
- Int32Sub(Int32Constant(Register(0).ToOperand()), index); |
- Node* value = LoadRegister(ChangeInt32ToIntPtr(reg_index)); |
+ Node* reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index); |
+ Node* value = LoadRegister(reg_index); |
- StoreFixedArrayElement(array, index, value); |
+ StoreFixedArrayElement(array, index, value, UPDATE_WRITE_BARRIER, 0, |
+ INTPTR_PARAMETERS); |
[rmcilroy, 2016/12/09 11:21:37] Do you need the extra arguments here, aren't these…
[Igor Sheludko, 2016/12/10 00:09:50] Currently, all Load/StoreFixedXXXArray methods exp…
- var_index.Bind(Int32Add(index, Int32Constant(1))); |
+ var_index.Bind(IntPtrAdd(index, IntPtrConstant(1))); |
Goto(&loop); |
} |
Bind(&done_loop); |
@@ -1295,14 +1305,15 @@ Node* InterpreterAssembler::ExportRegisterFile(Node* array) { |
} |
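[editor note] The export loop now runs entirely on a pointer-width index: RegisterCount() is read once before the loop, the bound check uses UintPtrLessThan, and the register-to-array mapping no longer needs a ChangeInt32ToIntPtr. A hypothetical plain-C++ analogue of the loop shape (register numbering simplified away):

  #include <cstdint>
  #include <cstdio>

  int main() {
    const intptr_t register_count = 3;   // RegisterCount(), read once before the loop
    int registers[3] = {10, 20, 30};     // stand-in for the interpreter register file
    int array[3] = {0, 0, 0};
    for (intptr_t index = 0;
         static_cast<uintptr_t>(index) < static_cast<uintptr_t>(register_count);
         ++index) {                      // UintPtrLessThan(index, register_count)
      array[index] = registers[index];   // LoadRegister -> StoreFixedArrayElement
    }
    std::printf("%d %d %d\n", array[0], array[1], array[2]);  // prints 10 20 30
  }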
Node* InterpreterAssembler::ImportRegisterFile(Node* array) { |
+ Node* register_count = RegisterCount(); |
if (FLAG_debug_code) { |
Node* array_size = LoadAndUntagFixedArrayBaseLength(array); |
- AbortIfWordNotEqual( |
- array_size, RegisterCount(), kInvalidRegisterFileInGenerator); |
+ AbortIfWordNotEqual(array_size, register_count, |
+ kInvalidRegisterFileInGenerator); |
} |
- Variable var_index(this, MachineRepresentation::kWord32); |
- var_index.Bind(Int32Constant(0)); |
+ Variable var_index(this, MachineType::PointerRepresentation()); |
+ var_index.Bind(IntPtrConstant(0)); |
// Iterate over array and write values into register file. Also erase the |
// array contents to not keep them alive artificially. |
@@ -1311,18 +1322,17 @@ Node* InterpreterAssembler::ImportRegisterFile(Node* array) { |
Bind(&loop); |
{ |
Node* index = var_index.value(); |
- Node* condition = Int32LessThan(index, RegisterCount()); |
- GotoUnless(condition, &done_loop); |
+ GotoUnless(UintPtrLessThan(index, register_count), &done_loop); |
- Node* value = LoadFixedArrayElement(array, index); |
+ Node* value = LoadFixedArrayElement(array, index, 0, INTPTR_PARAMETERS); |
- Node* reg_index = |
- Int32Sub(Int32Constant(Register(0).ToOperand()), index); |
- StoreRegister(value, ChangeInt32ToIntPtr(reg_index)); |
+ Node* reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index); |
+ StoreRegister(value, reg_index); |
- StoreFixedArrayElement(array, index, StaleRegisterConstant()); |
+ StoreFixedArrayElement(array, index, StaleRegisterConstant(), |
+ UPDATE_WRITE_BARRIER, 0, INTPTR_PARAMETERS); |
- var_index.Bind(Int32Add(index, Int32Constant(1))); |
+ var_index.Bind(IntPtrAdd(index, IntPtrConstant(1))); |
Goto(&loop); |
} |
Bind(&done_loop); |