Chromium Code Reviews

Unified Diff: src/x64/macro-assembler-x64.cc

Issue 196893003: Introduce addp, idivp, imulp and subp for x64 port (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 9 months ago
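
Context for the diff below: the patch mechanically swaps 64-bit arithmetic instructions (addq, subq, imul) for pointer-size variants (addp, subp, imulp) throughout the x64 macro assembler, so that arithmetic on pointers and smis tracks the pointer width instead of being hard-coded to 64 bits. The following is a minimal, self-contained sketch of the idea only; the emit_add helper and its signature are assumptions for illustration, not the actual V8 assembler API.

    // Sketch: a p-suffixed instruction picks its operand width from the
    // pointer size, so the same macro-assembler code can serve both a
    // 64-bit-pointer build and a 32-bit-pointer (x32) build.
    #include <cstdio>

    constexpr int kPointerSize = sizeof(void*);  // 8 on x64, 4 on x32

    struct Register { int code; };

    struct Assembler {
      // Assumed internal helper: emit an add with the given operand width.
      void emit_add(Register dst, Register src, int size) {
        std::printf("add%s r%d, r%d\n", size == 8 ? "q" : "l",
                    dst.code, src.code);
      }
      // addq always emits the 64-bit form; addp follows the pointer width.
      void addq(Register dst, Register src) { emit_add(dst, src, 8); }
      void addp(Register dst, Register src) {
        emit_add(dst, src, kPointerSize);
      }
    };

    int main() {
      Assembler masm;
      masm.addp(Register{0}, Register{1});  // "addq r0, r1" on an x64 build
    }

On a regular x64 build kPointerSize is 8, so addp assembles to exactly what addq did and the patch is behavior-preserving; the renaming only pays off on a build where pointers are narrower than 64 bits.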
Index: src/x64/macro-assembler-x64.cc
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 48dc16ad7f87f93a7c800e09036da691fd832e98..e5b4160084716c54b62c51004e0d82c84c9d62a8 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -236,7 +236,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
// Store pointer to buffer.
movp(Operand(scratch, 0), addr);
// Increment buffer top.
- addq(scratch, Immediate(kPointerSize));
+ addp(scratch, Immediate(kPointerSize));
// Write back new top of buffer.
StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
// Call stub on end of buffer.
@@ -291,7 +291,7 @@ void MacroAssembler::InNewSpace(Register object,
Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
Assembler::RelocInfoNone());
if (scratch.is(object)) {
- addq(scratch, kScratchRegister);
+ addp(scratch, kScratchRegister);
} else {
lea(scratch, Operand(object, kScratchRegister, times_1, 0));
}
@@ -560,7 +560,7 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
- addq(rsp, Immediate(num_arguments * kPointerSize));
+ addp(rsp, Immediate(num_arguments * kPointerSize));
}
LoadRoot(rax, Heap::kUndefinedValueRootIndex);
}
@@ -886,7 +886,7 @@ void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
}
// R12 to r15 are callee save on all platforms.
if (fp_mode == kSaveFPRegs) {
- subq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(Operand(rsp, i * kDoubleSize), reg);
@@ -904,7 +904,7 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
XMMRegister reg = XMMRegister::from_code(i);
movsd(reg, Operand(rsp, i * kDoubleSize));
}
- addq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
}
for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
Register reg = saved_regs[i];
@@ -1449,7 +1449,7 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
ASSERT(!dst.is(kScratchRegister));
switch (constant->value()) {
case 1:
- addq(dst, kSmiConstantRegister);
+ addp(dst, kSmiConstantRegister);
return;
case 2:
lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
@@ -1462,7 +1462,7 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
return;
default:
Register constant_reg = GetSmiConstant(constant);
- addq(dst, constant_reg);
+ addp(dst, constant_reg);
return;
}
} else {
@@ -1481,7 +1481,7 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
return;
default:
LoadSmiConstant(dst, constant);
- addq(dst, src);
+ addp(dst, src);
return;
}
}
@@ -1508,16 +1508,16 @@ void MacroAssembler::SmiAddConstant(Register dst,
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
LoadSmiConstant(kScratchRegister, constant);
- addq(dst, kScratchRegister);
+ addp(dst, kScratchRegister);
if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
j(no_overflow, bailout_label, near_jump);
ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
} else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
Label done;
j(no_overflow, &done, Label::kNear);
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
jmp(bailout_label, near_jump);
bind(&done);
} else {
@@ -1531,7 +1531,7 @@ void MacroAssembler::SmiAddConstant(Register dst,
ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
LoadSmiConstant(dst, constant);
- addq(dst, src);
+ addp(dst, src);
j(overflow, bailout_label, near_jump);
}
}
@@ -1545,17 +1545,17 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- subq(dst, constant_reg);
+ subp(dst, constant_reg);
} else {
if (constant->value() == Smi::kMinValue) {
LoadSmiConstant(dst, constant);
// Adding and subtracting the min-value gives the same result, it only
// differs on the overflow bit, which we don't check here.
- addq(dst, src);
+ addp(dst, src);
} else {
// Subtract by adding the negation.
LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
- addq(dst, src);
+ addp(dst, src);
}
}
}
@@ -1574,16 +1574,16 @@ void MacroAssembler::SmiSubConstant(Register dst,
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
j(no_overflow, bailout_label, near_jump);
ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
- addq(dst, kScratchRegister);
+ addp(dst, kScratchRegister);
} else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
Label done;
j(no_overflow, &done, Label::kNear);
- addq(dst, kScratchRegister);
+ addp(dst, kScratchRegister);
jmp(bailout_label, near_jump);
bind(&done);
} else {
@@ -1600,12 +1600,12 @@ void MacroAssembler::SmiSubConstant(Register dst,
ASSERT(!dst.is(kScratchRegister));
movp(dst, src);
LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
j(overflow, bailout_label, near_jump);
} else {
// Subtract by adding the negation.
LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
- addq(dst, src);
+ addp(dst, src);
j(overflow, bailout_label, near_jump);
}
}
@@ -1643,15 +1643,15 @@ static void SmiAddHelper(MacroAssembler* masm,
Label::Distance near_jump) {
if (dst.is(src1)) {
Label done;
- masm->addq(dst, src2);
+ masm->addp(dst, src2);
masm->j(no_overflow, &done, Label::kNear);
// Restore src1.
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->jmp(on_not_smi_result, near_jump);
masm->bind(&done);
} else {
masm->movp(dst, src1);
- masm->addq(dst, src2);
+ masm->addp(dst, src2);
masm->j(overflow, on_not_smi_result, near_jump);
}
}
@@ -1687,12 +1687,12 @@ void MacroAssembler::SmiAdd(Register dst,
if (!dst.is(src1)) {
if (emit_debug_code()) {
movp(kScratchRegister, src1);
- addq(kScratchRegister, src2);
+ addp(kScratchRegister, src2);
Check(no_overflow, kSmiAdditionOverflow);
}
lea(dst, Operand(src1, src2, times_1, 0));
} else {
- addq(dst, src2);
+ addp(dst, src2);
Assert(no_overflow, kSmiAdditionOverflow);
}
}
@@ -1707,15 +1707,15 @@ static void SmiSubHelper(MacroAssembler* masm,
Label::Distance near_jump) {
if (dst.is(src1)) {
Label done;
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->j(no_overflow, &done, Label::kNear);
// Restore src1.
- masm->addq(dst, src2);
+ masm->addp(dst, src2);
masm->jmp(on_not_smi_result, near_jump);
masm->bind(&done);
} else {
masm->movp(dst, src1);
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->j(overflow, on_not_smi_result, near_jump);
}
}
@@ -1753,7 +1753,7 @@ static void SmiSubNoOverflowHelper(MacroAssembler* masm,
if (!dst.is(src1)) {
masm->movp(dst, src1);
}
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->Assert(no_overflow, kSmiSubtractionOverflow);
}
@@ -1785,7 +1785,7 @@ void MacroAssembler::SmiMul(Register dst,
Label failure, zero_correct_result;
movp(kScratchRegister, src1); // Create backup for later testing.
SmiToInteger64(dst, src1);
- imul(dst, src2);
+ imulp(dst, src2);
j(overflow, &failure, Label::kNear);
// Check for negative zero result. If product is zero, and one
@@ -1809,7 +1809,7 @@ void MacroAssembler::SmiMul(Register dst,
bind(&correct_result);
} else {
SmiToInteger64(dst, src1);
- imul(dst, src2);
+ imulp(dst, src2);
j(overflow, on_not_smi_result, near_jump);
// Check for negative zero result. If product is zero, and one
// argument is negative, go to slow case.
@@ -2176,7 +2176,7 @@ void MacroAssembler::SelectNonSmi(Register dst,
// Exactly one operand is a smi.
ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
// kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
- subq(kScratchRegister, Immediate(1));
+ subp(kScratchRegister, Immediate(1));
// If src1 is a smi, then scratch register all 1s, else it is all 0s.
movp(dst, src1);
xor_(dst, src2);
@@ -2289,7 +2289,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
SmiToInteger32(
mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
shrl(mask, Immediate(1));
- subq(mask, Immediate(1)); // Make mask.
+ subp(mask, Immediate(1)); // Make mask.
// Calculate the entry in the number string cache. The hash value in the
// number string cache for smis is just the smi value, and the hash for
@@ -2567,7 +2567,7 @@ void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
- addq(rsp, Immediate(stack_elements * kPointerSize));
+ addp(rsp, Immediate(stack_elements * kPointerSize));
}
}
@@ -2644,7 +2644,8 @@ void MacroAssembler::Pop(const Operand& dst) {
leal(rsp, Operand(rsp, 4));
if (scratch.is(kSmiConstantRegister)) {
// Restore kSmiConstantRegister.
- movp(kSmiConstantRegister, Smi::FromInt(kSmiConstantRegisterValue),
+ movp(kSmiConstantRegister,
+ reinterpret_cast<void*>(Smi::FromInt(kSmiConstantRegisterValue)),
Assembler::RelocInfoNone());
}
}
@@ -2788,7 +2789,7 @@ void MacroAssembler::Popad() {
void MacroAssembler::Dropad() {
- addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
+ addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
}
@@ -2879,7 +2880,7 @@ void MacroAssembler::PopTryHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
Pop(ExternalOperand(handler_address));
- addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+ addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}
@@ -2993,7 +2994,7 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
ret(bytes_dropped);
} else {
PopReturnAddressTo(scratch);
- addq(rsp, Immediate(bytes_dropped));
+ addp(rsp, Immediate(bytes_dropped));
PushReturnAddressFrom(scratch);
ret(0);
}
@@ -3199,10 +3200,10 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
// Slow case.
if (input_reg.is(result_reg)) {
- subq(rsp, Immediate(kDoubleSize));
+ subp(rsp, Immediate(kDoubleSize));
movsd(MemOperand(rsp, 0), xmm0);
SlowTruncateToI(result_reg, rsp, 0);
- addq(rsp, Immediate(kDoubleSize));
+ addp(rsp, Immediate(kDoubleSize));
} else {
SlowTruncateToI(result_reg, input_reg);
}
@@ -3219,10 +3220,10 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
cmpq(result_reg, kScratchRegister);
j(not_equal, &done, Label::kNear);
- subq(rsp, Immediate(kDoubleSize));
+ subp(rsp, Immediate(kDoubleSize));
movsd(MemOperand(rsp, 0), input_reg);
SlowTruncateToI(result_reg, rsp, 0);
- addq(rsp, Immediate(kDoubleSize));
+ addp(rsp, Immediate(kDoubleSize));
bind(&done);
}
@@ -3716,7 +3717,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (!code_constant.is_null()) {
Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
- addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
} else if (!code_register.is(rdx)) {
movp(rdx, code_register);
}
@@ -3824,14 +3825,14 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
if (save_doubles) {
int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
arg_stack_space * kRegisterSize;
- subq(rsp, Immediate(space));
+ subp(rsp, Immediate(space));
int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else if (arg_stack_space > 0) {
- subq(rsp, Immediate(arg_stack_space * kRegisterSize));
+ subp(rsp, Immediate(arg_stack_space * kRegisterSize));
}
// Get the required frame alignment for the OS.
@@ -4193,7 +4194,7 @@ void MacroAssembler::Allocate(int object_size,
if (!top_reg.is(result)) {
movp(top_reg, result);
}
- addq(top_reg, Immediate(object_size));
+ addp(top_reg, Immediate(object_size));
j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
cmpq(top_reg, limit_operand);
@@ -4205,9 +4206,9 @@ void MacroAssembler::Allocate(int object_size,
bool tag_result = (flags & TAG_OBJECT) != 0;
if (top_reg.is(result)) {
if (tag_result) {
- subq(result, Immediate(object_size - kHeapObjectTag));
+ subp(result, Immediate(object_size - kHeapObjectTag));
} else {
- subq(result, Immediate(object_size));
+ subp(result, Immediate(object_size));
}
} else if (tag_result) {
// Tag the result if requested.
@@ -4269,7 +4270,7 @@ void MacroAssembler::Allocate(Register object_size,
if (!object_size.is(result_end)) {
movp(result_end, object_size);
}
- addq(result_end, result);
+ addp(result_end, result);
j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
cmpq(result_end, limit_operand);
@@ -4280,7 +4281,7 @@ void MacroAssembler::Allocate(Register object_size,
// Tag the result if requested.
if ((flags & TAG_OBJECT) != 0) {
- addq(result, Immediate(kHeapObjectTag));
+ addp(result, Immediate(kHeapObjectTag));
}
}
@@ -4328,7 +4329,7 @@ void MacroAssembler::AllocateTwoByteString(Register result,
kHeaderAlignment));
and_(scratch1, Immediate(~kObjectAlignmentMask));
if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
+ subp(scratch1, Immediate(kHeaderAlignment));
}
// Allocate two byte string in new space.
@@ -4363,10 +4364,10 @@ void MacroAssembler::AllocateAsciiString(Register result,
kObjectAlignmentMask;
movl(scratch1, length);
ASSERT(kCharSize == 1);
- addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
+ addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
and_(scratch1, Immediate(~kObjectAlignmentMask));
if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
+ subp(scratch1, Immediate(kHeaderAlignment));
}
// Allocate ASCII string in new space.
@@ -4517,7 +4518,7 @@ void MacroAssembler::CopyBytes(Register destination,
andl(scratch, Immediate(kPointerSize - 1));
movp(length, Operand(source, scratch, times_1, -kPointerSize));
movp(Operand(destination, scratch, times_1, -kPointerSize), length);
- addq(destination, scratch);
+ addp(destination, scratch);
if (min_length <= kLongStringLimit) {
jmp(&done, Label::kNear);
@@ -4533,7 +4534,7 @@ void MacroAssembler::CopyBytes(Register destination,
// Move remaining bytes of length.
movp(scratch, Operand(source, length, times_1, -kPointerSize));
movp(Operand(destination, length, times_1, -kPointerSize), scratch);
- addq(destination, length);
+ addp(destination, length);
jmp(&done, Label::kNear);
bind(&short_string);
@@ -4562,7 +4563,7 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
jmp(&entry);
bind(&loop);
movp(Operand(start_offset, 0), filler);
- addq(start_offset, Immediate(kPointerSize));
+ addp(start_offset, Immediate(kPointerSize));
bind(&entry);
cmpq(start_offset, end_offset);
j(less, &loop);
@@ -4716,7 +4717,7 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments) {
ASSERT(IsPowerOf2(frame_alignment));
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
- subq(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
+ subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
and_(rsp, Immediate(-frame_alignment));
movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
}
@@ -4874,7 +4875,7 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
Immediate((Page::kPageAlignmentMask >> shift) &
~(Bitmap::kBytesPerCell - 1)));
- addq(bitmap_reg, rcx);
+ addp(bitmap_reg, rcx);
movp(rcx, addr_reg);
shrl(rcx, Immediate(kPointerSizeLog2));
and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
@@ -4910,7 +4911,7 @@ void MacroAssembler::EnsureNotWhite(
Label ok;
Push(mask_scratch);
// shl. May overflow making the check conservative.
- addq(mask_scratch, mask_scratch);
+ addp(mask_scratch, mask_scratch);
testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(zero, &ok, Label::kNear);
int3();
@@ -4960,11 +4961,11 @@ void MacroAssembler::EnsureNotWhite(
ASSERT(kOneByteStringTag == 0x04);
and_(length, Immediate(kStringEncodingMask));
xor_(length, Immediate(kStringEncodingMask));
- addq(length, Immediate(0x04));
+ addp(length, Immediate(0x04));
// Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
- imul(length, FieldOperand(value, String::kLengthOffset));
+ imulp(length, FieldOperand(value, String::kLengthOffset));
shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
- addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+ addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
and_(length, Immediate(~kObjectAlignmentMask));
bind(&is_data_object);
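
A note on the overflow handling visible in the SmiAddHelper and SmiSubHelper hunks above: when dst aliases src1, the helpers perform the operation, and on overflow apply the inverse operation (subp after addp, addp after subp) so the original value is intact for the bailout path. A rough C++ equivalent of that contract, using a compiler builtin in place of the CPU overflow flag the assembler checks directly — a hedged sketch, not V8 code:

    #include <cstdint>
    #include <optional>

    // Checked add: either yields the sum or leaves the inputs untouched
    // for a slow-path retry, mirroring the j(no_overflow, ...) / undo
    // sequence in the diff. Requires GCC or Clang for the builtin.
    std::optional<int64_t> checked_smi_add(int64_t a, int64_t b) {
      int64_t sum;
      if (__builtin_add_overflow(a, b, &sum)) {
        return std::nullopt;  // bail out to the non-smi slow path
      }
      return sum;
    }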
